blob: 8456a409fc215ec2df92cd2ffc3e3a03bcfcc373 [file] [log] [blame]
Satya Tangirala138adbb2019-10-24 14:44:24 -07001/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright 2019 Google LLC
4 */
5#ifndef __LINUX_BIO_CRYPT_CTX_H
6#define __LINUX_BIO_CRYPT_CTX_H
7
/* Inline encryption algorithms; used to index per-mode capability tables. */
enum blk_crypto_mode_num {
	BLK_ENCRYPTION_MODE_INVALID,		/* no encryption */
	BLK_ENCRYPTION_MODE_AES_256_XTS,
	BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
	BLK_ENCRYPTION_MODE_ADIANTUM,
	BLK_ENCRYPTION_MODE_MAX,		/* number of modes; keep last */
};
15
16#ifdef CONFIG_BLOCK
17#include <linux/blk_types.h>
18
19#ifdef CONFIG_BLK_INLINE_ENCRYPTION
Satya Tangiralae12563c2019-12-17 14:26:29 -080020
21#define BLK_CRYPTO_MAX_KEY_SIZE 64
Barani Muthukumarand42ba872020-01-02 11:57:39 -080022#define BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE 128
Satya Tangiralae12563c2019-12-17 14:26:29 -080023
/**
 * struct blk_crypto_key - an inline encryption key
 * @crypto_mode: encryption algorithm this key is for
 * @data_unit_size: the data unit size for all encryption/decryptions with this
 *	key.  This is the size in bytes of each individual plaintext and
 *	ciphertext.  This is always a power of 2.  It might be e.g. the
 *	filesystem block size or the disk sector size.
 * @data_unit_size_bits: log2 of data_unit_size
 * @size: size of this key in bytes (determined by @crypto_mode)
 * @hash: hash of this key, for keyslot manager use only
 * @is_hw_wrapped: @raw points to a wrapped key to be used by an inline
 *	encryption hardware that accepts wrapped keys.
 * @raw: the raw bytes of this key.  Only the first @size bytes are used.
 *	Sized for the larger BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE so that
 *	hardware-wrapped keys fit as well as raw ones.
 *
 * A blk_crypto_key is immutable once created, and many bios can reference it at
 * the same time. It must not be freed until all bios using it have completed.
 */
struct blk_crypto_key {
	enum blk_crypto_mode_num crypto_mode;
	unsigned int data_unit_size;
	unsigned int data_unit_size_bits;
	unsigned int size;
	unsigned int hash;
	bool is_hw_wrapped;
	u8 raw[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE];
};
50
51#define BLK_CRYPTO_MAX_IV_SIZE 32
52#define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE/sizeof(u64))
53
/**
 * struct bio_crypt_ctx - an inline encryption context
 * @bc_key: the key, algorithm, and data unit size to use
 * @bc_keyslot: the keyslot that has been assigned for this key in @bc_ksm,
 *	or -1 if no keyslot has been assigned yet.
 * @bc_dun: the data unit number (starting IV) to use; a multi-u64 value with
 *	the least significant word at index 0 (see bio_crypt_dun_increment()).
 * @bc_ksm: the keyslot manager into which the key has been programmed with
 *	@bc_keyslot, or NULL if this key hasn't yet been programmed.
 *
 * A bio_crypt_ctx specifies that the contents of the bio will be encrypted (for
 * write requests) or decrypted (for read requests) inline by the storage device
 * or controller, or by the crypto API fallback.
 */
struct bio_crypt_ctx {
	const struct blk_crypto_key *bc_key;
	int bc_keyslot;

	/* Data unit number */
	u64 bc_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	/*
	 * The keyslot manager where the key has been programmed
	 * with keyslot.
	 */
	struct keyslot_manager *bc_ksm;
};
80
Satya Tangiralae12563c2019-12-17 14:26:29 -080081int bio_crypt_ctx_init(void);
82
83struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask);
84
85void bio_crypt_free_ctx(struct bio *bio);
Satya Tangirala138adbb2019-10-24 14:44:24 -070086
87static inline bool bio_has_crypt_ctx(struct bio *bio)
88{
89 return bio->bi_crypt_context;
90}
91
Satya Tangiralae12563c2019-12-17 14:26:29 -080092void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
93
94static inline void bio_crypt_set_ctx(struct bio *bio,
95 const struct blk_crypto_key *key,
96 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
97 gfp_t gfp_mask)
Satya Tangirala138adbb2019-10-24 14:44:24 -070098{
Satya Tangiralae12563c2019-12-17 14:26:29 -080099 struct bio_crypt_ctx *bc = bio_crypt_alloc_ctx(gfp_mask);
100
101 bc->bc_key = key;
102 memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));
103 bc->bc_ksm = NULL;
104 bc->bc_keyslot = -1;
105
106 bio->bi_crypt_context = bc;
107}
108
109void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc);
110
111int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc,
112 struct keyslot_manager *ksm);
113
114struct request;
115bool bio_crypt_should_process(struct request *rq);
116
117static inline bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
118 unsigned int bytes,
119 u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
120{
121 int i = 0;
122 unsigned int inc = bytes >> bc->bc_key->data_unit_size_bits;
123
Eric Biggers75fea5f2020-01-21 09:39:22 -0800124 while (i < BLK_CRYPTO_DUN_ARRAY_SIZE) {
Satya Tangiralae12563c2019-12-17 14:26:29 -0800125 if (bc->bc_dun[i] + inc != next_dun[i])
126 return false;
127 inc = ((bc->bc_dun[i] + inc) < inc);
128 i++;
129 }
130
131 return true;
132}
133
134
135static inline void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
136 unsigned int inc)
137{
138 int i = 0;
139
140 while (inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE) {
141 dun[i] += inc;
142 inc = (dun[i] < inc);
143 i++;
Satya Tangirala138adbb2019-10-24 14:44:24 -0700144 }
145}
146
Satya Tangiralae12563c2019-12-17 14:26:29 -0800147static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
Satya Tangirala138adbb2019-10-24 14:44:24 -0700148{
Satya Tangiralae12563c2019-12-17 14:26:29 -0800149 struct bio_crypt_ctx *bc = bio->bi_crypt_context;
150
151 if (!bc)
152 return;
153
154 bio_crypt_dun_increment(bc->bc_dun,
155 bytes >> bc->bc_key->data_unit_size_bits);
Satya Tangirala138adbb2019-10-24 14:44:24 -0700156}
157
Satya Tangiralae12563c2019-12-17 14:26:29 -0800158bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2);
Satya Tangirala138adbb2019-10-24 14:44:24 -0700159
Satya Tangiralae12563c2019-12-17 14:26:29 -0800160bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes,
161 struct bio *b_2);
Satya Tangirala138adbb2019-10-24 14:44:24 -0700162
163#else /* CONFIG_BLK_INLINE_ENCRYPTION */
/* Inline encryption disabled: nothing to initialize, always succeeds. */
static inline int bio_crypt_ctx_init(void)
{
	return 0;
}
168
/* Inline encryption disabled: bios never carry a crypt context. */
static inline bool bio_has_crypt_ctx(struct bio *bio)
{
	return false;
}
173
/* Inline encryption disabled: no crypt context to clone. */
static inline void bio_crypt_clone(struct bio *dst, struct bio *src,
				   gfp_t gfp_mask) { }
176
/* Inline encryption disabled: no crypt context to free. */
static inline void bio_crypt_free_ctx(struct bio *bio) { }
178
/* Inline encryption disabled: no DUN to advance. */
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes) { }
Satya Tangirala138adbb2019-10-24 14:44:24 -0700180
/* Inline encryption disabled: crypto never makes two bios incompatible. */
static inline bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
{
	return true;
}
185
/* Inline encryption disabled: crypto never prevents a bio merge. */
static inline bool bio_crypt_ctx_mergeable(struct bio *b_1,
					   unsigned int b1_bytes,
					   struct bio *b_2)
{
	return true;
}
192
193#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
Satya Tangiralae12563c2019-12-17 14:26:29 -0800194
Eric Biggerse1a94e62020-01-21 09:27:47 -0800195#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
/* Mark @bio so that dm-default-key will not apply its default encryption. */
static inline void bio_set_skip_dm_default_key(struct bio *bio)
{
	bio->bi_skip_dm_default_key = true;
}
200
/* Return true if @bio has been marked to bypass dm-default-key. */
static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
{
	return bio->bi_skip_dm_default_key;
}
205
/* Propagate the dm-default-key bypass flag from @src to its clone @dst. */
static inline void bio_clone_skip_dm_default_key(struct bio *dst,
						 const struct bio *src)
{
	dst->bi_skip_dm_default_key = src->bi_skip_dm_default_key;
}
211#else /* CONFIG_DM_DEFAULT_KEY */
/* CONFIG_DM_DEFAULT_KEY disabled: the flag does not exist; no-op. */
static inline void bio_set_skip_dm_default_key(struct bio *bio)
{
}
215
/* CONFIG_DM_DEFAULT_KEY disabled: never skip. */
static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
{
	return false;
}
220
/* CONFIG_DM_DEFAULT_KEY disabled: nothing to copy on clone. */
static inline void bio_clone_skip_dm_default_key(struct bio *dst,
						 const struct bio *src)
{
}
225#endif /* !CONFIG_DM_DEFAULT_KEY */
226
Satya Tangirala138adbb2019-10-24 14:44:24 -0700227#endif /* CONFIG_BLOCK */
Satya Tangiralae12563c2019-12-17 14:26:29 -0800228
Satya Tangirala138adbb2019-10-24 14:44:24 -0700229#endif /* __LINUX_BIO_CRYPT_CTX_H */