aboutsummaryrefslogtreecommitdiff
path: root/block/blk-crypto.c
blob: f56bbec1132f515ab224b1d5a49f286e67bb4a0e (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/random.h>
#include <linux/siphash.h>

#include "blk-crypto-internal.h"

/*
 * Table of supported crypto modes, indexed by enum blk_crypto_mode_num.
 * .cipher_str names the crypto API transform used by blk-crypto-fallback,
 * .keysize is the raw key size in bytes, and .ivsize is the IV size in bytes.
 */
const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,	/* XTS uses two AES-256 keys */
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,	/* single AES-128 key; ESSIV key derived via SHA-256 */
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,	/* Adiantum supports 32-byte tweaks */
	},
};

/* Check that all I/O segments are data unit aligned */
static int bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
				bio->bi_crypt_context->bc_key->data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return -EIO;
	}
	return 0;
}

/**
 * blk_crypto_submit_bio - handle submitting bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio doesn't have inline encryption enabled or the submitter already
 * specified a keyslot for the target device, do nothing.  Else, a raw key must
 * have been provided, so acquire a device keyslot for it if supported.  Else,
 * use the crypto API fallback.
 *
 * When the crypto API fallback is used for encryption, blk-crypto may choose to
 * split the bio into 2 - the first one that will continue to be processed and
 * the second one that will be resubmitted via generic_make_request.
 * A bounce bio will be allocated to encrypt the contents of the aforementioned
 * "first one", and *bio_ptr will be updated to this bounce bio.
 *
 * Return: 0 if bio submission should continue; nonzero if bio_endio() was
 *	   already called so bio submission should abort.
 */
int blk_crypto_submit_bio(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct request_queue *q;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	int err;

	/* No crypto context or no data means nothing for blk-crypto to do. */
	if (!bc || !bio_has_data(bio))
		return 0;

	/*
	 * When a read bio is marked for fallback decryption, its bi_iter is
	 * saved so that when we decrypt the bio later, we know what part of it
	 * was marked for fallback decryption (when the bio is passed down after
	 * blk_crypto_submit bio, it may be split or advanced so we cannot rely
	 * on the bi_iter while decrypting in blk_crypto_endio)
	 */
	if (bio_crypt_fallback_crypted(bc))
		return 0;

	/* All segments must be aligned to the key's data unit size. */
	err = bio_crypt_check_alignment(bio);
	if (err) {
		bio->bi_status = BLK_STS_IOERR;
		goto out;
	}

	q = bio->bi_disk->queue;

	if (bc->bc_ksm) {
		/* Key already programmed into device? */
		if (q->ksm == bc->bc_ksm)
			return 0;

		/* Nope, release the existing keyslot. */
		bio_crypt_ctx_release_keyslot(bc);
	}

	/* Get device keyslot if supported */
	if (keyslot_manager_crypto_mode_supported(q->ksm,
						  bc->bc_key->crypto_mode,
						  bc->bc_key->data_unit_size,
						  bc->bc_key->is_hw_wrapped)) {
		err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm);
		if (!err)
			return 0;

		/*
		 * Keyslot acquisition failed; fall through to the crypto API
		 * fallback rather than failing the bio outright.
		 */
		pr_warn_once("Failed to acquire keyslot for %s (err=%d).  Falling back to crypto API.\n",
			     bio->bi_disk->disk_name, err);
	}

	/* Fallback to crypto API */
	err = blk_crypto_fallback_submit_bio(bio_ptr);
	if (err)
		goto out;

	return 0;
out:
	/*
	 * Error path: end the bio here (fallback may have updated *bio_ptr)
	 * and tell the caller to abort submission.
	 */
	bio_endio(*bio_ptr);
	return err;
}

/**
 * blk_crypto_endio - clean up bio w.r.t inline encryption during bio_endio
 *
 * @bio: the bio to clean up
 *
 * If blk_crypto_submit_bio decided to fallback to crypto API for this bio,
 * we queue the bio for decryption into a workqueue and return false,
 * and call bio_endio(bio) at a later time (after the bio has been decrypted).
 *
 * If the bio is not to be decrypted by the crypto API, this function releases
 * the reference to the keyslot that blk_crypto_submit_bio got.
 *
 * Return: true if bio_endio should continue; false otherwise (bio_endio will
 * be called again when bio has been decrypted).
 */
bool blk_crypto_endio(struct bio *bio)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	if (!bc)
		return true;

	if (!bio_crypt_fallback_crypted(bc)) {
		/* Drop the keyslot reference taken at submission, if any. */
		if (bc->bc_keyslot >= 0)
			bio_crypt_ctx_release_keyslot(bc);
		return true;
	}

	/*
	 * The only bios who's crypto is handled by the blk-crypto
	 * fallback when they reach here are those with
	 * bio_data_dir(bio) == READ, since WRITE bios that are
	 * encrypted by the crypto API fallback are handled by
	 * blk_crypto_encrypt_endio().
	 */
	return !blk_crypto_queue_decrypt_bio(bio);
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key.
 * @raw_key_size: Size of raw key.  Must be at least the required size for the
 *                chosen @crypto_mode; see blk_crypto_modes[].  (It's allowed
 *                to be longer than the mode's actual key size, in order to
 *                support inline encryption hardware that accepts wrapped keys.
 *                @is_hw_wrapped has to be set for such keys)
 * @is_hw_wrapped: Denotes @raw_key is wrapped.
 * @crypto_mode: identifier for the encryption algorithm to use
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -EINVAL if the crypto mode, key size, or data unit
 *	   size is invalid.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *raw_key, unsigned int raw_key_size,
			bool is_hw_wrapped,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;
	static siphash_key_t hash_key;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	/* The raw key buffer must fit the largest allowed wrapped key. */
	BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);

	mode = &blk_crypto_modes[crypto_mode];
	if (is_hw_wrapped) {
		/* Wrapped keys may exceed the mode's key size, within bounds. */
		if (raw_key_size < mode->keysize ||
		    raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
			return -EINVAL;
	} else {
		if (raw_key_size != mode->keysize)
			return -EINVAL;
	}

	/* data_unit_size_bits below requires a power of 2. */
	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_mode = crypto_mode;
	blk_key->data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = raw_key_size;
	blk_key->is_hw_wrapped = is_hw_wrapped;
	memcpy(blk_key->raw, raw_key, raw_key_size);

	/*
	 * The keyslot manager uses the SipHash of the key to implement O(1) key
	 * lookups while avoiding leaking information about the keys.  It's
	 * precomputed here so that it only needs to be computed once per key.
	 */
	get_random_once(&hash_key, sizeof(hash_key));
	blk_key->hash = siphash(raw_key, raw_key_size, &hash_key);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);

/**
 * blk_crypto_start_using_mode() - Start using blk-crypto on a device
 * @crypto_mode: the crypto mode that will be used
 * @data_unit_size: the data unit size that will be used
 * @is_hw_wrapped_key: whether the key will be hardware-wrapped
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the needed crypto settings, or the crypto API fallback has
 * transforms for the needed mode allocated and ready to go.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the crypto
 *	   settings and blk-crypto-fallback is either disabled or the needed
 *	   algorithm is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode,
				unsigned int data_unit_size,
				bool is_hw_wrapped_key,
				struct request_queue *q)
{
	bool hw_supported = keyslot_manager_crypto_mode_supported(
					q->ksm, crypto_mode, data_unit_size,
					is_hw_wrapped_key);

	if (hw_supported)
		return 0;

	/* The fallback can't handle wrapped keys; only hardware can. */
	if (!is_hw_wrapped_key)
		return blk_crypto_fallback_start_using_mode(crypto_mode);

	pr_warn_once("hardware doesn't support wrapped keys\n");
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode);

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue who's keyslot manager this key might have been
 *     programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) should call this function to ensure that a key
 * is evicted from hardware that it might have been programmed into. This
 * will call keyslot_manager_evict_key on the queue's keyslot manager, if one
 * exists, and supports the crypto algorithm with the specified data unit size.
 * Otherwise, it will evict the key from the blk-crypto-fallback's ksm.
 *
 * Return: 0 on success, -err on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	/*
	 * If the device has no keyslot manager, or its hardware doesn't
	 * support this key's settings, the key can only have been programmed
	 * into the fallback's keyslot manager.
	 */
	if (!q->ksm ||
	    !keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode,
						   key->data_unit_size,
						   key->is_hw_wrapped))
		return blk_crypto_fallback_evict_key(key);

	return keyslot_manager_evict_key(q->ksm, key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);