// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */
5
6#include <linux/bio.h>
7#include <linux/blkdev.h>
8#include <linux/keyslot-manager.h>
9#include <linux/module.h>
10#include <linux/slab.h>
11
12#include "blk-crypto-internal.h"
13
/*
 * Number of bio_crypt_ctx objects to keep preallocated in the mempool,
 * so crypt contexts can be cloned on the I/O path without failing.
 */
static int num_prealloc_crypt_ctxs = 128;

/* 0444: readable via sysfs, settable only at module load time. */
module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");
19
/* Slab cache backing the mempool below; both set up in bio_crypt_ctx_init(). */
static struct kmem_cache *bio_crypt_ctx_cache;
/* Mempool of preallocated bio_crypt_ctx objects (see num_prealloc_crypt_ctxs). */
static mempool_t *bio_crypt_ctx_pool;
22
23int __init bio_crypt_ctx_init(void)
24{
25 size_t i;
26
27 bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
28 if (!bio_crypt_ctx_cache)
29 return -ENOMEM;
30
31 bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
32 bio_crypt_ctx_cache);
33 if (!bio_crypt_ctx_pool)
34 return -ENOMEM;
35
36 /* This is assumed in various places. */
37 BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);
38
39 /* Sanity check that no algorithm exceeds the defined limits. */
40 for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
41 BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
42 BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
43 }
44
45 return 0;
46}
47
48struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask)
49{
50 return mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
51}
52
53void bio_crypt_free_ctx(struct bio *bio)
54{
55 mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
56 bio->bi_crypt_context = NULL;
57}
58
/*
 * bio_crypt_clone() - give @dst a copy of @src's encryption context.
 *
 * Copies the whole bio_crypt_ctx and, if the source holds a hardware
 * keyslot, takes an extra reference on that slot so both bios can
 * release it independently.
 */
void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	const struct bio_crypt_ctx *src_bc = src->bi_crypt_context;

	/*
	 * If a bio is fallback_crypted, then it will be decrypted when
	 * bio_endio is called. As we only want the data to be decrypted once,
	 * copies of the bio must not have a crypt context.
	 */
	if (!src_bc || bio_crypt_fallback_crypted(src_bc))
		return;

	/*
	 * NOTE(review): the allocation result is dereferenced unchecked.
	 * mempool_alloc() can return NULL when gfp_mask does not allow
	 * direct reclaim — presumably all callers pass a reclaiming mask
	 * so this cannot fail; confirm against callers.
	 */
	dst->bi_crypt_context = bio_crypt_alloc_ctx(gfp_mask);
	*dst->bi_crypt_context = *src_bc;

	/* bc_keyslot >= 0 means the source holds a keyslot reference. */
	if (src_bc->bc_keyslot >= 0)
		keyslot_manager_get_slot(src_bc->bc_ksm, src_bc->bc_keyslot);
}
EXPORT_SYMBOL_GPL(bio_crypt_clone);
78
79bool bio_crypt_should_process(struct request *rq)
80{
81 struct bio *bio = rq->bio;
82
83 if (!bio || !bio->bi_crypt_context)
84 return false;
85
86 return rq->q->ksm == bio->bi_crypt_context->bc_ksm;
87}
88EXPORT_SYMBOL_GPL(bio_crypt_should_process);
89
/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
{
	struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
	struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;

	/*
	 * NOTE(review): this pointer comparison only passes when both bios
	 * share the same context object or both have none. Since contexts
	 * appear to be allocated per-bio (see bio_crypt_clone), two encrypted
	 * bios would always compare unequal here, which makes the bc_key
	 * check below unreachable except for the NULL case. Looks
	 * intentionally conservative (only blocks merging) — confirm whether
	 * a key-based comparison was intended instead.
	 */
	if (bc1 != bc2)
		return false;

	return !bc1 || bc1->bc_key == bc2->bc_key;
}
104
105/*
106 * Checks that two bio crypt contexts are compatible, and also
107 * that their data_unit_nums are continuous (and can hence be merged)
108 * in the order b_1 followed by b_2.
109 */
110bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes,
111 struct bio *b_2)
112{
113 struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
114 struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;
115
116 if (!bio_crypt_ctx_compatible(b_1, b_2))
117 return false;
118
119 return !bc1 || bio_crypt_dun_is_contiguous(bc1, b1_bytes, bc2->bc_dun);
120}
121
122void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc)
123{
124 keyslot_manager_put_slot(bc->bc_ksm, bc->bc_keyslot);
125 bc->bc_ksm = NULL;
126 bc->bc_keyslot = -1;
127}
128
129int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc,
130 struct keyslot_manager *ksm)
131{
132 int slot = keyslot_manager_get_slot_for_key(ksm, bc->bc_key);
133
134 if (slot < 0)
135 return slot;
136
137 bc->bc_keyslot = slot;
138 bc->bc_ksm = ksm;
139 return 0;
140}