// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * the crypto API.  See Documentation/block/inline-encryption.rst.  fscrypt
 * still provides the key and IV to use.
 */
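
/*
 * Rough lifecycle sketch (illustrative only; the snippet below is not part of
 * this file, and details such as allocation flags vary by filesystem):
 *
 *  1. Key setup: fscrypt_select_encryption_impl() decides per-inode whether
 *     blk-crypto can be used, and fscrypt_prepare_inline_crypt_key() turns the
 *     raw key into a blk_crypto_key programmed on the filesystem's device(s).
 *
 *  2. I/O submission: the filesystem attaches the key and DUN to each file
 *     contents bio, roughly:
 *
 *	bio = bio_alloc(GFP_NOFS, nr_vecs);
 *	fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOFS);
 *	// ...add only blocks for which fscrypt_mergeable_bio() returns true...
 *	submit_bio(bio);
 *
 *  3. Key teardown: fscrypt_destroy_inline_crypt_key() evicts the key from
 *     every request_queue it was programmed into.
 */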

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/keyslot-manager.h>

#include "fscrypt_private.h"

struct fscrypt_blk_crypto_key {
	struct blk_crypto_key base;
	int num_devs;
	struct request_queue *devs[];
};

/* Enable inline encryption for this file if supported. */
void fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;

	/* The file must need contents encryption, not filenames encryption */
	if (!S_ISREG(inode->i_mode))
		return;

	/* blk-crypto must implement the needed encryption algorithm */
	if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
		return;

	/* The filesystem must be mounted with -o inlinecrypt */
	if (!sb->s_cop->inline_crypt_enabled ||
	    !sb->s_cop->inline_crypt_enabled(sb))
		return;

	ci->ci_inlinecrypt = true;
}
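
/*
 * Example (illustrative sketch, not taken from any particular filesystem): a
 * filesystem opts in by implementing the ->inline_crypt_enabled() hook in its
 * fscrypt_operations and returning true only when it was mounted with
 * -o inlinecrypt.  MYFS_SB() and MYFS_MOUNT_INLINECRYPT are hypothetical.
 *
 *	static bool myfs_inline_crypt_enabled(struct super_block *sb)
 *	{
 *		return MYFS_SB(sb)->s_mount_flags & MYFS_MOUNT_INLINECRYPT;
 *	}
 *
 *	static const struct fscrypt_operations myfs_cryptops = {
 *		...
 *		.inline_crypt_enabled	= myfs_inline_crypt_enabled,
 *	};
 */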

int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
				     const u8 *raw_key,
				     unsigned int raw_key_size,
				     const struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
	int num_devs = 1;
	int queue_refs = 0;
	struct fscrypt_blk_crypto_key *blk_key;
	int err;
	int i;

	if (sb->s_cop->get_num_devices)
		num_devs = sb->s_cop->get_num_devices(sb);
	if (WARN_ON(num_devs < 1))
		return -EINVAL;

	blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS);
	if (!blk_key)
		return -ENOMEM;

	blk_key->num_devs = num_devs;
	if (num_devs == 1)
		blk_key->devs[0] = bdev_get_queue(sb->s_bdev);
	else
		sb->s_cop->get_devices(sb, blk_key->devs);

	BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE >
		     BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE);

	err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size,
				  crypto_mode, sb->s_blocksize);
	if (err) {
		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
		goto fail;
	}

	/*
	 * We have to start using blk-crypto on all the filesystem's devices.
	 * We also have to save all the request_queues for later so that the
	 * key can be evicted from them.  This is needed because some keys
	 * aren't destroyed until after the filesystem is unmounted (namely,
	 * the per-mode keys in struct fscrypt_master_key).
	 */
	for (i = 0; i < num_devs; i++) {
		if (!blk_get_queue(blk_key->devs[i])) {
			fscrypt_err(inode, "couldn't get request_queue");
			err = -EAGAIN;
			goto fail;
		}
		queue_refs++;

		err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize,
						  blk_key->devs[i]);
		if (err) {
			fscrypt_err(inode,
				    "error %d starting to use blk-crypto", err);
			goto fail;
		}
	}
	/*
	 * Pairs with READ_ONCE() in fscrypt_is_key_prepared().  (Only matters
	 * for the per-mode keys, which are shared by multiple inodes.)
	 */
	smp_store_release(&prep_key->blk_key, blk_key);
	return 0;

fail:
	for (i = 0; i < queue_refs; i++)
		blk_put_queue(blk_key->devs[i]);
	kzfree(blk_key);
	return err;
}
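
/*
 * Example (illustrative sketch): a single-device filesystem needs neither
 * ->get_num_devices() nor ->get_devices(); the default above uses s_bdev.  A
 * multi-device filesystem would report every request_queue so that the key
 * can be programmed into, and later evicted from, each of them.  MYFS_SB()
 * and its fields are hypothetical.
 *
 *	static int myfs_get_num_devices(struct super_block *sb)
 *	{
 *		return MYFS_SB(sb)->nr_devices;
 *	}
 *
 *	static void myfs_get_devices(struct super_block *sb,
 *				     struct request_queue **devs)
 *	{
 *		int i;
 *
 *		for (i = 0; i < MYFS_SB(sb)->nr_devices; i++)
 *			devs[i] = bdev_get_queue(MYFS_SB(sb)->devs[i].bdev);
 *	}
 */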

void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
	struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
	int i;

	if (blk_key) {
		for (i = 0; i < blk_key->num_devs; i++) {
			blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
			blk_put_queue(blk_key->devs[i]);
		}
		kzfree(blk_key);
	}
}

int fscrypt_derive_raw_secret(struct super_block *sb,
			      const u8 *wrapped_key,
			      unsigned int wrapped_key_size,
			      u8 *raw_secret, unsigned int raw_secret_size)
{
	struct request_queue *q;

	q = sb->s_bdev->bd_queue;
	if (!q->ksm)
		return -EOPNOTSUPP;

	return keyslot_manager_derive_raw_secret(q->ksm,
						 wrapped_key, wrapped_key_size,
						 raw_secret, raw_secret_size);
}
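
/*
 * Usage note (hedged sketch): with hardware-wrapped keys the raw key bytes
 * never enter the kernel, so fscrypt cannot derive its other software keys
 * directly.  The key-setup code is instead expected to ask the hardware, via
 * the keyslot manager, for a derived "software secret", roughly:
 *
 *	u8 secret[32];	// the size here is an assumption for illustration
 *	int err;
 *
 *	err = fscrypt_derive_raw_secret(sb, wrapped_key, wrapped_key_size,
 *					secret, sizeof(secret));
 *	if (err)
 *		return err;
 */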

/**
 * fscrypt_inode_uses_inline_crypto - test whether an inode uses inline
 *				      encryption
 * @inode: an inode
 *
 * Return: true if the inode requires file contents encryption and if the
 *	   encryption should be done in the block layer via blk-crypto rather
 *	   than in the filesystem layer.
 */
bool fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) &&
		inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(fscrypt_inode_uses_inline_crypto);

/**
 * fscrypt_inode_uses_fs_layer_crypto - test whether an inode uses fs-layer
 *					encryption
 * @inode: an inode
 *
 * Return: true if the inode requires file contents encryption and if the
 *	   encryption should be done in the filesystem layer rather than in
 *	   the block layer via blk-crypto.
 */
bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) &&
		!inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(fscrypt_inode_uses_fs_layer_crypto);
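
/*
 * Example (illustrative sketch): a filesystem's contents write path typically
 * branches on the two helpers above.  myfs_encrypt_block() stands in for
 * whatever filesystem-layer encryption helper is used (e.g. encrypting into a
 * bounce page) and is hypothetical here.
 *
 *	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
 *		data_page = myfs_encrypt_block(page, len, offs);
 *		if (IS_ERR(data_page))
 *			return PTR_ERR(data_page);
 *	}
 *	...
 *	fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO);
 *
 * Note that fscrypt_set_bio_crypt_ctx() below already checks
 * fscrypt_inode_uses_inline_crypto() internally, so the inline-crypto branch
 * rarely needs an explicit check of its own.
 */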

static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
				 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	union fscrypt_iv iv;
	int i;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
	memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
	for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
		dun[i] = le64_to_cpu(iv.dun[i]);
}

/**
 * fscrypt_set_bio_crypt_ctx - prepare a file contents bio for inline encryption
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *	      bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
			       u64 first_lblk, gfp_t gfp_mask)
{
	const struct fscrypt_info *ci = inode->i_crypt_info;
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return;

	fscrypt_generate_dun(ci, first_lblk, dun);
	bio_crypt_set_ctx(bio, &ci->ci_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
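
/*
 * Example (illustrative sketch): a read path would attach the context right
 * after allocating the bio and before adding pages; the variables here are
 * hypothetical, and GFP_NOFS is used as a waiting mask per the comment above.
 *
 *	struct bio *bio = bio_alloc(GFP_NOFS, nr_pages);
 *
 *	fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOFS);
 *	bio_set_dev(bio, inode->i_sb->s_bdev);
 *	bio->bi_iter.bi_sector = first_pblk << (inode->i_blkbits - 9);
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */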

/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
				       const struct inode **inode_ret,
				       u64 *lblk_num_ret)
{
	struct page *page = bh->b_page;
	const struct address_space *mapping;
	const struct inode *inode;

	/*
	 * The ext4 journal (jbd2) can submit a buffer_head it directly created
	 * for a non-pagecache page.  fscrypt doesn't care about these.
	 */
	mapping = page_mapping(page);
	if (!mapping)
		return false;
	inode = mapping->host;

	*inode_ret = inode;
	*lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
			(bh_offset(bh) >> inode->i_blkbits);
	return true;
}

/**
 * fscrypt_set_bio_crypt_ctx_bh - prepare a file contents bio for inline
 *				  encryption
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
				  const struct buffer_head *first_bh,
				  gfp_t gfp_mask)
{
	const struct inode *inode;
	u64 first_lblk;

	if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);

/**
 * fscrypt_mergeable_bio - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this function
 * to ensure that the resulting bio contains only logically contiguous data.
 * This will return false if the next part of the I/O cannot be merged with the
 * bio because either the encryption key would be different or the encryption
 * data unit numbers would be discontiguous.
 *
 * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
			   u64 next_lblk)
{
	const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
		return false;
	if (!bc)
		return true;

	/*
	 * Comparing the key pointers is good enough, as all I/O for each key
	 * uses the same pointer.  I.e., there's currently no need to support
	 * merging requests where the keys are the same but the pointers differ.
	 */
	if (bc->bc_key != &inode->i_crypt_info->ci_key.blk_key->base)
		return false;

	fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
	return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
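
/*
 * Example (illustrative sketch): when fscrypt_mergeable_bio() returns false,
 * the filesystem must submit the bio it has and start a new one.  The myfs_*
 * helper and the surrounding loop state are hypothetical.
 *
 *	if (bio && (!fscrypt_mergeable_bio(bio, inode, next_lblk) ||
 *		    !myfs_physically_contiguous(bio, next_pblk))) {
 *		submit_bio(bio);
 *		bio = NULL;
 *	}
 *	if (!bio) {
 *		bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
 *		fscrypt_set_bio_crypt_ctx(bio, inode, next_lblk, GFP_NOFS);
 *	}
 *	bio_add_page(bio, page, blocksize, offs);
 */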

/**
 * fscrypt_mergeable_bio_bh - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
 * an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
			      const struct buffer_head *next_bh)
{
	const struct inode *inode;
	u64 next_lblk;

	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
		return !bio->bi_crypt_context;

	return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);
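
/*
 * Example (illustrative sketch): a buffer_head based write path would check
 * mergeability before appending each buffer and attach the crypt context
 * whenever a fresh bio is allocated.  "io" and myfs_submit_io() are
 * hypothetical.
 *
 *	if (io->io_bio && !fscrypt_mergeable_bio_bh(io->io_bio, bh))
 *		myfs_submit_io(io);
 *	if (!io->io_bio) {
 *		io->io_bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 *		fscrypt_set_bio_crypt_ctx_bh(io->io_bio, bh, GFP_NOIO);
 *	}
 *	bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
 */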