// SPDX-License-Identifier: GPL-2.0
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

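/*
 * Decrypt the data of each page in @bio in place, using the page's
 * pagecache index as the encryption block number.  A page whose
 * decryption fails is flagged with SetPageError().  If @done is true,
 * read completion is also finished here: successfully decrypted pages
 * are marked uptodate and every page is unlocked.
 */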
static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page->mapping->host, page,
				PAGE_SIZE, 0, page->index);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else if (done) {
			SetPageUptodate(page);
		}
		if (done)
			unlock_page(page);
	}
}

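/*
 * Decrypt the pages of a completed read bio in the caller's context.
 * This is the "caller finishes completion" variant: the pages are left
 * locked and are not marked uptodate, so the caller must finish read
 * completion itself after this returns.
 */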
void fscrypt_decrypt_bio(struct bio *bio)
{
	__fscrypt_decrypt_bio(bio, false);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);

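/*
 * Work item callback used by fscrypt_enqueue_decrypt_bio(): decrypt all
 * pages of the bio, finish read completion for them (unlock and, on
 * success, mark uptodate), then release the fscrypt_ctx and drop the
 * bio reference handed over by the submitter.
 */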
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;

	__fscrypt_decrypt_bio(bio, true);
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

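/*
 * Hand a completed read bio off to the fscrypt workqueue for decryption.
 * Decryption may sleep, so it cannot run directly in the bio completion
 * context; instead the caller supplies a struct fscrypt_ctx (obtained from
 * fscrypt_get_ctx()) that carries the work item, and completion_pages()
 * later decrypts and unlocks the pages and releases the ctx and bio.
 *
 * Illustrative sketch of a caller (hypothetical names, error handling
 * trimmed), assuming the submitter stashed the ctx in bio->bi_private:
 *
 *	static void my_read_end_io(struct bio *bio)	// placeholder caller
 *	{
 *		struct fscrypt_ctx *ctx = bio->bi_private;	// assumed stash
 *
 *		if (!bio->bi_status) {
 *			fscrypt_enqueue_decrypt_bio(ctx, bio);
 *			return;		// completion_pages() finishes up
 *		}
 *		fscrypt_release_ctx(ctx);
 *		// ... normal error completion: unlock pages, bio_put(), ...
 *	}
 */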
void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	fscrypt_enqueue_decrypt_work(&ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);

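/*
 * Given a page that may be a writeback bounce page, make *page point back
 * at the original pagecache ("control") page it was encrypted from.  If
 * @restore is true, the bounce page itself is also released via
 * fscrypt_restore_control_page().  Pages that still have a ->mapping are
 * not bounce pages and are left untouched.
 */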
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/*
	 * Bounce pages are never mapped into an address space, so a page
	 * that still has a ->mapping is the original pagecache page and
	 * there is nothing to pull back.
	 */
	if ((*page)->mapping)
		return;

	/* *page is a bounce page; its fscrypt_ctx is stored in page_private. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* Hand the original (control) page back to the caller. */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

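/*
 * Zero out @len contiguous blocks of an encrypted file, starting at
 * logical block @lblk, which is mapped to physical block @pblk.  Since
 * writing literal zeroes would not decrypt back to zeroes, each block is
 * written as the encryption of ZERO_PAGE(0) under the file's key, one
 * single-page synchronous bio at a time.  Only filesystems with
 * blocksize == PAGE_SIZE are supported (enforced by the BUG_ON below).
 */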
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

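	/*
	 * For each block in turn: encrypt a page of zeroes into the (reused)
	 * bounce page, then write that ciphertext to disk with a single-page
	 * bio and wait for it to complete before moving on to the next block.
	 */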
	while (len--) {
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
					     ZERO_PAGE(0), ciphertext_page,
					     PAGE_SIZE, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if (err == 0 && bio->bi_status)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);