/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */
|
|
|
|
|
|
|
|
#include <linux/pagemap.h>
|
|
|
|
#include <linux/mempool.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/scatterlist.h>
|
|
|
|
#include <linux/ratelimit.h>
|
|
|
|
#include <linux/dcache.h>
|
2016-04-13 07:05:36 +08:00
|
|
|
#include <linux/namei.h>
|
2017-06-19 15:27:58 +08:00
|
|
|
#include <crypto/aes.h>
|
2018-01-06 02:45:00 +08:00
|
|
|
#include <crypto/skcipher.h>
|
2016-11-27 11:05:18 +08:00
|
|
|
#include "fscrypt_private.h"
|
2015-05-16 07:26:10 +08:00
|
|
|
|
|
|
|
/* Number of bounce pages preallocated into fscrypt_bounce_page_pool. */
static unsigned int num_prealloc_crypto_pages = 32;
/* Number of fscrypt_ctx objects preallocated onto fscrypt_free_ctxs. */
static unsigned int num_prealloc_crypto_ctxs = 128;

/* Both parameters are read-only at runtime (mode 0444). */
module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

/* Pool of bounce pages for write-path encryption; set up in fscrypt_initialize(). */
static mempool_t *fscrypt_bounce_page_pool = NULL;

/* Free list of reusable fscrypt_ctx objects, protected by fscrypt_ctx_lock. */
static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

/* Workqueue on which read-path decryption work is performed (see fscrypt_init()). */
struct workqueue_struct *fscrypt_read_workqueue;
/* Serializes one-time setup in fscrypt_initialize(). */
static DEFINE_MUTEX(fscrypt_init_mutex);

/* Slab caches for fscrypt_ctx and fscrypt_info objects. */
static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* fscrypt_release_ctx() - Releases an encryption context
|
|
|
|
* @ctx: The encryption context to release.
|
|
|
|
*
|
|
|
|
* If the encryption context was allocated from the pre-allocated pool, returns
|
|
|
|
* it to that pool. Else, frees it.
|
|
|
|
*
|
|
|
|
* If there's a bounce page in the context, this frees that.
|
|
|
|
*/
|
|
|
|
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
2016-12-07 06:53:58 +08:00
|
|
|
if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
|
2015-05-16 07:26:10 +08:00
|
|
|
mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
|
|
|
|
ctx->w.bounce_page = NULL;
|
|
|
|
}
|
|
|
|
ctx->w.control_page = NULL;
|
|
|
|
if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
|
|
|
|
kmem_cache_free(fscrypt_ctx_cachep, ctx);
|
|
|
|
} else {
|
|
|
|
spin_lock_irqsave(&fscrypt_ctx_lock, flags);
|
|
|
|
list_add(&ctx->free_list, &fscrypt_free_ctxs);
|
|
|
|
spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(fscrypt_release_ctx);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* fscrypt_get_ctx() - Gets an encryption context
|
|
|
|
* @inode: The inode for which we are doing the crypto
|
2016-04-12 06:51:57 +08:00
|
|
|
* @gfp_flags: The gfp flag for memory allocation
|
2015-05-16 07:26:10 +08:00
|
|
|
*
|
|
|
|
* Allocates and initializes an encryption context.
|
|
|
|
*
|
|
|
|
* Return: An allocated and initialized encryption context on success; error
|
|
|
|
* value or NULL otherwise.
|
|
|
|
*/
|
2016-11-14 05:20:47 +08:00
|
|
|
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
|
2015-05-16 07:26:10 +08:00
|
|
|
{
|
|
|
|
struct fscrypt_ctx *ctx = NULL;
|
|
|
|
struct fscrypt_info *ci = inode->i_crypt_info;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (ci == NULL)
|
|
|
|
return ERR_PTR(-ENOKEY);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We first try getting the ctx from a free list because in
|
|
|
|
* the common case the ctx will have an allocated and
|
|
|
|
* initialized crypto tfm, so it's probably a worthwhile
|
|
|
|
* optimization. For the bounce page, we first try getting it
|
|
|
|
* from the kernel allocator because that's just about as fast
|
|
|
|
* as getting it from a list and because a cache of free pages
|
|
|
|
* should generally be a "last resort" option for a filesystem
|
|
|
|
* to be able to do its job.
|
|
|
|
*/
|
|
|
|
spin_lock_irqsave(&fscrypt_ctx_lock, flags);
|
|
|
|
ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
|
|
|
|
struct fscrypt_ctx, free_list);
|
|
|
|
if (ctx)
|
|
|
|
list_del(&ctx->free_list);
|
|
|
|
spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
|
|
|
|
if (!ctx) {
|
2016-04-12 06:51:57 +08:00
|
|
|
ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
|
2015-05-16 07:26:10 +08:00
|
|
|
if (!ctx)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
|
|
|
|
} else {
|
|
|
|
ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
|
|
|
|
}
|
2016-12-07 06:53:58 +08:00
|
|
|
ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
|
2015-05-16 07:26:10 +08:00
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(fscrypt_get_ctx);
|
|
|
|
|
2016-12-19 19:25:32 +08:00
|
|
|
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
|
|
|
|
u64 lblk_num, struct page *src_page,
|
|
|
|
struct page *dest_page, unsigned int len,
|
|
|
|
unsigned int offs, gfp_t gfp_flags)
|
2015-05-16 07:26:10 +08:00
|
|
|
{
|
2016-10-13 11:30:16 +08:00
|
|
|
struct {
|
|
|
|
__le64 index;
|
2017-06-19 15:27:58 +08:00
|
|
|
u8 padding[FS_IV_SIZE - sizeof(__le64)];
|
|
|
|
} iv;
|
2016-03-22 02:03:02 +08:00
|
|
|
struct skcipher_request *req = NULL;
|
2017-10-18 15:00:44 +08:00
|
|
|
DECLARE_CRYPTO_WAIT(wait);
|
2015-05-16 07:26:10 +08:00
|
|
|
struct scatterlist dst, src;
|
|
|
|
struct fscrypt_info *ci = inode->i_crypt_info;
|
2016-03-22 02:03:02 +08:00
|
|
|
struct crypto_skcipher *tfm = ci->ci_ctfm;
|
2015-05-16 07:26:10 +08:00
|
|
|
int res = 0;
|
|
|
|
|
2016-12-07 06:53:55 +08:00
|
|
|
BUG_ON(len == 0);
|
|
|
|
|
2017-06-19 15:27:58 +08:00
|
|
|
BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
|
|
|
|
BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
|
|
|
|
iv.index = cpu_to_le64(lblk_num);
|
|
|
|
memset(iv.padding, 0, sizeof(iv.padding));
|
|
|
|
|
|
|
|
if (ci->ci_essiv_tfm != NULL) {
|
|
|
|
crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
|
|
|
|
(u8 *)&iv);
|
|
|
|
}
|
|
|
|
|
2016-04-12 06:51:57 +08:00
|
|
|
req = skcipher_request_alloc(tfm, gfp_flags);
|
2018-05-01 06:51:38 +08:00
|
|
|
if (!req)
|
2015-05-16 07:26:10 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
2016-03-22 02:03:02 +08:00
|
|
|
skcipher_request_set_callback(
|
2015-05-16 07:26:10 +08:00
|
|
|
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
|
2017-10-18 15:00:44 +08:00
|
|
|
crypto_req_done, &wait);
|
2015-05-16 07:26:10 +08:00
|
|
|
|
|
|
|
sg_init_table(&dst, 1);
|
2016-12-07 06:53:55 +08:00
|
|
|
sg_set_page(&dst, dest_page, len, offs);
|
2015-05-16 07:26:10 +08:00
|
|
|
sg_init_table(&src, 1);
|
2016-12-07 06:53:55 +08:00
|
|
|
sg_set_page(&src, src_page, len, offs);
|
2017-06-19 15:27:58 +08:00
|
|
|
skcipher_request_set_crypt(req, &src, &dst, len, &iv);
|
2015-05-16 07:26:10 +08:00
|
|
|
if (rw == FS_DECRYPT)
|
2017-10-18 15:00:44 +08:00
|
|
|
res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
|
2015-05-16 07:26:10 +08:00
|
|
|
else
|
2017-10-18 15:00:44 +08:00
|
|
|
res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
|
2016-03-22 02:03:02 +08:00
|
|
|
skcipher_request_free(req);
|
2015-05-16 07:26:10 +08:00
|
|
|
if (res) {
|
|
|
|
printk_ratelimited(KERN_ERR
|
2016-03-22 02:03:02 +08:00
|
|
|
"%s: crypto_skcipher_encrypt() returned %d\n",
|
2015-05-16 07:26:10 +08:00
|
|
|
__func__, res);
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-12-19 19:25:32 +08:00
|
|
|
struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
|
|
|
|
gfp_t gfp_flags)
|
2015-05-16 07:26:10 +08:00
|
|
|
{
|
2016-04-12 06:51:57 +08:00
|
|
|
ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
|
2015-05-16 07:26:10 +08:00
|
|
|
if (ctx->w.bounce_page == NULL)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2016-12-07 06:53:58 +08:00
|
|
|
ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
|
2015-05-16 07:26:10 +08:00
|
|
|
return ctx->w.bounce_page;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:         The inode for which the encryption should take place
 * @page:          The page to encrypt. Must be locked for bounce-page
 *                 encryption.
 * @len:           Length of data to encrypt in @page and encrypted
 *                 data in returned page.
 * @offs:          Offset of data within @page and returned
 *                 page holding encrypted data.
 * @lblk_num:      Logical block number. This must be unique for multiple
 *                 calls with same inode, except when overwriting
 *                 previously written data.
 * @gfp_flags:     The gfp flag for memory allocation
 *
 * Encrypts @page using the ctx encryption context. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input-page is returned with its content
 * encrypted.
 *
 * Return: A page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				struct page *page,
				unsigned int len,
				unsigned int offs,
				u64 lblk_num, gfp_t gfp_flags)

{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	/* Only whole crypto blocks can be encrypted. */
	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	/* Bounce-page path: the plaintext page must already be locked. */
	BUG_ON(!PageLocked(page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	/* Remember the plaintext page so it can be restored later. */
	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	/*
	 * Stash the ctx in page_private so fscrypt_restore_control_page()
	 * can find it; the bounce page is returned locked.
	 */
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	/* Releases the ctx and any bounce page it holds. */
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
|
|
|
|
|
|
|
|
/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:     The corresponding inode for the page to decrypt.
 * @page:      The page to decrypt. Must be locked in case
 *             it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:       Number of bytes in @page to be decrypted.
 * @offs:      Start of data in @page.
 * @lblk_num:  Logical block number.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			unsigned int len, unsigned int offs, u64 lblk_num)
{
	/* Unless the FS manages its own pages, the page must be locked. */
	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
		BUG_ON(!PageLocked(page));

	/* In-place: source and destination are the same page. */
	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
|
|
|
|
|
|
|
|
/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int dir_has_key, cached_with_key;

	/* Cannot sleep (dget_parent, spin_lock) under RCU-walk. */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!IS_ENCRYPTED(d_inode(dir))) {
		/* Unencrypted parent: nothing to revalidate here. */
		dput(dir);
		return 0;
	}

	/* d_flags is protected by d_lock. */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
			(!cached_with_key && dir_has_key) ||
			(cached_with_key && !dir_has_key))
		return 0;
	return 1;
}
|
|
|
|
|
|
|
|
/* Dentry operations installed on dentries in encrypted directories. */
const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
|
|
|
|
|
|
|
|
void fscrypt_restore_control_page(struct page *page)
|
|
|
|
{
|
|
|
|
struct fscrypt_ctx *ctx;
|
|
|
|
|
|
|
|
ctx = (struct fscrypt_ctx *)page_private(page);
|
|
|
|
set_page_private(page, (unsigned long)NULL);
|
|
|
|
ClearPagePrivate(page);
|
|
|
|
unlock_page(page);
|
|
|
|
fscrypt_release_ctx(ctx);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(fscrypt_restore_control_page);
|
|
|
|
|
|
|
|
static void fscrypt_destroy(void)
|
|
|
|
{
|
|
|
|
struct fscrypt_ctx *pos, *n;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
|
|
|
|
kmem_cache_free(fscrypt_ctx_cachep, pos);
|
|
|
|
INIT_LIST_HEAD(&fscrypt_free_ctxs);
|
|
|
|
mempool_destroy(fscrypt_bounce_page_pool);
|
|
|
|
fscrypt_bounce_page_pool = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* fscrypt_initialize() - allocate major buffers for fs encryption.
|
2016-12-07 06:53:57 +08:00
|
|
|
* @cop_flags: fscrypt operations flags
|
2015-05-16 07:26:10 +08:00
|
|
|
*
|
|
|
|
* We only call this when we start accessing encrypted files, since it
|
|
|
|
* results in memory getting allocated that wouldn't otherwise be used.
|
|
|
|
*
|
|
|
|
* Return: Zero on success, non-zero otherwise.
|
|
|
|
*/
|
2016-12-07 06:53:57 +08:00
|
|
|
int fscrypt_initialize(unsigned int cop_flags)
|
2015-05-16 07:26:10 +08:00
|
|
|
{
|
|
|
|
int i, res = -ENOMEM;
|
|
|
|
|
2017-10-29 18:30:19 +08:00
|
|
|
/* No need to allocate a bounce page pool if this FS won't use it. */
|
|
|
|
if (cop_flags & FS_CFLG_OWN_PAGES)
|
2015-05-16 07:26:10 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
mutex_lock(&fscrypt_init_mutex);
|
|
|
|
if (fscrypt_bounce_page_pool)
|
|
|
|
goto already_initialized;
|
|
|
|
|
|
|
|
for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
|
|
|
|
struct fscrypt_ctx *ctx;
|
|
|
|
|
|
|
|
ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
|
|
|
|
if (!ctx)
|
|
|
|
goto fail;
|
|
|
|
list_add(&ctx->free_list, &fscrypt_free_ctxs);
|
|
|
|
}
|
|
|
|
|
|
|
|
fscrypt_bounce_page_pool =
|
|
|
|
mempool_create_page_pool(num_prealloc_crypto_pages, 0);
|
|
|
|
if (!fscrypt_bounce_page_pool)
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
already_initialized:
|
|
|
|
mutex_unlock(&fscrypt_init_mutex);
|
|
|
|
return 0;
|
|
|
|
fail:
|
|
|
|
fscrypt_destroy();
|
|
|
|
mutex_unlock(&fscrypt_init_mutex);
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * fscrypt_init() - Set up for fs encryption.
 *
 * Allocates the read workqueue and the fscrypt_ctx/fscrypt_info slab
 * caches. Unwinds in reverse order on failure.
 */
static int __init fscrypt_init(void)
{
	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU. This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						WQ_UNBOUND | WQ_HIGHPRI,
						num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)
|
|
|
|
|
|
|
|
/**
 * fscrypt_exit() - Shutdown the fs encryption system
 *
 * Frees everything set up by fscrypt_init()/fscrypt_initialize() plus the
 * ESSIV common tfm.
 */
static void __exit fscrypt_exit(void)
{
	/* Frees the ctx free list and the bounce page pool, if created. */
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);
|
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|