fscrypt: remove struct fscrypt_ctx

Now that ext4 and f2fs implement their own post-read workflow that
supports both fscrypt and fsverity, the fscrypt-only workflow based
around struct fscrypt_ctx is no longer used.  So remove the unused code.

This is based on a patch from Chandan Rajendra's "Consolidate FS read
I/O callbacks code" patchset, but rebased onto the latest kernel, with
__fscrypt_decrypt_bio() folded into fscrypt_decrypt_bio(),
fscrypt_initialize() cleaned up, and the commit message updated.

Originally-from: Chandan Rajendra <chandan@linux.ibm.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Author: Eric Biggers <ebiggers@google.com>
Date:   2019-10-09 16:34:17 -07:00
Commit: 1565bdad59 (parent: 4006d799d9)

 4 files changed, 10 insertions(+), 163 deletions(-)
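For context on what replaces the removed code: each filesystem now owns its
bio completion context and calls into fscrypt directly, instead of going
through fscrypt_enqueue_decrypt_bio() and struct fscrypt_ctx. Below is a
minimal sketch of that pattern, not the actual ext4/f2fs code; the
my_post_read_ctx, my_post_read_work, and my_read_end_io names are invented
for illustration. Only fscrypt_enqueue_decrypt_work() and
fscrypt_decrypt_bio() are real fscrypt APIs here.

    #include <linux/bio.h>
    #include <linux/fscrypt.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    /* Hypothetical per-bio context; ext4 and f2fs each keep their own variant. */
    struct my_post_read_ctx {
            struct bio *bio;
            struct work_struct work;
    };

    static void my_post_read_work(struct work_struct *work)
    {
            struct my_post_read_ctx *ctx =
                    container_of(work, struct my_post_read_ctx, work);
            struct bio *bio = ctx->bio;

            fscrypt_decrypt_bio(bio);  /* decrypts in place; sets PageError on failure */
            /* ...fs-verity verification, SetPageUptodate(), unlock_page() go here... */
            kfree(ctx);
            bio_put(bio);
    }

    /* bio->bi_end_io handler: punt decryption to the fscrypt workqueue. */
    static void my_read_end_io(struct bio *bio)
    {
            struct my_post_read_ctx *ctx = bio->bi_private;  /* set at submit time */

            INIT_WORK(&ctx->work, my_post_read_work);
            fscrypt_enqueue_decrypt_work(&ctx->work);
    }

ext4 (fs/ext4/readpage.c) and f2fs implement this shape with extra steps for
fs-verity, which is why the generic struct fscrypt_ctx workflow removed below
is no longer needed.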

diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -26,7 +26,7 @@
 #include <linux/namei.h>
 #include "fscrypt_private.h"
 
-static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
+void fscrypt_decrypt_bio(struct bio *bio)
 {
 	struct bio_vec *bv;
 	struct bvec_iter_all iter_all;
@@ -37,37 +37,10 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
 						   bv->bv_offset);
 		if (ret)
 			SetPageError(page);
-		else if (done)
-			SetPageUptodate(page);
-		if (done)
-			unlock_page(page);
 	}
 }
-
-void fscrypt_decrypt_bio(struct bio *bio)
-{
-	__fscrypt_decrypt_bio(bio, false);
-}
 EXPORT_SYMBOL(fscrypt_decrypt_bio);
 
-static void completion_pages(struct work_struct *work)
-{
-	struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
-	struct bio *bio = ctx->bio;
-
-	__fscrypt_decrypt_bio(bio, true);
-	fscrypt_release_ctx(ctx);
-	bio_put(bio);
-}
-
-void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
-{
-	INIT_WORK(&ctx->work, completion_pages);
-	ctx->bio = bio;
-	fscrypt_enqueue_decrypt_work(&ctx->work);
-}
-EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
-
 int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 			  sector_t pblk, unsigned int len)
 {

diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -31,24 +31,16 @@
 #include "fscrypt_private.h"
 
 static unsigned int num_prealloc_crypto_pages = 32;
-static unsigned int num_prealloc_crypto_ctxs = 128;
 
 module_param(num_prealloc_crypto_pages, uint, 0444);
 MODULE_PARM_DESC(num_prealloc_crypto_pages,
 		"Number of crypto pages to preallocate");
-module_param(num_prealloc_crypto_ctxs, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
-		"Number of crypto contexts to preallocate");
 
 static mempool_t *fscrypt_bounce_page_pool = NULL;
 
-static LIST_HEAD(fscrypt_free_ctxs);
-static DEFINE_SPINLOCK(fscrypt_ctx_lock);
-
 static struct workqueue_struct *fscrypt_read_workqueue;
 static DEFINE_MUTEX(fscrypt_init_mutex);
 
-static struct kmem_cache *fscrypt_ctx_cachep;
 struct kmem_cache *fscrypt_info_cachep;
 
 void fscrypt_enqueue_decrypt_work(struct work_struct *work)
@@ -57,62 +49,6 @@ void fscrypt_enqueue_decrypt_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
 
-/**
- * fscrypt_release_ctx() - Release a decryption context
- * @ctx: The decryption context to release.
- *
- * If the decryption context was allocated from the pre-allocated pool, return
- * it to that pool.  Else, free it.
- */
-void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
-	unsigned long flags;
-
-	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
-		kmem_cache_free(fscrypt_ctx_cachep, ctx);
-	} else {
-		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
-		list_add(&ctx->free_list, &fscrypt_free_ctxs);
-		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
-	}
-}
-EXPORT_SYMBOL(fscrypt_release_ctx);
-
-/**
- * fscrypt_get_ctx() - Get a decryption context
- * @gfp_flags:   The gfp flag for memory allocation
- *
- * Allocate and initialize a decryption context.
- *
- * Return: A new decryption context on success; an ERR_PTR() otherwise.
- */
-struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
-{
-	struct fscrypt_ctx *ctx;
-	unsigned long flags;
-
-	/*
-	 * First try getting a ctx from the free list so that we don't have to
-	 * call into the slab allocator.
-	 */
-	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
-	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
-				       struct fscrypt_ctx, free_list);
-	if (ctx)
-		list_del(&ctx->free_list);
-	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
-	if (!ctx) {
-		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
-		if (!ctx)
-			return ERR_PTR(-ENOMEM);
-		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
-	} else {
-		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
-	}
-	return ctx;
-}
-EXPORT_SYMBOL(fscrypt_get_ctx);
-
 struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
 {
 	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
@@ -392,17 +328,6 @@ const struct dentry_operations fscrypt_d_ops = {
 	.d_revalidate = fscrypt_d_revalidate,
 };
 
-static void fscrypt_destroy(void)
-{
-	struct fscrypt_ctx *pos, *n;
-
-	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
-		kmem_cache_free(fscrypt_ctx_cachep, pos);
-	INIT_LIST_HEAD(&fscrypt_free_ctxs);
-	mempool_destroy(fscrypt_bounce_page_pool);
-	fscrypt_bounce_page_pool = NULL;
-}
-
 /**
  * fscrypt_initialize() - allocate major buffers for fs encryption.
  * @cop_flags:  fscrypt operations flags
@@ -410,11 +335,11 @@ static void fscrypt_destroy(void)
  * We only call this when we start accessing encrypted files, since it
  * results in memory getting allocated that wouldn't otherwise be used.
  *
- * Return: Zero on success, non-zero otherwise.
+ * Return: 0 on success; -errno on failure
  */
 int fscrypt_initialize(unsigned int cop_flags)
 {
-	int i, res = -ENOMEM;
+	int err = 0;
 
 	/* No need to allocate a bounce page pool if this FS won't use it. */
 	if (cop_flags & FS_CFLG_OWN_PAGES)
@@ -422,29 +347,18 @@ int fscrypt_initialize(unsigned int cop_flags)
 
 	mutex_lock(&fscrypt_init_mutex);
 	if (fscrypt_bounce_page_pool)
-		goto already_initialized;
+		goto out_unlock;
 
-	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
-		struct fscrypt_ctx *ctx;
-
-		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
-		if (!ctx)
-			goto fail;
-		list_add(&ctx->free_list, &fscrypt_free_ctxs);
-	}
-
+	err = -ENOMEM;
 	fscrypt_bounce_page_pool =
 		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
 	if (!fscrypt_bounce_page_pool)
-		goto fail;
+		goto out_unlock;
 
-already_initialized:
+	err = 0;
+out_unlock:
 	mutex_unlock(&fscrypt_init_mutex);
-	return 0;
-fail:
-	fscrypt_destroy();
-	mutex_unlock(&fscrypt_init_mutex);
-	return res;
+	return err;
 }
 
 void fscrypt_msg(const struct inode *inode, const char *level,
@@ -490,13 +404,9 @@ static int __init fscrypt_init(void)
 	if (!fscrypt_read_workqueue)
 		goto fail;
 
-	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
-	if (!fscrypt_ctx_cachep)
-		goto fail_free_queue;
-
 	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
 	if (!fscrypt_info_cachep)
-		goto fail_free_ctx;
+		goto fail_free_queue;
 
 	err = fscrypt_init_keyring();
 	if (err)
@@ -506,8 +416,6 @@ static int __init fscrypt_init(void)
 
 fail_free_info:
 	kmem_cache_destroy(fscrypt_info_cachep);
-fail_free_ctx:
-	kmem_cache_destroy(fscrypt_ctx_cachep);
 fail_free_queue:
 	destroy_workqueue(fscrypt_read_workqueue);
 fail:

diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -203,8 +203,6 @@ typedef enum {
 	FS_ENCRYPT,
 } fscrypt_direction_t;
 
-#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL		0x00000001
-
 static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
 					   u32 filenames_mode)
 {

diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -20,7 +20,6 @@
 
 #define FS_CRYPTO_BLOCK_SIZE		16
 
-struct fscrypt_ctx;
 struct fscrypt_info;
 
 struct fscrypt_str {
@@ -64,18 +63,6 @@ struct fscrypt_operations {
 	unsigned int max_namelen;
 };
 
-/* Decryption work */
-struct fscrypt_ctx {
-	union {
-		struct {
-			struct bio *bio;
-			struct work_struct work;
-		};
-		struct list_head free_list;	/* Free list */
-	};
-	u8 flags;				/* Flags */
-};
-
 static inline bool fscrypt_has_encryption_key(const struct inode *inode)
 {
 	/* pairs with cmpxchg_release() in fscrypt_get_encryption_info() */
@@ -102,8 +89,6 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
 
 /* crypto.c */
 extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
-extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
-extern void fscrypt_release_ctx(struct fscrypt_ctx *);
 extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
						     unsigned int len,
						     unsigned int offs,
@@ -244,8 +229,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
 
 /* bio.c */
 extern void fscrypt_decrypt_bio(struct bio *);
-extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
-					struct bio *bio);
 extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
				 unsigned int);
 
@@ -295,16 +278,6 @@ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
 {
 }
 
-static inline struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
-{
-	return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
-	return;
-}
-
 static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
							    unsigned int len,
							    unsigned int offs,
@@ -484,11 +457,6 @@ static inline void fscrypt_decrypt_bio(struct bio *bio)
 {
 }
 
-static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
-					       struct bio *bio)
-{
-}
-
 static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
					sector_t pblk, unsigned int len)
 {