RDMA/rxe: Address an issue with hardened user copy
Change rxe pools to use kzalloc instead of kmem_cache to allocate memory for rxe objects. The pools are not really necessary and they trigger hardened user copy warnings as the ioctl framework copies the QP number directly to userspace.

Also the general project to move object allocation to the core code will eventually clean these out anyhow.

Link: https://lore.kernel.org/r/20200827163535.2632-1-rpearson@hpe.com
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent 63fa15dbd4
commit 7672dac304
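For background, here is a minimal sketch of why the warning fires; it is illustrative only and not part of this patch (demo_cache, struct demo_qp and demo_copy_qpn are made-up names). A copy_to_user() sourced from an object in a private kmem_cache trips CONFIG_HARDENED_USERCOPY because the cache declares no usercopy whitelist, while the same copy from kzalloc()'d memory is bounds-checked against the kmalloc allocation and allowed:

/* Hypothetical example -- not rxe driver code. */
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_qp {
	u32 qp_num;	/* field that the ioctl path copies out to userspace */
};

/* Created with plain kmem_cache_create(), i.e. without a usercopy
 * region (kmem_cache_create_usercopy()), so hardened usercopy rejects
 * copies sourced from its objects.
 */
static struct kmem_cache *demo_cache;

static int demo_copy_qpn(struct demo_qp *qp, void __user *dst)
{
	/* If qp was allocated with kmem_cache_zalloc(demo_cache, ...),
	 * this copy can trigger a hardened user copy report ("Kernel
	 * memory exposure attempt detected ...").  If qp was allocated
	 * with kzalloc(sizeof(*qp), GFP_KERNEL), the copy is checked
	 * against the kmalloc object size and passes, which is what
	 * this patch relies on.
	 */
	if (copy_to_user(dst, &qp->qp_num, sizeof(qp->qp_num)))
		return -EFAULT;

	return 0;
}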
drivers/infiniband/sw/rxe/rxe.c
@@ -282,13 +282,6 @@ static int __init rxe_module_init(void)
 {
 	int err;
 
-	/* initialize slab caches for managed objects */
-	err = rxe_cache_init();
-	if (err) {
-		pr_err("unable to init object pools\n");
-		return err;
-	}
-
 	err = rxe_net_init();
 	if (err)
 		return err;
@@ -303,7 +296,6 @@ static void __exit rxe_module_exit(void)
 	rdma_link_unregister(&rxe_link_ops);
 	ib_unregister_driver(RDMA_DRIVER_RXE);
 	rxe_net_exit();
-	rxe_cache_exit();
 
 	pr_info("unloaded\n");
 }
drivers/infiniband/sw/rxe/rxe_pool.c
@@ -83,62 +83,6 @@ static inline const char *pool_name(struct rxe_pool *pool)
 	return rxe_type_info[pool->type].name;
 }
 
-static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
-{
-	return rxe_type_info[pool->type].cache;
-}
-
-static void rxe_cache_clean(size_t cnt)
-{
-	int i;
-	struct rxe_type_info *type;
-
-	for (i = 0; i < cnt; i++) {
-		type = &rxe_type_info[i];
-		if (!(type->flags & RXE_POOL_NO_ALLOC)) {
-			kmem_cache_destroy(type->cache);
-			type->cache = NULL;
-		}
-	}
-}
-
-int rxe_cache_init(void)
-{
-	int err;
-	int i;
-	size_t size;
-	struct rxe_type_info *type;
-
-	for (i = 0; i < RXE_NUM_TYPES; i++) {
-		type = &rxe_type_info[i];
-		size = ALIGN(type->size, RXE_POOL_ALIGN);
-		if (!(type->flags & RXE_POOL_NO_ALLOC)) {
-			type->cache =
-				kmem_cache_create(type->name, size,
-						  RXE_POOL_ALIGN,
-						  RXE_POOL_CACHE_FLAGS, NULL);
-			if (!type->cache) {
-				pr_err("Unable to init kmem cache for %s\n",
-				       type->name);
-				err = -ENOMEM;
-				goto err1;
-			}
-		}
-	}
-
-	return 0;
-
-err1:
-	rxe_cache_clean(i);
-
-	return err;
-}
-
-void rxe_cache_exit(void)
-{
-	rxe_cache_clean(RXE_NUM_TYPES);
-}
-
 static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
 {
 	int err = 0;
@@ -379,7 +323,7 @@ void *rxe_alloc(struct rxe_pool *pool)
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
-	elem = kmem_cache_zalloc(pool_cache(pool),
+	elem = kzalloc(rxe_type_info[pool->type].size,
 			 (pool->flags & RXE_POOL_ATOMIC) ?
 			 GFP_ATOMIC : GFP_KERNEL);
 	if (!elem)
@@ -441,7 +385,7 @@ void rxe_elem_release(struct kref *kref)
 		pool->cleanup(elem);
 
 	if (!(pool->flags & RXE_POOL_NO_ALLOC))
-		kmem_cache_free(pool_cache(pool), elem);
+		kfree(elem);
 	atomic_dec(&pool->num_elem);
 	ib_device_put(&pool->rxe->ib_dev);
 	rxe_pool_put(pool);
drivers/infiniband/sw/rxe/rxe_pool.h
@@ -42,7 +42,6 @@ struct rxe_type_info {
 	u32			min_index;
 	size_t			key_offset;
 	size_t			key_size;
-	struct kmem_cache	*cache;
 };
 
 extern struct rxe_type_info rxe_type_info[];
@@ -86,12 +85,6 @@ struct rxe_pool {
 	size_t			key_size;
 };
 
-/* initialize slab caches for managed objects */
-int rxe_cache_init(void);
-
-/* cleanup slab caches for managed objects */
-void rxe_cache_exit(void);
-
 /* initialize a pool of objects with given limit on
  * number of elements. gets parameters from rxe_type_info
  * pool elements will be allocated out of a slab cache