kernel: kmem_ptr_validate considered harmful

This is a nasty and error prone API. It is no longer used, remove it.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
commit ccd35fb9f4
parent 786a5e15b6
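Background for the "error prone" claim: kmem_ptr_validate() could only report that a pointer *looked* plausible at the instant of the check, never that the object was still allocated. A minimal sketch of the check-then-use pattern the API invited (hypothetical caller and struct, not from this commit; assumes the usual slab headers):

	/*
	 * Hypothetical caller, for illustration only.  Between the
	 * validate call and the dereference nothing pins the object,
	 * so it may be freed (and its page recycled) in the window,
	 * turning the read into a use-after-free despite a
	 * "successful" validation.
	 */
	struct foo {
		int refcount;
	};

	static int peek_refcount(struct kmem_cache *foo_cache,
				 struct foo *candidate)
	{
		if (!kmem_ptr_validate(foo_cache, candidate))
			return -EINVAL;		/* looked bogus at check time */
		/* <-- race window: candidate can be freed right here */
		return candidate->refcount;	/* may touch freed memory */
	}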
include/linux/slab.h
@@ -106,8 +106,6 @@ int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
-int kern_ptr_validate(const void *ptr, unsigned long size);
-int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 
 /*
  * Please use this macro to create slab caches. Simply specify the
mm/slab.c
@@ -2781,7 +2781,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 /*
  * Map pages beginning at addr to the given cache and slab. This is required
  * for the slab allocator to be able to lookup the cache and slab of a
- * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
+ * virtual address for kfree, ksize, and slab debugging.
  */
 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
			   void *addr)
@@ -3660,36 +3660,6 @@ void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
 #endif
 
-/**
- * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
- * @cachep: the cache we're checking against
- * @ptr: pointer to validate
- *
- * This verifies that the untrusted pointer looks sane;
- * it is _not_ a guarantee that the pointer is actually
- * part of the slab cache in question, but it at least
- * validates that the pointer can be dereferenced and
- * looks half-way sane.
- *
- * Currently only used for dentry validation.
- */
-int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
-{
-	unsigned long size = cachep->buffer_size;
-	struct page *page;
-
-	if (unlikely(!kern_ptr_validate(ptr, size)))
-		goto out;
-	page = virt_to_page(ptr);
-	if (unlikely(!PageSlab(page)))
-		goto out;
-	if (unlikely(page_get_cache(page) != cachep))
-		goto out;
-	return 1;
-out:
-	return 0;
-}
-
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
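The kerneldoc removed above notes that the sole remaining caller was dentry validation. As a rough reconstruction (from memory; identifiers are illustrative and nothing here is quoted from this commit), fs/dcache.c's d_validate() leaned on it like this:

	/* Illustrative reconstruction of the lone caller, not part of this diff. */
	if (!kmem_ptr_validate(dentry_cache, dentry))
		goto out;	/* did not even look like a dentry */
	/* a return of 1 still proved nothing about the dentry being live */

That caller had already been converted away from the API, which is what left kmem_ptr_validate() unused and removable here.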
mm/slob.c
@@ -678,11 +678,6 @@ int kmem_cache_shrink(struct kmem_cache *d)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-int kmem_ptr_validate(struct kmem_cache *a, const void *b)
-{
-	return 0;
-}
-
 static unsigned int slob_ready __read_mostly;
 
 int slab_is_available(void)
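Note the divergence the diff makes visible: SLAB compared page_get_cache() against the cache, SLUB (below) additionally required the pointer to fall on an object boundary via check_valid_pointer(), and the SLOB stub above unconditionally returned 0. The same call could therefore succeed or fail depending on which allocator the kernel was built with, e.g. (hypothetical illustration):

	void *obj = kmem_cache_alloc(cachep, GFP_KERNEL);
	/* SLAB/SLUB: 1 for a freshly allocated object; SLOB: always 0 */
	WARN_ON(!kmem_ptr_validate(cachep, obj));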
mm/slub.c
@@ -1917,17 +1917,6 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab page the object resides */
-static struct page *get_object_page(const void *x)
-{
-	struct page *page = virt_to_head_page(x);
-
-	if (!PageSlab(page))
-		return NULL;
-
-	return page;
-}
-
 /*
  * Object placement in a slab is made very easy because we always start at
  * offset 0. If we tune the size of the object to the alignment then we can
@@ -2385,35 +2374,6 @@ error:
 	return 0;
 }
 
-/*
- * Check if a given pointer is valid
- */
-int kmem_ptr_validate(struct kmem_cache *s, const void *object)
-{
-	struct page *page;
-
-	if (!kern_ptr_validate(object, s->size))
-		return 0;
-
-	page = get_object_page(object);
-
-	if (!page || s != page->slab)
-		/* No slab or wrong slab */
-		return 0;
-
-	if (!check_valid_pointer(s, page, object))
-		return 0;
-
-	/*
-	 * We could also check if the object is on the slabs freelist.
-	 * But this would be too expensive and it seems that the main
-	 * purpose of kmem_ptr_valid() is to check if the object belongs
-	 * to a certain slab.
-	 */
-	return 1;
-}
-EXPORT_SYMBOL(kmem_ptr_validate);
-
 /*
  * Determine the size of a slab object
  */
mm/util.c
@@ -186,27 +186,6 @@ void kzfree(const void *p)
 }
 EXPORT_SYMBOL(kzfree);
 
-int kern_ptr_validate(const void *ptr, unsigned long size)
-{
-	unsigned long addr = (unsigned long)ptr;
-	unsigned long min_addr = PAGE_OFFSET;
-	unsigned long align_mask = sizeof(void *) - 1;
-
-	if (unlikely(addr < min_addr))
-		goto out;
-	if (unlikely(addr > (unsigned long)high_memory - size))
-		goto out;
-	if (unlikely(addr & align_mask))
-		goto out;
-	if (unlikely(!kern_addr_valid(addr)))
-		goto out;
-	if (unlikely(!kern_addr_valid(addr + size - 1)))
-		goto out;
-	return 1;
-out:
-	return 0;
-}
-
 /*
  * strndup_user - duplicate an existing string from user space
  * @s: The string to duplicate
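For the record, every test in the kern_ptr_validate() helper removed above is static: the address must lie in [PAGE_OFFSET, high_memory - size], be pointer-aligned, and have its first and last byte pass kern_addr_valid(). A condensed restatement of those checks (illustrative only, same logic as the removed code):

	int valid = addr >= PAGE_OFFSET &&
		    addr <= (unsigned long)high_memory - size &&
		    !(addr & (sizeof(void *) - 1)) &&
		    kern_addr_valid(addr) &&
		    kern_addr_valid(addr + size - 1);

None of this constrains object lifetime, which is why the validation could never be more than a heuristic.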