SLUB: cleanup - define macros instead of hardcoded numbers
Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
This commit is contained in:
parent
ed313489ba
commit
210b5c0613
18
mm/slub.c
18
mm/slub.c
|
@@ -153,6 +153,10 @@
|
||||||
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
|
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#define OO_SHIFT 16
|
||||||
|
#define OO_MASK ((1 << OO_SHIFT) - 1)
|
||||||
|
#define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */
|
||||||
|
|
||||||
/* Internal SLUB flags */
|
/* Internal SLUB flags */
|
||||||
#define __OBJECT_POISON 0x80000000 /* Poison object */
|
#define __OBJECT_POISON 0x80000000 /* Poison object */
|
||||||
#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
|
#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
|
||||||
|
@@ -290,7 +294,7 @@ static inline struct kmem_cache_order_objects oo_make(int order,
|
||||||
unsigned long size)
|
unsigned long size)
|
||||||
{
|
{
|
||||||
struct kmem_cache_order_objects x = {
|
struct kmem_cache_order_objects x = {
|
||||||
(order << 16) + (PAGE_SIZE << order) / size
|
(order << OO_SHIFT) + (PAGE_SIZE << order) / size
|
||||||
};
|
};
|
||||||
|
|
||||||
return x;
|
return x;
|
||||||
|
@@ -298,12 +302,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
|
||||||
|
|
||||||
static inline int oo_order(struct kmem_cache_order_objects x)
|
static inline int oo_order(struct kmem_cache_order_objects x)
|
||||||
{
|
{
|
||||||
return x.x >> 16;
|
return x.x >> OO_SHIFT;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int oo_objects(struct kmem_cache_order_objects x)
|
static inline int oo_objects(struct kmem_cache_order_objects x)
|
||||||
{
|
{
|
||||||
return x.x & ((1 << 16) - 1);
|
return x.x & OO_MASK;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_SLUB_DEBUG
|
#ifdef CONFIG_SLUB_DEBUG
|
||||||
|
@@ -764,8 +768,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
|
||||||
}
|
}
|
||||||
|
|
||||||
max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
|
max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
|
||||||
if (max_objects > 65535)
|
if (max_objects > MAX_OBJS_PER_PAGE)
|
||||||
max_objects = 65535;
|
max_objects = MAX_OBJS_PER_PAGE;
|
||||||
|
|
||||||
if (page->objects != max_objects) {
|
if (page->objects != max_objects) {
|
||||||
slab_err(s, page, "Wrong number of objects. Found %d but "
|
slab_err(s, page, "Wrong number of objects. Found %d but "
|
||||||
|
@@ -1807,8 +1811,8 @@ static inline int slab_order(int size, int min_objects,
|
||||||
int rem;
|
int rem;
|
||||||
int min_order = slub_min_order;
|
int min_order = slub_min_order;
|
||||||
|
|
||||||
if ((PAGE_SIZE << min_order) / size > 65535)
|
if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
|
||||||
return get_order(size * 65535) - 1;
|
return get_order(size * MAX_OBJS_PER_PAGE) - 1;
|
||||||
|
|
||||||
for (order = max(min_order,
|
for (order = max(min_order,
|
||||||
fls(min_objects * size - 1) - PAGE_SHIFT);
|
fls(min_objects * size - 1) - PAGE_SHIFT);
|
||||||
|
|
Loading…
Reference in New Issue