xfs: simplify kmem_{zone_}zalloc
Introduce the flag KM_ZERO, used to request zeroed memory, and convert kmem_{zone_}zalloc() to call kmem_{zone_}alloc() with KM_ZERO directly, avoiding the separate set-to-zero step. Following Dave's suggestion, make kmem_{zone_}zalloc() static inlines in kmem.h, since they are now just simple wrappers.

V2: make kmem_{zone_}zalloc static inline in kmem.h, as Dave suggested.

Signed-off-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
parent d123031a56
commit 359d992bcd
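For illustration, a minimal sketch (not part of the diff below) of how a caller sees this change: the zeroing moves from an explicit memset() in the old kmem_{zone_}zalloc() into the allocator itself, because kmem_flags_convert() maps KM_ZERO to __GFP_ZERO. "struct xfs_foo" and "xfs_foo_zone" are made-up names standing in for any XFS structure and its slab zone.

/*
 * Illustrative sketch only -- not from this commit. "struct xfs_foo" and
 * "xfs_foo_zone" are invented stand-ins; the zone is assumed to have been
 * created elsewhere with kmem_zone_init().
 */
struct xfs_foo {
	int	f_state;
};

static kmem_zone_t	*xfs_foo_zone;

static struct xfs_foo *
xfs_foo_zone_alloc(void)
{
	/*
	 * kmem_zone_zalloc() is now just kmem_zone_alloc(zone, flags | KM_ZERO);
	 * kmem_flags_convert() turns KM_ZERO into __GFP_ZERO, so the slab
	 * allocator returns already-zeroed memory and no memset() is needed.
	 */
	return kmem_zone_zalloc(xfs_foo_zone, KM_SLEEP);
}

static struct xfs_foo *
xfs_foo_heap_alloc(void)
{
	/* Likewise, kmem_zalloc() becomes kmem_alloc(size, flags | KM_ZERO). */
	return kmem_zalloc(sizeof(struct xfs_foo), KM_NOFS);
}

The zeroing is thus handled in one place by the allocator, and the one-line static inline wrappers mean existing call sites need no changes.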
fs/xfs/kmem.c

@@ -62,17 +62,6 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
 	} while (1);
 }
 
-void *
-kmem_zalloc(size_t size, xfs_km_flags_t flags)
-{
-	void	*ptr;
-
-	ptr = kmem_alloc(size, flags);
-	if (ptr)
-		memset((char *)ptr, 0, (int)size);
-	return ptr;
-}
-
 void *
 kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
 {
@@ -128,14 +117,3 @@ kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
 		congestion_wait(BLK_RW_ASYNC, HZ/50);
 	} while (1);
 }
-
-void *
-kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
-{
-	void	*ptr;
-
-	ptr = kmem_zone_alloc(zone, flags);
-	if (ptr)
-		memset((char *)ptr, 0, kmem_cache_size(zone));
-	return ptr;
-}
fs/xfs/kmem.h

@@ -32,6 +32,7 @@ typedef unsigned __bitwise xfs_km_flags_t;
 #define KM_NOSLEEP	((__force xfs_km_flags_t)0x0002u)
 #define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
 #define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
+#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
 
 /*
  * We use a special process flag to avoid recursive callbacks into
@@ -43,7 +44,7 @@ kmem_flags_convert(xfs_km_flags_t flags)
 {
 	gfp_t	lflags;
 
-	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
+	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));
 
 	if (flags & KM_NOSLEEP) {
 		lflags = GFP_ATOMIC | __GFP_NOWARN;
@@ -52,11 +53,14 @@ kmem_flags_convert(xfs_km_flags_t flags)
 		if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
 			lflags &= ~__GFP_FS;
 	}
+
+	if (flags & KM_ZERO)
+		lflags |= __GFP_ZERO;
+
 	return lflags;
 }
 
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
-extern void *kmem_zalloc(size_t, xfs_km_flags_t);
 extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
 extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
 extern void  kmem_free(const void *);
@@ -64,6 +68,12 @@ extern void kmem_free(const void *);
 
 extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
 
+static inline void *
+kmem_zalloc(size_t size, xfs_km_flags_t flags)
+{
+	return kmem_alloc(size, flags | KM_ZERO);
+}
+
 /*
  * Zone interfaces
  */
@@ -102,6 +112,11 @@ kmem_zone_destroy(kmem_zone_t *zone)
 }
 
 extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
-extern void *kmem_zone_zalloc(kmem_zone_t *, xfs_km_flags_t);
+
+static inline void *
+kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
+{
+	return kmem_zone_alloc(zone, flags | KM_ZERO);
+}
 
 #endif	/* __XFS_SUPPORT_KMEM_H__ */