#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>

mm: rewrite vmap layer

Rewrite the vmap allocator to use rbtrees and lazy TLB flushing, and
provide a fast, scalable percpu frontend for small vmaps (this requires a
slightly different API, though).

The biggest problem with vmap is actually vunmap.  Presently this requires
a global kernel TLB flush, which on most architectures is a broadcast IPI
to all CPUs to flush the cache.  This is all done under a global lock.  As
the number of CPUs increases, so will the number of vunmaps a scaled
workload will want to perform, and so will the cost of a global TLB flush.
This gives terrible quadratic scalability characteristics.

Another problem is that the entire vmap subsystem works under a single
lock.  It is an rwlock, but it is actually taken for write in all the fast
paths, and the read locking would likely never be run concurrently anyway,
so it's just pointless.

This is a rewrite of the vmap subsystem to solve those problems.  The
existing vmalloc API is implemented on top of the rewritten subsystem.

The TLB flushing problem is solved by using lazy TLB unmapping.  vmap
addresses do not have to be flushed immediately when they are vunmapped,
because the kernel will not reuse them (that would be a use-after-free)
until they are reallocated.  So the addresses aren't allocated again until
a subsequent TLB flush.  A single TLB flush can then flush multiple
vunmaps from each CPU.

Xen and PAT and such do not like deferred TLB flushing because they can't
always handle multiple aliasing virtual addresses to a physical address.
They now call vm_unmap_aliases() in order to flush any deferred mappings.
That call is very expensive (well, actually not a lot more expensive than
a single vunmap under the old scheme), but it should be OK if not
called too often.
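
As an illustration of the calling convention (a minimal, hypothetical
sketch; the function name and the attribute-changing step are assumptions,
not code from this patch):

/*
 * Hypothetical caller: before changing the cache attributes of some
 * pages (or handing them to a hypervisor), make sure no lazily-unmapped
 * vmap alias, and no stale TLB entry for it, still points at them.
 */
static void example_prepare_for_attr_change(void)
{
	/* Unmap and TLB-flush all lazily-freed vmap aliases. */
	vm_unmap_aliases();

	/* ...now it is safe to change the page attributes... */
}
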
The virtual memory extent information is stored in an rbtree rather than a
linked list to improve the algorithmic scalability.
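
The lookup side of that rbtree is an ordinary binary search keyed on the
[va_start, va_end) interval.  A simplified sketch (not the exact
mm/vmalloc.c code; the root parameter and function name are assumptions):

/* Simplified sketch of an address lookup in the vmap_area rbtree. */
static struct vmap_area *example_find_vmap_area(struct rb_root *root,
						unsigned long addr)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct vmap_area *va = rb_entry(n, struct vmap_area, rb_node);

		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;	/* va_start <= addr < va_end */
	}
	return NULL;
}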

There is a per-CPU allocator for small vmaps, which amortizes or avoids
global locking.

To use the per-CPU interface, the vm_map_ram / vm_unmap_ram interfaces
must be used in place of vmap and vunmap.  Vmalloc does not use these
interfaces at the moment, so it will not be quite so scalable (although it
will use lazy TLB flushing).
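
As a usage sketch (hedged; the wrapper names here are made up, only
vm_map_ram()/vm_unmap_ram() themselves come from this patch), a caller
that used to vmap() a small batch of pages would now do:

/* Sketch: map a small array of pages, use them, unmap them again. */
static void *example_map_pages(struct page **pages, unsigned int nr_pages)
{
	/*
	 * vm_map_ram() takes the pages, the page count, a preferred NUMA
	 * node (-1 for no preference) and the protection to map with.
	 */
	return vm_map_ram(pages, nr_pages, -1, PAGE_KERNEL);
}

static void example_unmap_pages(void *addr, unsigned int nr_pages)
{
	/*
	 * The caller must remember the page count; vm_unmap_ram() needs it
	 * to find the per-CPU block the address was allocated from.
	 */
	vm_unmap_ram(addr, nr_pages);
}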

As a quick test of performance, I ran a test that loops in the kernel,
linearly mapping, then touching, then unmapping 4 pages.  Different numbers
of these loops were run in parallel on a 4-core, 2-socket Opteron.  Results
are in nanoseconds per map+touch+unmap:

threads     vanilla     vmap rewrite
      1       14700             2900
      2       33600             3000
      4       49500             2800
      8       70631             2900

So with 8 cores, the rewritten version is already 25x faster.
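
A rough reconstruction of what that loop might look like (hypothetical;
the iteration count is invented and the timing code is omitted, since the
real test harness is not part of this patch):

/* Hypothetical map+touch+unmap loop over a fixed set of 4 pages. */
static void example_vmap_loop(struct page **pages)
{
	unsigned int i, p;
	void *addr;

	for (i = 0; i < 100000; i++) {
		addr = vm_map_ram(pages, 4, -1, PAGE_KERNEL);
		if (!addr)
			break;
		for (p = 0; p < 4; p++)		/* touch each mapped page */
			((volatile char *)addr)[p * PAGE_SIZE] = 0;
		vm_unmap_ram(addr, 4);
	}
}
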
In a slightly more realistic test (although with an older and less
scalable version of the patch), I ripped the not-very-good vunmap batching
code out of XFS, and implemented the large buffer mapping with vm_map_ram
and vm_unmap_ram... along with a couple of other tricks, I was able to
speed up a large directory workload by 20x on a 64 CPU system. I believe
vmap/vunmap is actually sped up a lot more than 20x on such a system, but
I'm running into other locks now. vmap is pretty well blown off the
profiles.

Before:

 1352059 total                           0.1401
  798784 _write_lock                  8320.6667   <- vmlist_lock
  529313 default_idle                 1181.5022
   15242 smp_call_function              15.8771   <- vmap tlb flushing
    2472 __get_vm_area_node              1.9312   <- vmap
    1762 remove_vm_area                  4.5885   <- vunmap
     316 map_vm_area                     0.2297   <- vmap
     312 kfree                           0.1950
     300 _spin_lock                      3.1250
     252 sn_send_IPI_phys                0.4375   <- tlb flushing
     238 vmap                            0.8264   <- vmap
     216 find_lock_page                  0.5192
     196 find_next_bit                   0.3603
     136 sn2_send_IPI                    0.2024
     130 pio_phys_write_mmr              2.0312
     118 unmap_kernel_range              0.1229

After:

   78406 total                           0.0081
   40053 default_idle                   89.4040
   33576 ia64_spinlock_contention      349.7500
    1650 _spin_lock                     17.1875
     319 __reg_op                        0.5538
     281 _atomic_dec_and_lock            1.0977
     153 mutex_unlock                    1.5938
     123 iget_locked                     0.1671
     117 xfs_dir_lookup                  0.1662
     117 dput                            0.1406
     114 xfs_iget_core                   0.0268
      92 xfs_da_hashname                 0.1917
      75 d_alloc                         0.0670
      68 vmap_page_range                 0.0462   <- vmap
      58 kmem_cache_alloc                0.0604
      57 memset                          0.0540
      52 rb_next                         0.1625
      50 __copy_user                     0.0208
      49 bitmap_find_free_region         0.2188   <- vmap
      46 ia64_sn_udelay                  0.1106
      45 find_inode_fast                 0.1406
      42 memcmp                          0.2188
      42 finish_task_switch              0.1094
      42 __d_lookup                      0.0410
      40 radix_tree_lookup_slot          0.1250
      37 _spin_unlock_irqrestore         0.3854
      36 xfs_bmapi                       0.0050
      36 kmem_cache_free                 0.0256
      35 xfs_vn_getattr                  0.0322
      34 radix_tree_lookup               0.1062
      33 __link_path_walk                0.0035
      31 xfs_da_do_buf                   0.0091
      30 _xfs_buf_find                   0.0204
      28 find_get_page                   0.0875
      27 xfs_iread                       0.0241
      27 __strncpy_from_user             0.2812
      26 _xfs_buf_initialize             0.0406
      24 _xfs_buf_lookup_pages           0.0179
      24 vunmap_page_range               0.0250   <- vunmap
      23 find_lock_page                  0.0799
      22 vm_map_ram                      0.0087   <- vmap
      20 kfree                           0.0125
      19 put_page                        0.0330
      18 __kmalloc                       0.0176
      17 xfs_da_node_lookup_int          0.0086
      17 _read_lock                      0.0885
      17 page_waitqueue                  0.0664

vmap has gone from being in the top 5 on the profiles and flushing the crap
out of all TLBs, to using less than 1% of kernel time.

[akpm@linux-foundation.org: cleanups, section fix]
[akpm@linux-foundation.org: fix build on alpha]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Krzysztof Helt <krzysztof.h1@poczta.fm>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

#include <linux/init.h>
#include <linux/list.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>

struct vm_area_struct; /* vma defining user mapping in mm_types.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_VPAGES		0x00000010	/* buffer for pages was vmalloc'ed */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	struct vm_struct *vm;
	struct rcu_head rcu_head;
};

/*
 *	Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
				int node, pgprot_t prot);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
#else
static inline void vmalloc_init(void)
{
}
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller);
extern void vfree(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
void vmalloc_sync_all(void);

/*
 *	Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	/* return actual size without guard page */
	return area->size - PAGE_SIZE;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
					unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page ***pages);
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
static inline void
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
#endif

/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
extern void free_vm_area(struct vm_struct *area);

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 *	Internals.  Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

struct vmalloc_info {
	unsigned long	used;
	unsigned long	largest_chunk;
};

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
extern void get_vmalloc_info(struct vmalloc_info *vmi);
#else

#define VMALLOC_TOTAL 0UL
#define get_vmalloc_info(vmi)			\
do {						\
	(vmi)->used = 0;			\
	(vmi)->largest_chunk = 0;		\
} while (0)
#endif

#endif /* _LINUX_VMALLOC_H */