[XFS] remove old vmap cache

XFS's vmap batching simply defers a number (up to 64) of vunmaps, and keeps
track of them in a list. To purge the batch, it just goes through the list and
calls vunmap on each one. This is pretty poor: a global TLB flush is generally
still performed on each vunmap, with the most expensive parts of the operation
being the broadcast IPIs and locking involved in the SMP callouts, and the
locking involved in the vmap management -- none of these are avoided by just
batching up the calls. I'm actually surprised it ever made much difference.
(Now that the lazy vmap allocator is upstream, this description is not quite
right, but the vunmap batching still doesn't seem to do much)

Rip all this logic out of XFS completely. I will improve vmap performance
and scalability directly in a subsequent patch.
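
For context, and not part of this commit: the "lazy vmap allocator" mentioned above provides vm_map_ram()/vm_unmap_ram(), which defer and batch the global TLB flush inside mm/vmalloc.c instead of leaving it to callers. Below is a minimal caller-side sketch of that interface; the helper names are hypothetical, and the signatures are those of 2.6.28-era kernels, so they may differ in other releases.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical helpers illustrating the lazy vmap interface; not XFS code. */
static void *map_buffer_pages(struct page **pages, int page_count)
{
	/* For small mappings this allocates from per-CPU vmap blocks. */
	return vm_map_ram(pages, page_count, -1, PAGE_KERNEL);
}

static void unmap_buffer_pages(void *addr, int page_count)
{
	/*
	 * Marks the range free immediately; the expensive cross-CPU TLB
	 * flush is deferred and amortised over many unmaps by vmalloc.c,
	 * which is what the open-coded XFS batching removed below could
	 * not achieve.
	 */
	vm_unmap_ram(addr, page_count);
}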

Signed-off-by: Nick Piggin <npiggin@suse.de>
Reviewed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Nick Piggin, 2009-01-06 14:40:44 +11:00 (committed by Lachlan McIlroy)
parent 195ec037ff, commit d2859751cd
1 changed file with 1 addition and 74 deletions


@@ -165,75 +165,6 @@ test_page_region(
 	return (mask && (page_private(page) & mask) == mask);
 }
 
-/*
- * Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-	void		*vm_addr;
-	struct a_list	*next;
-} a_list_t;
-
-static a_list_t		*as_free_head;
-static int		as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- * Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-	void		*addr)
-{
-	a_list_t	*aentry;
-
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable. If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail. This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
-	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-	if (likely(aentry)) {
-		spin_lock(&as_lock);
-		aentry->next = as_free_head;
-		aentry->vm_addr = addr;
-		as_free_head = aentry;
-		as_list_len++;
-		spin_unlock(&as_lock);
-	} else {
-		vunmap(addr);
-	}
-}
-
-STATIC void
-purge_addresses(void)
-{
-	a_list_t	*aentry, *old;
-
-	if (as_free_head == NULL)
-		return;
-
-	spin_lock(&as_lock);
-	aentry = as_free_head;
-	as_free_head = NULL;
-	as_list_len = 0;
-	spin_unlock(&as_lock);
-
-	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
-		aentry = aentry->next;
-		kfree(old);
-	}
-}
-
 /*
  * Internal xfs_buf_t object manipulation
  */
@@ -333,7 +264,7 @@ xfs_buf_free(
 	uint		i;
 
 	if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-		free_address(bp->b_addr - bp->b_offset);
+		vunmap(bp->b_addr - bp->b_offset);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -455,8 +386,6 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		if (as_list_len > 64)
-			purge_addresses();
 		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
					VM_MAP, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
@@ -1743,8 +1672,6 @@ xfsbufd(
 			count++;
 		}
 
-		if (as_list_len > 0)
-			purge_addresses();
 		if (count)
 			blk_run_address_space(target->bt_mapping);