SLUB: Define functions for cpu slab handling instead of using PageActive
Use inline functions to access the per cpu bit. Introduce the notion of
"freezing" a slab to make things more understandable.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3ca12ee549
commit 4b6f075045

 mm/slub.c | 57 ++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 38 insertions(+), 19 deletions(-)
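For orientation before the diff: the patch wraps the existing PageActive bit in slab-specific accessors. The sketch below restates that pattern as a self-contained user-space program; struct page, PG_active and the PageActive()/SetPageActive()/ClearPageActive() helpers here are simplified stand-ins for the kernel's definitions, not the real interfaces, and only the SlabFrozen()/SetSlabFrozen()/ClearSlabFrozen() wrappers mirror what the patch adds.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct page. */
struct page {
        unsigned long flags;                    /* page flag bits */
};

#define PG_active 6                             /* illustrative bit number */

static inline int PageActive(struct page *page)
{
        return (page->flags >> PG_active) & 1;
}

static inline void SetPageActive(struct page *page)
{
        page->flags |= 1UL << PG_active;
}

static inline void ClearPageActive(struct page *page)
{
        page->flags &= ~(1UL << PG_active);
}

/* Slab-specific names wrapping the generic flag, as in the patch. */
static inline int SlabFrozen(struct page *page)
{
        return PageActive(page);
}

static inline void SetSlabFrozen(struct page *page)
{
        SetPageActive(page);
}

static inline void ClearSlabFrozen(struct page *page)
{
        ClearPageActive(page);
}

int main(void)
{
        struct page page = { .flags = 0 };

        SetSlabFrozen(&page);                   /* slab becomes a cpu slab */
        printf("frozen: %d\n", SlabFrozen(&page));
        ClearSlabFrozen(&page);                 /* slab handed back to the lists */
        printf("frozen: %d\n", SlabFrozen(&page));
        return 0;
}

The indirection is purely for readability: call sites such as slab_free() can now test SlabFrozen(page) instead of reusing an LRU flag whose meaning in the slab context is not obvious.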
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -78,10 +78,18 @@
  *
  * Overloading of page flags that are otherwise used for LRU management.
  *
- * PageActive           The slab is used as a cpu cache. Allocations
- *                      may be performed from the slab. The slab is not
- *                      on any slab list and cannot be moved onto one.
- *                      The cpu slab may be equipped with an additioanl
+ * PageActive           The slab is frozen and exempt from list processing.
+ *                      This means that the slab is dedicated to a purpose
+ *                      such as satisfying allocations for a specific
+ *                      processor. Objects may be freed in the slab while
+ *                      it is frozen but slab_free will then skip the usual
+ *                      list operations. It is up to the processor holding
+ *                      the slab to integrate the slab into the slab lists
+ *                      when the slab is no longer needed.
+ *
+ *                      One use of this flag is to mark slabs that are
+ *                      used for allocations. Then such a slab becomes a cpu
+ *                      slab. The cpu slab may be equipped with an additional
  *                      lockless_freelist that allows lockless access to
  *                      free objects in addition to the regular freelist
  *                      that requires the slab lock.
@@ -91,6 +99,21 @@
  * the fast path and disables lockless freelists.
  */

+static inline int SlabFrozen(struct page *page)
+{
+	return PageActive(page);
+}
+
+static inline void SetSlabFrozen(struct page *page)
+{
+	SetPageActive(page);
+}
+
+static inline void ClearSlabFrozen(struct page *page)
+{
+	ClearPageActive(page);
+}
+
 static inline int SlabDebug(struct page *page)
 {
 #ifdef CONFIG_SLUB_DEBUG
@@ -1135,11 +1158,12 @@ static void remove_partial(struct kmem_cache *s,
  *
  * Must hold list_lock.
  */
-static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
 {
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
 		n->nr_partial--;
+		SetSlabFrozen(page);
 		return 1;
 	}
 	return 0;
@@ -1163,7 +1187,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)

 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_del_slab(n, page))
+		if (lock_and_freeze_slab(n, page))
 			goto out;
 	page = NULL;
 out:
@@ -1242,10 +1266,11 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  *
  * On exit the slab lock will have been dropped.
  */
-static void putback_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

+	ClearSlabFrozen(page);
 	if (page->inuse) {

 		if (page->freelist)
@@ -1296,9 +1321,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
 		page->inuse--;
 	}
 	s->cpu_slab[cpu] = NULL;
-	ClearPageActive(page);
-
-	putback_slab(s, page);
+	unfreeze_slab(s, page);
 }

 static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
@@ -1389,9 +1412,7 @@ another_slab:
 new_slab:
 	page = get_partial(s, gfpflags, node);
 	if (page) {
-have_slab:
 		s->cpu_slab[cpu] = page;
-		SetPageActive(page);
 		goto load_freelist;
 	}

@@ -1421,7 +1442,9 @@ have_slab:
 			flush_slab(s, s->cpu_slab[cpu], cpu);
 		}
 		slab_lock(page);
-		goto have_slab;
+		SetSlabFrozen(page);
+		s->cpu_slab[cpu] = page;
+		goto load_freelist;
 	}
 	return NULL;
 debug:
@@ -1508,11 +1531,7 @@ checks_ok:
 	page->freelist = object;
 	page->inuse--;

-	if (unlikely(PageActive(page)))
-		/*
-		 * Cpu slabs are never on partial lists and are
-		 * never freed.
-		 */
+	if (unlikely(SlabFrozen(page)))
 		goto out_unlock;

 	if (unlikely(!page->inuse))
@@ -1544,7 +1563,7 @@ slab_empty:
 debug:
 	if (!free_object_checks(s, page, x))
 		goto out_unlock;
-	if (!PageActive(page) && !page->freelist)
+	if (!SlabFrozen(page) && !page->freelist)
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, x, TRACK_FREE, addr);
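As a follow-up illustration of the lifecycle described in the new comment block (a slab is frozen when it is claimed as a cpu slab and unfrozen when the processor is done with it), here is a toy user-space model. The slab and node structures, the on_partial flag and the list bookkeeping are invented stand-ins, and unfreeze_slab() below ignores the freelist and full-list handling the real function performs; only the freeze/unfreeze transitions mirror the patch.

#include <stdio.h>
#include <stdbool.h>

struct slab {
        bool frozen;            /* stand-in for the PageActive bit */
        int inuse;              /* objects currently allocated from the slab */
        bool on_partial;        /* stand-in for partial-list membership */
};

struct node {
        int nr_partial;         /* number of slabs on the partial list */
};

/* Mirrors lock_and_freeze_slab(): take the slab off the partial list
 * and mark it frozen so that frees skip the usual list processing. */
static void freeze_slab(struct node *n, struct slab *s)
{
        s->on_partial = false;
        n->nr_partial--;
        s->frozen = true;
}

/* Mirrors unfreeze_slab() in spirit: clear the frozen state and, if the
 * slab still holds objects, return it to the partial list. */
static void unfreeze_slab(struct node *n, struct slab *s)
{
        s->frozen = false;
        if (s->inuse) {
                s->on_partial = true;
                n->nr_partial++;
        }
}

int main(void)
{
        struct node n = { .nr_partial = 1 };
        struct slab s = { .frozen = false, .inuse = 3, .on_partial = true };

        freeze_slab(&n, &s);    /* the slab becomes the cpu slab */
        printf("frozen=%d nr_partial=%d\n", s.frozen, n.nr_partial);
        unfreeze_slab(&n, &s);  /* the cpu no longer needs it */
        printf("frozen=%d nr_partial=%d\n", s.frozen, n.nr_partial);
        return 0;
}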