ipc/util.c, ipc_rcu_alloc: cacheline align allocation
Enforce that ipc_rcu_alloc returns a cacheline aligned pointer on SMP.

Rationale: the SysV sem code tries to move the main spinlock into a separate
cacheline (____cacheline_aligned_in_smp). This works only if ipc_rcu_alloc
returns cacheline aligned pointers. vmalloc and kmalloc return cacheline
aligned pointers; the current implementation of ipc_rcu_alloc breaks that.

[akpm@linux-foundation.org: coding-style fixes]

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9ad66ae65f
commit 196aa0132f

ipc/util.c | 12 ++++++------

1 file changed, 6 insertions(+), 6 deletions(-)
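Before the diff itself, here is a minimal userspace sketch of the allocation scheme this patch adopts: pad the bookkeeping header to a full cacheline and hand callers the memory right after it, so the returned pointer keeps the allocator's cacheline alignment. The 64-byte cacheline size and the names (struct hdr, demo_rcu_alloc) are illustrative assumptions; the real code uses struct ipc_rcu, ____cacheline_aligned_in_smp and kmalloc/vmalloc as shown in the diff below.

/*
 * Userspace sketch (not the kernel code) of the scheme the patch adopts:
 * the header is padded to a full cacheline, and the caller gets the memory
 * that starts right after it, so the payload keeps the allocator's
 * cacheline alignment.  Cacheline size and names are assumptions.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHELINE 64                            /* assumed cacheline size */

struct hdr {                                    /* stands in for struct ipc_rcu */
        int refcount;
} __attribute__((aligned(CACHELINE)));          /* like ____cacheline_aligned_in_smp */

static void *demo_rcu_alloc(size_t size)
{
        /* round up so aligned_alloc() gets a multiple of the alignment */
        size_t total = (sizeof(struct hdr) + size + CACHELINE - 1) &
                       ~(size_t)(CACHELINE - 1);
        struct hdr *out = aligned_alloc(CACHELINE, total);

        if (!out)
                return NULL;
        out->refcount = 1;
        return out + 1;                         /* payload starts on the next cacheline */
}

int main(void)
{
        void *obj = demo_rcu_alloc(128);

        if (!obj)
                return 1;

        /* same "- 1" step the patched getref/putref use to find the header */
        struct hdr *p = (struct hdr *)obj - 1;

        /* the caller-visible pointer is cacheline aligned */
        assert(((uintptr_t)obj & (CACHELINE - 1)) == 0);
        printf("header %p, payload %p, refcount %d\n", (void *)p, obj, p->refcount);
        free(p);
        return 0;
}

With the old layout the caller's data began at the data[0] flexible member directly behind rcu and refcount, so its alignment was whatever that offset happened to be; padding the header and returning out + 1 preserves the alignment kmalloc/vmalloc already provide, and ipc_rcu_getref/ipc_rcu_putref step back one header with pointer arithmetic instead of container_of.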
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -468,9 +468,7 @@ void ipc_free(void* ptr, int size)
 struct ipc_rcu {
        struct rcu_head rcu;
        atomic_t refcount;
-       /* "void *" makes sure alignment of following data is sane. */
-       void *data[0];
-};
+} ____cacheline_aligned_in_smp;
 
 /**
  * ipc_rcu_alloc - allocate ipc and rcu space
@@ -488,12 +486,14 @@ void *ipc_rcu_alloc(int size)
        if (unlikely(!out))
                return NULL;
        atomic_set(&out->refcount, 1);
-       return out->data;
+       return out + 1;
 }
 
 int ipc_rcu_getref(void *ptr)
 {
-       return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu, data)->refcount);
+       struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
+
+       return atomic_inc_not_zero(&p->refcount);
 }
 
 /**
@@ -507,7 +507,7 @@ static void ipc_schedule_free(struct rcu_head *head)
 
 void ipc_rcu_putref(void *ptr)
 {
-       struct ipc_rcu *p = container_of(ptr, struct ipc_rcu, data);
+       struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
 
        if (!atomic_dec_and_test(&p->refcount))
                return;