Drivers: hv: vmbus: Allocate ring buffer memory in NUMA aware fashion
Allocate ring buffer memory from the NUMA node assigned to the channel. Since this is a performance and not a correctness issue, if the node specific allocation were to fail, fall back and allocate without specifying the node. Signed-off-by: K. Y. Srinivasan <kys@microsoft.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
50566ac870
commit
294409d205
|
@@ -73,6 +73,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	unsigned long flags;
 	int ret, err = 0;
 	unsigned long t;
+	struct page *page;
 
 	spin_lock_irqsave(&newchannel->lock, flags);
 	if (newchannel->state == CHANNEL_OPEN_STATE) {
@@ -87,8 +88,17 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	newchannel->channel_callback_context = context;
 
 	/* Allocate the ring buffer */
-	out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
-		get_order(send_ringbuffer_size + recv_ringbuffer_size));
+	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
+				GFP_KERNEL|__GFP_ZERO,
+				get_order(send_ringbuffer_size +
+				recv_ringbuffer_size));
+
+	if (!page)
+		out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+				get_order(send_ringbuffer_size +
+				recv_ringbuffer_size));
+	else
+		out = (void *)page_address(page);
 
 	if (!out) {
 		err = -ENOMEM;
|
Loading…
Reference in New Issue