VMCI: Remove non-blocking/pinned queuepair support
We added this for a special case that doesn't exist on Linux. Remove the non-blocking/pinned queuepair code and simplify the driver in preparation for adding virtual IOMMU support.

Acked-by: Aditya Sarwade <asarwade@vmware.com>
Signed-off-by: Andy King <acking@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 45412befe8
parent 440ab3b303
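Before the full diff, a condensed sketch of the caller-visible effect, assembled from the hunks below (kernel-internal code shown out of context, not a standalone compilable unit):

/* In vmci_qpair_alloc(): the now-unsupported flags are rejected up front. */
if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
        pr_devel("NONBLOCK OR PINNED set");
        return VMCI_ERROR_INVALID_ARGS;
}

/* With no non-blocking mode left, the lock helpers take the queue mutex unconditionally. */
static void qp_lock(const struct vmci_qp *qpair)
{
        qp_acquire_queue_mutex(qpair->produce_q);
}

static void qp_unlock(const struct vmci_qp *qpair)
{
        qp_release_queue_mutex(qpair->produce_q);
}

In short, callers that previously passed VMCI_QPFLAG_NONBLOCK or VMCI_QPFLAG_PINNED now get VMCI_ERROR_INVALID_ARGS instead of a non-blocking queue pair.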
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -148,12 +148,10 @@ typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
 struct vmci_queue_kern_if {
         struct page **page;
         struct page **header_page;
-        void *va;
         struct mutex __mutex;   /* Protects the queue. */
         struct mutex *mutex;    /* Shared by producer and consumer queues. */
         bool host;
         size_t num_pages;
-        bool mapped;
 };
 
 /*
@@ -267,11 +265,6 @@ static void qp_free_queue(void *q, u64 size)
         if (queue) {
                 u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
 
-                if (queue->kernel_if->mapped) {
-                        vunmap(queue->kernel_if->va);
-                        queue->kernel_if->va = NULL;
-                }
-
                 while (i)
                         __free_page(queue->kernel_if->page[--i]);
 
@@ -311,8 +304,6 @@ static void *qp_alloc_queue(u64 size, u32 flags)
         queue->kernel_if->header_page = NULL;  /* Unused in guest. */
         queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
         queue->kernel_if->host = false;
-        queue->kernel_if->va = NULL;
-        queue->kernel_if->mapped = false;
 
         for (i = 0; i < num_data_pages; i++) {
                 queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
@@ -320,16 +311,6 @@ static void *qp_alloc_queue(u64 size, u32 flags)
                         goto fail;
         }
 
-        if (vmci_qp_pinned(flags)) {
-                queue->kernel_if->va =
-                    vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
-                         PAGE_KERNEL);
-                if (!queue->kernel_if->va)
-                        goto fail;
-
-                queue->kernel_if->mapped = true;
-        }
-
         return (void *)queue;
 
  fail:
@@ -359,11 +340,7 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
                 void *va;
                 size_t to_copy;
 
-                if (!kernel_if->mapped)
-                        va = kmap(kernel_if->page[page_index]);
-                else
-                        va = (void *)((u8 *)kernel_if->va +
-                                      (page_index * PAGE_SIZE));
+                va = kmap(kernel_if->page[page_index]);
 
                 if (size - bytes_copied > PAGE_SIZE - page_offset)
                         /* Enough payload to fill up from this page. */
@@ -388,8 +365,7 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
                 }
 
                 bytes_copied += to_copy;
-                if (!kernel_if->mapped)
-                        kunmap(kernel_if->page[page_index]);
+                kunmap(kernel_if->page[page_index]);
         }
 
         return VMCI_SUCCESS;
@@ -417,11 +393,7 @@ static int __qp_memcpy_from_queue(void *dest,
                 void *va;
                 size_t to_copy;
 
-                if (!kernel_if->mapped)
-                        va = kmap(kernel_if->page[page_index]);
-                else
-                        va = (void *)((u8 *)kernel_if->va +
-                                      (page_index * PAGE_SIZE));
+                va = kmap(kernel_if->page[page_index]);
 
                 if (size - bytes_copied > PAGE_SIZE - page_offset)
                         /* Enough payload to fill up this page. */
@@ -446,8 +418,7 @@ static int __qp_memcpy_from_queue(void *dest,
                 }
 
                 bytes_copied += to_copy;
-                if (!kernel_if->mapped)
-                        kunmap(kernel_if->page[page_index]);
+                kunmap(kernel_if->page[page_index]);
         }
 
         return VMCI_SUCCESS;
@@ -634,8 +605,6 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
                 queue->kernel_if->header_page =
                     (struct page **)((u8 *)queue + queue_size);
                 queue->kernel_if->page = &queue->kernel_if->header_page[1];
-                queue->kernel_if->va = NULL;
-                queue->kernel_if->mapped = false;
         }
 
         return queue;
@@ -1720,21 +1689,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
                 if (result < VMCI_SUCCESS)
                         return result;
 
-                /*
-                 * Preemptively load in the headers if non-blocking to
-                 * prevent blocking later.
-                 */
-                if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
-                        result = qp_host_map_queues(entry->produce_q,
-                                                    entry->consume_q);
-                        if (result < VMCI_SUCCESS) {
-                                qp_host_unregister_user_memory(
-                                        entry->produce_q,
-                                        entry->consume_q);
-                                return result;
-                        }
-                }
-
                 entry->state = VMCIQPB_ATTACHED_MEM;
         } else {
                 entry->state = VMCIQPB_ATTACHED_NO_MEM;
@@ -1749,24 +1703,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
 
                         return VMCI_ERROR_UNAVAILABLE;
                 } else {
-                        /*
-                         * For non-blocking queue pairs, we cannot rely on
-                         * enqueue/dequeue to map in the pages on the
-                         * host-side, since it may block, so we make an
-                         * attempt here.
-                         */
-
-                        if (flags & VMCI_QPFLAG_NONBLOCK) {
-                                result =
-                                    qp_host_map_queues(entry->produce_q,
-                                                       entry->consume_q);
-                                if (result < VMCI_SUCCESS)
-                                        return result;
-
-                                entry->qp.flags |= flags &
-                                    (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
-                        }
-
                         /* The host side has successfully attached to a queue pair. */
                         entry->state = VMCIQPB_ATTACHED_MEM;
                 }
@@ -2543,24 +2479,19 @@ void vmci_qp_guest_endpoints_exit(void)
  * Since non-blocking isn't yet implemented on the host personality we
  * have no reason to acquire a spin lock. So to avoid the use of an
  * unnecessary lock only acquire the mutex if we can block.
- * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK. Therefore
- * we can use the same locking function for access to both the queue
- * and the queue headers as it is the same logic. Assert this behvior.
  */
 static void qp_lock(const struct vmci_qp *qpair)
 {
-        if (vmci_can_block(qpair->flags))
-                qp_acquire_queue_mutex(qpair->produce_q);
+        qp_acquire_queue_mutex(qpair->produce_q);
 }
 
 /*
  * Helper routine that unlocks the queue pair after calling
- * qp_lock. Respects non-blocking and pinning flags.
+ * qp_lock.
  */
 static void qp_unlock(const struct vmci_qp *qpair)
 {
-        if (vmci_can_block(qpair->flags))
-                qp_release_queue_mutex(qpair->produce_q);
+        qp_release_queue_mutex(qpair->produce_q);
 }
 
 /*
@@ -2568,17 +2499,12 @@ static void qp_unlock(const struct vmci_qp *qpair)
  * currently not mapped, it will be attempted to do so.
  */
 static int qp_map_queue_headers(struct vmci_queue *produce_q,
-                                struct vmci_queue *consume_q,
-                                bool can_block)
+                                struct vmci_queue *consume_q)
 {
         int result;
 
         if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
-                if (can_block)
-                        result = qp_host_map_queues(produce_q, consume_q);
-                else
-                        result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
-
+                result = qp_host_map_queues(produce_q, consume_q);
                 if (result < VMCI_SUCCESS)
                         return (produce_q->saved_header &&
                                 consume_q->saved_header) ?
@@ -2601,8 +2527,7 @@ static int qp_get_queue_headers(const struct vmci_qp *qpair,
 {
         int result;
 
-        result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
-                                      vmci_can_block(qpair->flags));
+        result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
         if (result == VMCI_SUCCESS) {
                 *produce_q_header = qpair->produce_q->q_header;
                 *consume_q_header = qpair->consume_q->q_header;
@@ -2645,9 +2570,6 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
 {
         unsigned int generation;
 
-        if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
-                return false;
-
         qpair->blocked++;
         generation = qpair->generation;
         qp_unlock(qpair);
@@ -2674,15 +2596,14 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
                                  const u64 produce_q_size,
                                  const void *buf,
                                  size_t buf_size,
-                                 vmci_memcpy_to_queue_func memcpy_to_queue,
-                                 bool can_block)
+                                 vmci_memcpy_to_queue_func memcpy_to_queue)
 {
         s64 free_space;
         u64 tail;
         size_t written;
         ssize_t result;
 
-        result = qp_map_queue_headers(produce_q, consume_q, can_block);
+        result = qp_map_queue_headers(produce_q, consume_q);
         if (unlikely(result != VMCI_SUCCESS))
                 return result;
 
@@ -2737,15 +2658,14 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
                                  void *buf,
                                  size_t buf_size,
                                  vmci_memcpy_from_queue_func memcpy_from_queue,
-                                 bool update_consumer,
-                                 bool can_block)
+                                 bool update_consumer)
 {
         s64 buf_ready;
         u64 head;
         size_t read;
         ssize_t result;
 
-        result = qp_map_queue_headers(produce_q, consume_q, can_block);
+        result = qp_map_queue_headers(produce_q, consume_q);
         if (unlikely(result != VMCI_SUCCESS))
                 return result;
 
@@ -2842,32 +2762,11 @@ int vmci_qpair_alloc(struct vmci_qp **qpair,
         route = vmci_guest_code_active() ?
             VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
 
-        /* If NONBLOCK or PINNED is set, we better be the guest personality. */
-        if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
-            VMCI_ROUTE_AS_GUEST != route) {
-                pr_devel("Not guest personality w/ NONBLOCK OR PINNED set");
+        if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
+                pr_devel("NONBLOCK OR PINNED set");
                 return VMCI_ERROR_INVALID_ARGS;
         }
 
-        /*
-         * Limit the size of pinned QPs and check sanity.
-         *
-         * Pinned pages implies non-blocking mode. Mutexes aren't acquired
-         * when the NONBLOCK flag is set in qpair code; and also should not be
-         * acquired when the PINNED flagged is set. Since pinning pages
-         * implies we want speed, it makes no sense not to have NONBLOCK
-         * set if PINNED is set. Hence enforce this implication.
-         */
-        if (vmci_qp_pinned(flags)) {
-                if (vmci_can_block(flags)) {
-                        pr_err("Attempted to enable pinning w/o non-blocking");
-                        return VMCI_ERROR_INVALID_ARGS;
-                }
-
-                if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
-                        return VMCI_ERROR_NO_RESOURCES;
-        }
-
         my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
         if (!my_qpair)
                 return VMCI_ERROR_NO_MEM;
@@ -3195,8 +3094,7 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
                                    qpair->consume_q,
                                    qpair->produce_q_size,
                                    buf, buf_size,
-                                   qp_memcpy_to_queue,
-                                   vmci_can_block(qpair->flags));
+                                   qp_memcpy_to_queue);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
@@ -3237,8 +3135,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
                                    qpair->consume_q,
                                    qpair->consume_q_size,
                                    buf, buf_size,
-                                   qp_memcpy_from_queue, true,
-                                   vmci_can_block(qpair->flags));
+                                   qp_memcpy_from_queue, true);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
@@ -3280,8 +3177,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
                                    qpair->consume_q,
                                    qpair->consume_q_size,
                                    buf, buf_size,
-                                   qp_memcpy_from_queue, false,
-                                   vmci_can_block(qpair->flags));
+                                   qp_memcpy_from_queue, false);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
@@ -3323,8 +3219,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
                                    qpair->consume_q,
                                    qpair->produce_q_size,
                                    iov, iov_size,
-                                   qp_memcpy_to_queue_iov,
-                                   vmci_can_block(qpair->flags));
+                                   qp_memcpy_to_queue_iov);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
@@ -3367,7 +3262,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
                                    qpair->consume_q_size,
                                    iov, iov_size,
                                    qp_memcpy_from_queue_iov,
-                                   true, vmci_can_block(qpair->flags));
+                                   true);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
@@ -3411,7 +3306,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
                                    qpair->consume_q_size,
                                    iov, iov_size,
                                    qp_memcpy_from_queue_iov,
-                                   false, vmci_can_block(qpair->flags));
+                                   false);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -146,24 +146,6 @@ VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
         return page_store->len >= 2;
 }
 
-/*
- * Helper function to check if the non-blocking flag
- * is set for a given queue pair.
- */
-static inline bool vmci_can_block(u32 flags)
-{
-        return !(flags & VMCI_QPFLAG_NONBLOCK);
-}
-
-/*
- * Helper function to check if the queue pair is pinned
- * into memory.
- */
-static inline bool vmci_qp_pinned(u32 flags)
-{
-        return flags & VMCI_QPFLAG_PINNED;
-}
-
 void vmci_qp_broker_exit(void);
 int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
                          u32 flags, u32 priv_flags,