/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static int xpc_mq_node = -1;

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

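/* Initialize the uv-specific fields of every partition entry. */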
static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}

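/*
 * Obtain and program the irq/vector that will signal the arrival of work on
 * the given GRU message queue.
 */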
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0)
		return mq->irq;

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}

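/*
 * Allocate and initialize a GRU message queue: allocate the descriptors and
 * node-local pages for the queue, set up the watchlist and irq, and open the
 * queue's memory to the other partitions.
 */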
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = __alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				  pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				     nasid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

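/* Undo everything xpc_create_gru_mq_uv() set up, in reverse order. */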
static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}

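/*
 * Send a message to a GRU message queue, retrying on MQE_QUEUE_FULL and
 * MQE_CONGESTION; any other GRU error is reported as xpGruSendMqError.
 */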
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

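/*
 * Act on the activate/reactivate/deactivate requests that were noted by the
 * activate IRQ handler.
 */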
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

}

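/* Handle one message taken off of the activate GRU message queue. */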
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int part_setup,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock(&part_uv->flags_lock);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock(&part_uv->flags_lock);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

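/*
 * Interrupt handler for the activate GRU message queue: drain the queue and,
 * if needed, wake the heartbeat checker.
 */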
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      part_referenced,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}

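/*
 * Build the message header and send the message to the remote partition's
 * activate GRU message queue, caching its mq descriptor on first use.
 */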
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

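/* Pull the remote partition's heartbeat and check that it is still ticking. */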
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}
	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

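/*
 * Simple spinlock-protected singly-linked FIFO used to manage the uv
 * message slots.
 */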
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	static enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

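/*
 * Senders for the various channel-control messages carried over the
 * activate GRU message queue.
 */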
static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&part->chctl_lock, irq_flags);
        part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

        xpc_wakeup_channel_mgr(part);
}

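/*
 * Cache the GRU message queue descriptor for the remote partition's notify
 * message queue, given its global physical address.
 */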
static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
                               unsigned long gru_mq_desc_gpa)
{
        struct xpc_channel_uv *ch_uv = &ch->sn.uv;

        DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
        return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
                                               gru_mq_desc_gpa);
}

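/*
 * The two xpc_indicate_partition_*engaged_uv() functions report this
 * partition's engagement state to the remote partition via the activate
 * message queue; the functions that follow clear and query the local
 * XPC_P_ENGAGED_UV flag.
 */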
static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
        struct xpc_activate_mq_msg_uv msg;

        xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
                                      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
        struct xpc_activate_mq_msg_uv msg;

        xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
                                      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
        struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
        unsigned long irq_flags;

        spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
        part_uv->flags &= ~XPC_P_ENGAGED_UV;
        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
        return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
        struct xpc_partition_uv *part_uv;
        short partid;

        for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
                part_uv = &xpc_partitions[partid].sn.uv;
                if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
                        return 1;
        }
        return 0;
}

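/*
 * Allocate a free send-side msg_slot, waiting for one to become available
 * unless the caller specified XPC_NOWAIT.
 */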
static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
                         struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
        enum xp_retval ret;
        struct xpc_send_msg_slot_uv *msg_slot;
        struct xpc_fifo_entry_uv *entry;

        while (1) {
                entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
                if (entry != NULL)
                        break;

                if (flags & XPC_NOWAIT)
                        return xpNoWait;

                ret = xpc_allocate_msg_wait(ch);
                if (ret != xpInterrupted && ret != xpTimeout)
                        return ret;
        }

        msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
        *address_of_msg_slot = msg_slot;
        return xpSuccess;
}

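/* Return a send-side msg_slot to the channel's free list. */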
static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
                     struct xpc_send_msg_slot_uv *msg_slot)
{
        xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

        /* wake up anyone waiting for a free msg slot */
        if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
                wake_up(&ch->msg_allocate_wq);
}

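/*
 * Invoke the sender's notification callback exactly once; the cmpxchg() on
 * msg_slot->func prevents a race with a concurrent disconnect notification.
 */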
static void
xpc_notify_sender_uv(struct xpc_channel *ch,
                     struct xpc_send_msg_slot_uv *msg_slot,
                     enum xp_retval reason)
{
        xpc_notify_func func = msg_slot->func;

        if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

                atomic_dec(&ch->n_to_notify);

                dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
                        "msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
                        msg_slot->msg_slot_number, ch->partid, ch->number);

                func(reason, ch->partid, ch->number, msg_slot->key);

                dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
                        "msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
                        msg_slot->msg_slot_number, ch->partid, ch->number);
        }
}

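/*
 * The remote partition ACKed a previously sent message: advance the slot's
 * sequence number, notify the original sender and free the msg_slot.
 */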
static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
                            struct xpc_notify_mq_msg_uv *msg)
{
        struct xpc_send_msg_slot_uv *msg_slot;
        int entry = msg->hdr.msg_slot_number % ch->local_nentries;

        msg_slot = &ch->sn.uv.send_msg_slots[entry];

        BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
        msg_slot->msg_slot_number += ch->local_nentries;

        if (msg_slot->func != NULL)
                xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

        xpc_free_msg_slot_uv(ch, msg_slot);
}

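/*
 * Handle one message received on the notify message queue: validate the
 * channel number, treat a zero-sized message as an ACK, and otherwise copy
 * the message into a recv msg_slot and arrange for its delivery.
 */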
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
                            struct xpc_notify_mq_msg_uv *msg)
{
        struct xpc_partition_uv *part_uv = &part->sn.uv;
        struct xpc_channel *ch;
        struct xpc_channel_uv *ch_uv;
        struct xpc_notify_mq_msg_uv *msg_slot;
        unsigned long irq_flags;
        int ch_number = msg->hdr.ch_number;

        if (unlikely(ch_number >= part->nchannels)) {
                dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
                        "channel number=0x%x in message from partid=%d\n",
                        ch_number, XPC_PARTID(part));

                /* get hb checker to deactivate from the remote partition */
                spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
                if (part_uv->act_state_req == 0)
                        xpc_activate_IRQ_rcvd++;
                part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
                part_uv->reason = xpBadChannelNumber;
                spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

                wake_up_interruptible(&xpc_activate_IRQ_wq);
                return;
        }

        ch = &part->channels[ch_number];
        xpc_msgqueue_ref(ch);

        if (!(ch->flags & XPC_C_CONNECTED)) {
                xpc_msgqueue_deref(ch);
                return;
        }

        /* see if we're really dealing with an ACK for a previously sent msg */
        if (msg->hdr.size == 0) {
                xpc_handle_notify_mq_ack_uv(ch, msg);
                xpc_msgqueue_deref(ch);
                return;
        }

        /* we're dealing with a normal message sent via the notify_mq */
        ch_uv = &ch->sn.uv;

        msg_slot = ch_uv->recv_msg_slots +
            (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

        BUG_ON(msg_slot->hdr.size != 0);

        memcpy(msg_slot, msg, msg->hdr.size);

        xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

        if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
                /*
                 * If there is an existing idle kthread get it to deliver
                 * the payload, otherwise we'll have to get the channel mgr
                 * for this partition to create a kthread to do the delivery.
                 */
                if (atomic_read(&ch->kthreads_idle) > 0)
                        wake_up_nr(&ch->idle_wq, 1);
                else
                        xpc_send_chctl_local_msgrequest_uv(part, ch->number);
        }
        xpc_msgqueue_deref(ch);
}

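/* Interrupt handler for the notify GRU message queue. */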
static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
        struct xpc_notify_mq_msg_uv *msg;
        short partid;
        struct xpc_partition *part;

        while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
               NULL) {

                partid = msg->hdr.partid;
                if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
                        dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
                                "invalid partid=0x%x in message\n", partid);
                } else {
                        part = &xpc_partitions[partid];

                        if (xpc_part_ref(part)) {
                                xpc_handle_notify_mq_msg_uv(part, msg);
                                xpc_part_deref(part);
                        }
                }

                gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
        }

        return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
        return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

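/*
 * A MSGREQUEST chctl flag is pending for this channel; if payloads are
 * deliverable and the channel is fully connected, activate kthreads to
 * deliver them.
 */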
static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
        struct xpc_channel *ch = &part->channels[ch_number];
        int ndeliverable_payloads;

        xpc_msgqueue_ref(ch);

        ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

        if (ndeliverable_payloads > 0 &&
            (ch->flags & XPC_C_CONNECTED) &&
            (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

                xpc_activate_kthreads(ch, ndeliverable_payloads);
        }

        xpc_msgqueue_deref(ch);
}

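/*
 * Send a payload to the remote partition over the notify message queue.
 * If a notify function was supplied, it will be called once the message
 * is ACKed by the remote side or the channel is disconnected.
 */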
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
                    u16 payload_size, u8 notify_type, xpc_notify_func func,
                    void *key)
{
        enum xp_retval ret = xpSuccess;
        struct xpc_send_msg_slot_uv *msg_slot = NULL;
        struct xpc_notify_mq_msg_uv *msg;
        u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
        size_t msg_size;

        DBUG_ON(notify_type != XPC_N_CALL);

        msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
        if (msg_size > ch->entry_size)
                return xpPayloadTooBig;

        xpc_msgqueue_ref(ch);

        if (ch->flags & XPC_C_DISCONNECTING) {
                ret = ch->reason;
                goto out_1;
        }
        if (!(ch->flags & XPC_C_CONNECTED)) {
                ret = xpNotConnected;
                goto out_1;
        }

        ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
        if (ret != xpSuccess)
                goto out_1;

        if (func != NULL) {
                atomic_inc(&ch->n_to_notify);

                msg_slot->key = key;
                smp_wmb(); /* a non-NULL func must hit memory after the key */
                msg_slot->func = func;

                if (ch->flags & XPC_C_DISCONNECTING) {
                        ret = ch->reason;
                        goto out_2;
                }
        }

        msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
        msg->hdr.partid = xp_partition_id;
        msg->hdr.ch_number = ch->number;
        msg->hdr.size = msg_size;
        msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
        memcpy(&msg->payload, payload, payload_size);

        ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
                               msg_size);
        if (ret == xpSuccess)
                goto out_1;

        XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
        if (func != NULL) {
                /*
                 * Try to NULL the msg_slot's func field. If we fail, then
                 * xpc_notify_senders_of_disconnect_uv() beat us to it, in
                 * which case we need to pretend we succeeded in sending the
                 * message, since the user will get a callout for the
                 * disconnect error from xpc_notify_senders_of_disconnect_uv()
                 * and also returning an error here would confuse them.
                 * Additionally, since in this case the channel is being
                 * disconnected we don't need to put the msg_slot back on the
                 * free list.
                 */
                if (cmpxchg(&msg_slot->func, func, NULL) != func) {
                        ret = xpSuccess;
                        goto out_1;
                }

                msg_slot->key = NULL;
                atomic_dec(&ch->n_to_notify);
        }
        xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
        xpc_msgqueue_deref(ch);
        return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
        struct xpc_send_msg_slot_uv *msg_slot;
        int entry;

        DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

        for (entry = 0; entry < ch->local_nentries; entry++) {

                if (atomic_read(&ch->n_to_notify) == 0)
                        break;

                msg_slot = &ch->sn.uv.send_msg_slots[entry];
                if (msg_slot->func != NULL)
                        xpc_notify_sender_uv(ch, msg_slot, ch->reason);
        }
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
        struct xpc_fifo_entry_uv *entry;
        struct xpc_notify_mq_msg_uv *msg;
        void *payload = NULL;

        if (!(ch->flags & XPC_C_DISCONNECTING)) {
                entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
                if (entry != NULL) {
                        msg = container_of(entry, struct xpc_notify_mq_msg_uv,
                                           hdr.u.next);
                        payload = &msg->payload;
                }
        }
        return payload;
}

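/*
 * The caller is done with the received payload; send a zero-sized ACK back
 * to the sending partition so the corresponding send-side msg_slot can be
 * reused.
 */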
static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
        struct xpc_notify_mq_msg_uv *msg;
        enum xp_retval ret;

        msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

        /* return an ACK to the sender of this message */

        msg->hdr.partid = xp_partition_id;
        msg->hdr.size = 0;      /* size of zero indicates this is an ACK */

        ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
                               sizeof(struct xpc_notify_mq_msghdr_uv));
        if (ret != xpSuccess)
                XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}

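/* UV implementations of the arch operations used by the common XPC code. */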
static struct xpc_arch_operations xpc_arch_ops_uv = {
        .setup_partitions = xpc_setup_partitions_uv,
        .teardown_partitions = xpc_teardown_partitions_uv,
        .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
        .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
        .setup_rsvd_page = xpc_setup_rsvd_page_uv,

        .allow_hb = xpc_allow_hb_uv,
        .disallow_hb = xpc_disallow_hb_uv,
        .disallow_all_hbs = xpc_disallow_all_hbs_uv,
        .increment_heartbeat = xpc_increment_heartbeat_uv,
        .offline_heartbeat = xpc_offline_heartbeat_uv,
        .online_heartbeat = xpc_online_heartbeat_uv,
        .heartbeat_init = xpc_heartbeat_init_uv,
        .heartbeat_exit = xpc_heartbeat_exit_uv,
        .get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

        .request_partition_activation =
                xpc_request_partition_activation_uv,
        .request_partition_reactivation =
                xpc_request_partition_reactivation_uv,
        .request_partition_deactivation =
                xpc_request_partition_deactivation_uv,
        .cancel_partition_deactivation_request =
                xpc_cancel_partition_deactivation_request_uv,

        .setup_ch_structures = xpc_setup_ch_structures_uv,
        .teardown_ch_structures = xpc_teardown_ch_structures_uv,

        .make_first_contact = xpc_make_first_contact_uv,

        .get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
        .send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
        .send_chctl_closereply = xpc_send_chctl_closereply_uv,
        .send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
        .send_chctl_openreply = xpc_send_chctl_openreply_uv,
        .send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
        .process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

        .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

        .setup_msg_structures = xpc_setup_msg_structures_uv,
        .teardown_msg_structures = xpc_teardown_msg_structures_uv,

        .indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
        .indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
        .assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
        .partition_engaged = xpc_partition_engaged_uv,
        .any_partition_engaged = xpc_any_partition_engaged_uv,

        .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
        .send_payload = xpc_send_payload_uv,
        .get_deliverable_payload = xpc_get_deliverable_payload_uv,
        .received_payload = xpc_received_payload_uv,
        .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};

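/*
 * Create the activate and notify GRU message queues on the specified node,
 * trying each of that node's CPUs until creation succeeds.
 */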
static int
xpc_init_mq_node(int nid)
{
        int cpu;

        get_online_cpus();

        for_each_cpu(cpu, cpumask_of_node(nid)) {
                xpc_activate_mq_uv =
                        xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
                                             XPC_ACTIVATE_IRQ_NAME,
                                             xpc_handle_activate_IRQ_uv);
                if (!IS_ERR(xpc_activate_mq_uv))
                        break;
        }
        if (IS_ERR(xpc_activate_mq_uv)) {
                put_online_cpus();
                return PTR_ERR(xpc_activate_mq_uv);
        }

        for_each_cpu(cpu, cpumask_of_node(nid)) {
                xpc_notify_mq_uv =
                        xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
                                             XPC_NOTIFY_IRQ_NAME,
                                             xpc_handle_notify_IRQ_uv);
                if (!IS_ERR(xpc_notify_mq_uv))
                        break;
        }
        if (IS_ERR(xpc_notify_mq_uv)) {
                xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
                put_online_cpus();
                return PTR_ERR(xpc_notify_mq_uv);
        }

        put_online_cpus();
        return 0;
}

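/*
 * Initialize the UV-specific parts of XPC: install the UV arch operations
 * and create the GRU message queues, either on the node given by the
 * xpc_mq_node module parameter or on the first online node where creation
 * succeeds.
 */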
int
xpc_init_uv(void)
{
        int nid;
        int ret = 0;

        xpc_arch_ops = xpc_arch_ops_uv;

        if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
                dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
                        XPC_MSG_HDR_MAX_SIZE);
                return -E2BIG;
        }

        if (xpc_mq_node < 0)
                for_each_online_node(nid) {
                        ret = xpc_init_mq_node(nid);

                        if (!ret)
                                break;
                }
        else
                ret = xpc_init_mq_node(xpc_mq_node);

        if (ret < 0)
                dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
                        -ret);

        return ret;
}

void
xpc_exit_uv(void)
{
        xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
        xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}

module_param(xpc_mq_node, int, 0);
MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");