Merge master.kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc

* master.kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc:
  [PATCH] powerpc: iSeries needs slb_initialize to be called
  powerpc: hook up the splice syscall
  [PATCH] powerpc/cell: compile fixes
  [PATCH] powerpc: trivial spelling fixes in fault.c
  [PATCH] powerpc/pseries: EEH Cleanup
  [PATCH] powerpc/pseries: misc lparcfg fixes
  [PATCH] powerpc/pseries: fix device name printing, again.
  [PATCH] powerpc: Extends HCALL interface for InfiniBand usage
  [PATCH] powerpc/pseries: Change H_StudlyCaps to H_SHOUTING_CAPS
  [PATCH] powerpc/pseries: print message if EEH recovery fails
  [PATCH] powerpc/pseries: mutex lock to serialize EEH event processing
  powerpc: converted embedded platforms to use new define_machine support
  powerpc: merge machine_check_exception between ppc32 & ppc64

commit f900e5824a

@@ -37,7 +37,7 @@
#include <asm/prom.h>
#include <asm/vdso_datapage.h>

-#define MODULE_VERS "1.6"
+#define MODULE_VERS "1.7"
#define MODULE_NAME "lparcfg"

/* #define LPARCFG_DEBUG */

@@ -149,17 +149,17 @@ static void log_plpar_hcall_return(unsigned long rc, char *tag)
if (rc == 0) /* success, return */
return;
/* check for null tag ? */
-if (rc == H_Hardware)
+if (rc == H_HARDWARE)
printk(KERN_INFO
"plpar-hcall (%s) failed with hardware fault\n", tag);
-else if (rc == H_Function)
+else if (rc == H_FUNCTION)
printk(KERN_INFO
"plpar-hcall (%s) failed; function not allowed\n", tag);
-else if (rc == H_Authority)
+else if (rc == H_AUTHORITY)
printk(KERN_INFO
-"plpar-hcall (%s) failed; not authorized to this function\n",
-tag);
-else if (rc == H_Parameter)
+"plpar-hcall (%s) failed; not authorized to this"
+" function\n", tag);
+else if (rc == H_PARAMETER)
printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n",
tag);
else

@@ -209,7 +209,7 @@ static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs)
unsigned long dummy;
rc = plpar_hcall(H_PIC, 0, 0, 0, 0, pool_idle_time, num_procs, &dummy);

-if (rc != H_Authority)
+if (rc != H_AUTHORITY)
log_plpar_hcall_return(rc, "H_PIC");
}

@@ -242,7 +242,7 @@ static void parse_system_parameter_string(struct seq_file *m)
{
int call_status;

-char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
+unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
if (!local_buffer) {
printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
__FILE__, __FUNCTION__, __LINE__);

@@ -254,7 +254,8 @@ static void parse_system_parameter_string(struct seq_file *m)
call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
NULL,
SPLPAR_CHARACTERISTICS_TOKEN,
-__pa(rtas_data_buf));
+__pa(rtas_data_buf),
+RTAS_DATA_BUF_SIZE);
memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
spin_unlock(&rtas_data_buf_lock);

@@ -275,7 +276,7 @@ static void parse_system_parameter_string(struct seq_file *m)
#ifdef LPARCFG_DEBUG
printk(KERN_INFO "success calling get-system-parameter \n");
#endif
-splpar_strlen = local_buffer[0] * 16 + local_buffer[1];
+splpar_strlen = local_buffer[0] * 256 + local_buffer[1];
local_buffer += 2; /* step over strlen value */

memset(workbuffer, 0, SPLPAR_MAXLENGTH);

@@ -529,13 +530,13 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr,
*new_weight_ptr);

-if (retval == H_Success || retval == H_Constrained) {
+if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
retval = count;
-} else if (retval == H_Busy) {
+} else if (retval == H_BUSY) {
retval = -EBUSY;
-} else if (retval == H_Hardware) {
+} else if (retval == H_HARDWARE) {
retval = -EIO;
-} else if (retval == H_Parameter) {
+} else if (retval == H_PARAMETER) {
retval = -EINVAL;
} else {
printk(KERN_WARNING "%s: received unknown hv return code %ld",

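Note on the parse_system_parameter_string() change above: the buffer returned by the RTAS "ibm,get-system-parameter" call starts with a 16-bit big-endian length, so the high byte must be weighted by 256, not 16. A standalone sketch of that arithmetic, with hypothetical buffer contents:

#include <stdio.h>

int main(void)
{
	/* hypothetical first two bytes of the returned buffer: length 0x0120 */
	unsigned char local_buffer[2] = { 0x01, 0x20 };
	int splpar_strlen = local_buffer[0] * 256 + local_buffer[1];

	printf("%d\n", splpar_strlen);	/* 288; the old *16 decode would give 48 */
	return 0;
}
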
@@ -578,18 +578,18 @@ static void rtas_percpu_suspend_me(void *info)
* We use "waiting" to indicate our state. As long
* as it is >0, we are still trying to all join up.
* If it goes to 0, we have successfully joined up and
-* one thread got H_Continue. If any error happens,
+* one thread got H_CONTINUE. If any error happens,
* we set it to <0.
*/
local_irq_save(flags);
do {
rc = plpar_hcall_norets(H_JOIN);
smp_rmb();
-} while (rc == H_Success && data->waiting > 0);
-if (rc == H_Success)
+} while (rc == H_SUCCESS && data->waiting > 0);
+if (rc == H_SUCCESS)
goto out;

-if (rc == H_Continue) {
+if (rc == H_CONTINUE) {
data->waiting = 0;
data->args->args[data->args->nargs] =
rtas_call(ibm_suspend_me_token, 0, 1, NULL);

@@ -597,7 +597,7 @@ static void rtas_percpu_suspend_me(void *info)
plpar_hcall_norets(H_PROD,i);
} else {
data->waiting = -EBUSY;
-printk(KERN_ERR "Error on H_Join hypervisor call\n");
+printk(KERN_ERR "Error on H_JOIN hypervisor call\n");
}

out:

@@ -624,7 +624,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
printk(KERN_ERR "Error doing global join\n");

/* Prod each CPU. This won't hurt, and will wake
-* anyone we successfully put to sleep with H_Join
+* anyone we successfully put to sleep with H_JOIN.
*/
for_each_possible_cpu(i)
plpar_hcall_norets(H_PROD, i);

@@ -50,7 +50,6 @@
#include <asm/kgdb.h>
#endif

-extern void platform_init(void);
extern void bootx_init(unsigned long r4, unsigned long phys);

boot_infos_t *boot_infos;

@@ -138,12 +137,7 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
#endif /* CONFIG_CMDLINE */

-#ifdef CONFIG_PPC_MULTIPLATFORM
probe_machine();
-#else
-/* Base init based on machine type. Obsoloete, please kill ! */
-platform_init();
-#endif

#ifdef CONFIG_6xx
if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||

@@ -215,12 +215,10 @@ void __init early_setup(unsigned long dt_ptr)
/*
* Initialize stab / SLB management except on iSeries
*/
-if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
-if (cpu_has_feature(CPU_FTR_SLB))
-slb_initialize();
-else
-stab_initialize(get_paca()->stab_real);
-}
+if (cpu_has_feature(CPU_FTR_SLB))
+slb_initialize();
+else if (!firmware_has_feature(FW_FEATURE_ISERIES))
+stab_initialize(get_paca()->stab_real);

DBG(" <- early_setup()\n");
}

@@ -228,7 +228,7 @@ void system_reset_exception(struct pt_regs *regs)
*/
static inline int check_io_access(struct pt_regs *regs)
{
-#ifdef CONFIG_PPC_PMAC
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
unsigned long msr = regs->msr;
const struct exception_table_entry *entry;
unsigned int *nip = (unsigned int *)regs->nip;

@@ -261,7 +261,7 @@ static inline int check_io_access(struct pt_regs *regs)
return 1;
}
}
-#endif /* CONFIG_PPC_PMAC */
+#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
return 0;
}

@@ -308,8 +308,8 @@ platform_machine_check(struct pt_regs *regs)

void machine_check_exception(struct pt_regs *regs)
{
-#ifdef CONFIG_PPC64
int recover = 0;
unsigned long reason = get_mc_reason(regs);

/* See if any machine dependent calls */
if (ppc_md.machine_check_exception)

@@ -317,8 +317,6 @@ void machine_check_exception(struct pt_regs *regs)

if (recover)
return;
-#else
-unsigned long reason = get_mc_reason(regs);

if (user_mode(regs)) {
regs->msr |= MSR_RI;

@@ -462,7 +460,6 @@ void machine_check_exception(struct pt_regs *regs)
* additional info, e.g. bus error registers.
*/
platform_machine_check(regs);
-#endif /* CONFIG_PPC64 */

if (debugger_fault_handler(regs))
return;

@@ -177,15 +177,15 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,

/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
-* kernel and should generate an OOPS. Unfortunatly, in the case of an
-* erroneous fault occuring in a code path which already holds mmap_sem
+* kernel and should generate an OOPS. Unfortunately, in the case of an
+* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
-* the source reference check when there is a possibilty of a deadlock.
+* the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.

@@ -158,25 +158,25 @@ static int __init mpc834x_rtc_hookup(void)
late_initcall(mpc834x_rtc_hookup);
#endif

-void __init platform_init(void)
+/*
+* Called very early, MMU is off, device-tree isn't unflattened
+*/
+static int __init mpc834x_sys_probe(void)
{
-/* setup the PowerPC module struct */
-ppc_md.setup_arch = mpc834x_sys_setup_arch;
-
-ppc_md.init_IRQ = mpc834x_sys_init_IRQ;
-ppc_md.get_irq = ipic_get_irq;
-
-ppc_md.restart = mpc83xx_restart;
-
-ppc_md.time_init = mpc83xx_time_init;
-ppc_md.set_rtc_time = NULL;
-ppc_md.get_rtc_time = NULL;
-ppc_md.calibrate_decr = generic_calibrate_decr;
-
-ppc_md.progress = udbg_progress;
-
-if (ppc_md.progress)
-ppc_md.progress("mpc834x_sys_init(): exit", 0);
-
-return;
+/* We always match for now, eventually we should look at the flat
+dev tree to ensure this is the board we are suppose to run on
+*/
+return 1;
}

+define_machine(mpc834x_sys) {
+.name = "MPC834x SYS",
+.probe = mpc834x_sys_probe,
+.setup_arch = mpc834x_sys_setup_arch,
+.init_IRQ = mpc834x_sys_init_IRQ,
+.get_irq = ipic_get_irq,
+.restart = mpc83xx_restart,
+.time_init = mpc83xx_time_init,
+.calibrate_decr = generic_calibrate_decr,
+.progress = udbg_progress,
+};

@@ -220,25 +220,25 @@ void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}

-void __init platform_init(void)
+/*
+* Called very early, device-tree isn't unflattened
+*/
+static int __init mpc85xx_ads_probe(void)
{
-ppc_md.setup_arch = mpc85xx_ads_setup_arch;
-ppc_md.show_cpuinfo = mpc85xx_ads_show_cpuinfo;
-
-ppc_md.init_IRQ = mpc85xx_ads_pic_init;
-ppc_md.get_irq = mpic_get_irq;
-
-ppc_md.restart = mpc85xx_restart;
-ppc_md.power_off = NULL;
-ppc_md.halt = NULL;
-
-ppc_md.time_init = NULL;
-ppc_md.set_rtc_time = NULL;
-ppc_md.get_rtc_time = NULL;
-ppc_md.calibrate_decr = generic_calibrate_decr;
-
-ppc_md.progress = udbg_progress;
-
-if (ppc_md.progress)
-ppc_md.progress("mpc85xx_ads platform_init(): exit", 0);
+/* We always match for now, eventually we should look at the flat
+dev tree to ensure this is the board we are suppose to run on
+*/
+return 1;
}

+define_machine(mpc85xx_ads) {
+.name = "MPC85xx ADS",
+.probe = mpc85xx_ads_probe,
+.setup_arch = mpc85xx_ads_setup_arch,
+.init_IRQ = mpc85xx_ads_pic_init,
+.show_cpuinfo = mpc85xx_ads_show_cpuinfo,
+.get_irq = mpic_get_irq,
+.restart = mpc85xx_restart,
+.calibrate_decr = generic_calibrate_decr,
+.progress = udbg_progress,
+};

@@ -316,6 +316,7 @@ void *spu_syscall_table[] = {
[__NR_pselect6] sys_ni_syscall, /* sys_pselect */
[__NR_ppoll] sys_ni_syscall, /* sys_ppoll */
[__NR_unshare] sys_unshare,
+[__NR_splice] sys_splice,
};

long spu_sys_callback(struct spu_syscall_block *s)

@@ -2,6 +2,7 @@
#include <linux/ptrace.h>

#include <asm/spu.h>
+#include <asm/unistd.h>

#include "spufs.h"

@@ -865,7 +865,7 @@ void __init eeh_init(void)
* on the CEC architecture, type of the device, on earlier boot
* command-line arguments & etc.
*/
-void eeh_add_device_early(struct device_node *dn)
+static void eeh_add_device_early(struct device_node *dn)
{
struct pci_controller *phb;
struct eeh_early_enable_info info;

@@ -882,7 +882,6 @@ void eeh_add_device_early(struct device_node *dn)
info.buid_lo = BUID_LO(phb->buid);
early_enable_eeh(dn, &info);
}
-EXPORT_SYMBOL_GPL(eeh_add_device_early);

void eeh_add_device_tree_early(struct device_node *dn)
{

@@ -893,20 +892,6 @@ void eeh_add_device_tree_early(struct device_node *dn)
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);

-void eeh_add_device_tree_late(struct pci_bus *bus)
-{
-struct pci_dev *dev;
-
-list_for_each_entry(dev, &bus->devices, bus_list) {
-eeh_add_device_late(dev);
-if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
-struct pci_bus *subbus = dev->subordinate;
-if (subbus)
-eeh_add_device_tree_late(subbus);
-}
-}
-}
-
/**
* eeh_add_device_late - perform EEH initialization for the indicated pci device
* @dev: pci device for which to set up EEH

@@ -914,7 +899,7 @@ void eeh_add_device_tree_late(struct pci_bus *bus)
* This routine must be used to complete EEH initialization for PCI
* devices that were added after system boot (e.g. hotplug, dlpar).
*/
-void eeh_add_device_late(struct pci_dev *dev)
+static void eeh_add_device_late(struct pci_dev *dev)
{
struct device_node *dn;
struct pci_dn *pdn;

@@ -933,16 +918,33 @@ void eeh_add_device_late(struct pci_dev *dev)

pci_addr_cache_insert_device (dev);
}
-EXPORT_SYMBOL_GPL(eeh_add_device_late);

+void eeh_add_device_tree_late(struct pci_bus *bus)
+{
+struct pci_dev *dev;
+
+list_for_each_entry(dev, &bus->devices, bus_list) {
+eeh_add_device_late(dev);
+if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+struct pci_bus *subbus = dev->subordinate;
+if (subbus)
+eeh_add_device_tree_late(subbus);
+}
+}
+}
+EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
+
/**
* eeh_remove_device - undo EEH setup for the indicated pci device
* @dev: pci device to be removed
*
-* This routine should be when a device is removed from a running
-* system (e.g. by hotplug or dlpar).
+* This routine should be called when a device is removed from
+* a running system (e.g. by hotplug or dlpar). It unregisters
+* the PCI device from the EEH subsystem. I/O errors affecting
+* this device will no longer be detected after this call; thus,
+* i/o errors affecting this slot may leave this device unusable.
*/
-void eeh_remove_device(struct pci_dev *dev)
+static void eeh_remove_device(struct pci_dev *dev)
{
struct device_node *dn;
if (!dev || !eeh_subsystem_enabled)

@@ -958,21 +960,17 @@ void eeh_remove_device(struct pci_dev *dev)
PCI_DN(dn)->pcidev = NULL;
pci_dev_put (dev);
}
-EXPORT_SYMBOL_GPL(eeh_remove_device);

void eeh_remove_bus_device(struct pci_dev *dev)
{
+struct pci_bus *bus = dev->subordinate;
+struct pci_dev *child, *tmp;
+
eeh_remove_device(dev);
-if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
-struct pci_bus *bus = dev->subordinate;
-struct list_head *ln;
-if (!bus)
-return;
-for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
-struct pci_dev *pdev = pci_dev_b(ln);
-if (pdev)
-eeh_remove_bus_device(pdev);
-}

+if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
+eeh_remove_bus_device(child);
}
}
EXPORT_SYMBOL_GPL(eeh_remove_bus_device);

@@ -293,15 +293,16 @@ void handle_eeh_events (struct eeh_event *event)
frozen_pdn = PCI_DN(frozen_dn);
frozen_pdn->eeh_freeze_count++;

-pci_str = pci_name (frozen_pdn->pcidev);
-drv_str = pcid_name (frozen_pdn->pcidev);
-if (!pci_str) {
+if (frozen_pdn->pcidev) {
+pci_str = pci_name (frozen_pdn->pcidev);
+drv_str = pcid_name (frozen_pdn->pcidev);
+} else {
pci_str = pci_name (event->dev);
drv_str = pcid_name (event->dev);
}

if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
-goto hard_fail;
+goto excess_failures;

/* If the reset state is a '5' and the time to reset is 0 (infinity)
* or is more then 15 seconds, then mark this as a permanent failure.

@@ -356,7 +357,7 @@ void handle_eeh_events (struct eeh_event *event)

return;

-hard_fail:
+excess_failures:
/*
* About 90% of all real-life EEH failures in the field
* are due to poorly seated PCI cards. Only 10% or so are

@@ -367,7 +368,15 @@ hard_fail:
"and has been permanently disabled. Please try reseating\n"
"this device or replacing it.\n",
drv_str, pci_str, frozen_pdn->eeh_freeze_count);
+goto perm_error;

+hard_fail:
+printk(KERN_ERR
+"EEH: Unable to recover from failure of PCI device %s - %s\n"
+"Please try reseating this device or replacing it.\n",
+drv_str, pci_str);

+perm_error:
eeh_slot_error_detail(frozen_pdn, 2 /* Permanent Error */);

/* Notify all devices that they're about to go down. */

@@ -19,7 +19,9 @@
*/

#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
#include <linux/workqueue.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>

@@ -37,14 +39,18 @@ LIST_HEAD(eeh_eventlist);
static void eeh_thread_launcher(void *);
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);

+/* Serialize reset sequences for a given pci device */
+DEFINE_MUTEX(eeh_event_mutex);

/**
-* eeh_event_handler - dispatch EEH events. The detection of a frozen
-* slot can occur inside an interrupt, where it can be hard to do
-* anything about it. The goal of this routine is to pull these
-* detection events out of the context of the interrupt handler, and
-* re-dispatch them for processing at a later time in a normal context.
-*
+* eeh_event_handler - dispatch EEH events.
+* @dummy - unused
+*
+* The detection of a frozen slot can occur inside an interrupt,
+* where it can be hard to do anything about it. The goal of this
+* routine is to pull these detection events out of the context
+* of the interrupt handler, and re-dispatch them for processing
+* at a later time in a normal context.
*/
static int eeh_event_handler(void * dummy)
{

@@ -64,23 +70,24 @@ static int eeh_event_handler(void * dummy)
event = list_entry(eeh_eventlist.next, struct eeh_event, list);
list_del(&event->list);
}

-if (event)
-eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);

spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

if (event == NULL)
break;

+/* Serialize processing of EEH events */
+mutex_lock(&eeh_event_mutex);
+eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);

printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
pci_name(event->dev));

handle_eeh_events(event);

eeh_clear_slot(event->dn, EEH_MODE_RECOVERING);

pci_dev_put(event->dev);
kfree(event);
+mutex_unlock(&eeh_event_mutex);
}

return 0;

@@ -88,7 +95,6 @@ static int eeh_event_handler(void * dummy)

/**
* eeh_thread_launcher
-*
* @dummy - unused
*/
static void eeh_thread_launcher(void *dummy)

@@ -127,3 +127,103 @@ _GLOBAL(plpar_hcall_4out)

mtcrf 0xff,r0
blr /* return r3 = status */
+
+/* plpar_hcall_7arg_7ret(unsigned long opcode, R3
+unsigned long arg1, R4
+unsigned long arg2, R5
+unsigned long arg3, R6
+unsigned long arg4, R7
+unsigned long arg5, R8
+unsigned long arg6, R9
+unsigned long arg7, R10
+unsigned long *out1, 112(R1)
+unsigned long *out2, 110(R1)
+unsigned long *out3, 108(R1)
+unsigned long *out4, 106(R1)
+unsigned long *out5, 104(R1)
+unsigned long *out6, 102(R1)
+unsigned long *out7); 100(R1)
+*/
+_GLOBAL(plpar_hcall_7arg_7ret)
+HMT_MEDIUM
+
+mfcr r0
+stw r0,8(r1)
+
+HVSC /* invoke the hypervisor */
+
+lwz r0,8(r1)
+
+ld r11,STK_PARM(r11)(r1) /* Fetch r4 ret arg */
+std r4,0(r11)
+ld r11,STK_PARM(r12)(r1) /* Fetch r5 ret arg */
+std r5,0(r11)
+ld r11,STK_PARM(r13)(r1) /* Fetch r6 ret arg */
+std r6,0(r11)
+ld r11,STK_PARM(r14)(r1) /* Fetch r7 ret arg */
+std r7,0(r11)
+ld r11,STK_PARM(r15)(r1) /* Fetch r8 ret arg */
+std r8,0(r11)
+ld r11,STK_PARM(r16)(r1) /* Fetch r9 ret arg */
+std r9,0(r11)
+ld r11,STK_PARM(r17)(r1) /* Fetch r10 ret arg */
+std r10,0(r11)
+
+mtcrf 0xff,r0
+
+blr /* return r3 = status */
+
+/* plpar_hcall_9arg_9ret(unsigned long opcode, R3
+unsigned long arg1, R4
+unsigned long arg2, R5
+unsigned long arg3, R6
+unsigned long arg4, R7
+unsigned long arg5, R8
+unsigned long arg6, R9
+unsigned long arg7, R10
+unsigned long arg8, 112(R1)
+unsigned long arg9, 110(R1)
+unsigned long *out1, 108(R1)
+unsigned long *out2, 106(R1)
+unsigned long *out3, 104(R1)
+unsigned long *out4, 102(R1)
+unsigned long *out5, 100(R1)
+unsigned long *out6, 98(R1)
+unsigned long *out7); 96(R1)
+unsigned long *out8, 94(R1)
+unsigned long *out9, 92(R1)
+*/
+_GLOBAL(plpar_hcall_9arg_9ret)
+HMT_MEDIUM
+
+mfcr r0
+stw r0,8(r1)
+
+ld r11,STK_PARM(r11)(r1) /* put arg8 in R11 */
+ld r12,STK_PARM(r12)(r1) /* put arg9 in R12 */
+
+HVSC /* invoke the hypervisor */
+
+ld r0,STK_PARM(r13)(r1) /* Fetch r4 ret arg */
+stdx r4,r0,r0
+ld r0,STK_PARM(r14)(r1) /* Fetch r5 ret arg */
+stdx r5,r0,r0
+ld r0,STK_PARM(r15)(r1) /* Fetch r6 ret arg */
+stdx r6,r0,r0
+ld r0,STK_PARM(r16)(r1) /* Fetch r7 ret arg */
+stdx r7,r0,r0
+ld r0,STK_PARM(r17)(r1) /* Fetch r8 ret arg */
+stdx r8,r0,r0
+ld r0,STK_PARM(r18)(r1) /* Fetch r9 ret arg */
+stdx r9,r0,r0
+ld r0,STK_PARM(r19)(r1) /* Fetch r10 ret arg */
+stdx r10,r0,r0
+ld r0,STK_PARM(r20)(r1) /* Fetch r11 ret arg */
+stdx r11,r0,r0
+ld r0,STK_PARM(r21)(r1) /* Fetch r12 ret arg */
+stdx r12,r0,r0
+
+lwz r0,8(r1)
+mtcrf 0xff,r0
+
+blr /* return r3 = status */

@@ -41,7 +41,7 @@ int hvc_get_chars(uint32_t vtermno, char *buf, int count)
unsigned long got;

if (plpar_hcall(H_GET_TERM_CHAR, vtermno, 0, 0, 0, &got,
-(unsigned long *)buf, (unsigned long *)buf+1) == H_Success)
+(unsigned long *)buf, (unsigned long *)buf+1) == H_SUCCESS)
return got;
return 0;
}

@@ -69,9 +69,9 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)

ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0],
lbuf[1]);
-if (ret == H_Success)
+if (ret == H_SUCCESS)
return count;
-if (ret == H_Busy)
+if (ret == H_BUSY)
return 0;
return -EIO;
}

@@ -43,21 +43,21 @@ MODULE_VERSION(HVCS_ARCH_VERSION);
static int hvcs_convert(long to_convert)
{
switch (to_convert) {
-case H_Success:
+case H_SUCCESS:
return 0;
-case H_Parameter:
+case H_PARAMETER:
return -EINVAL;
-case H_Hardware:
+case H_HARDWARE:
return -EIO;
-case H_Busy:
-case H_LongBusyOrder1msec:
-case H_LongBusyOrder10msec:
-case H_LongBusyOrder100msec:
-case H_LongBusyOrder1sec:
-case H_LongBusyOrder10sec:
-case H_LongBusyOrder100sec:
+case H_BUSY:
+case H_LONG_BUSY_ORDER_1_MSEC:
+case H_LONG_BUSY_ORDER_10_MSEC:
+case H_LONG_BUSY_ORDER_100_MSEC:
+case H_LONG_BUSY_ORDER_1_SEC:
+case H_LONG_BUSY_ORDER_10_SEC:
+case H_LONG_BUSY_ORDER_100_SEC:
return -EBUSY;
-case H_Function: /* fall through */
+case H_FUNCTION: /* fall through */
default:
return -EPERM;
}

@@ -54,7 +54,8 @@ EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall_4out);
EXPORT_SYMBOL(plpar_hcall_norets);
EXPORT_SYMBOL(plpar_hcall_8arg_2ret);

+EXPORT_SYMBOL(plpar_hcall_7arg_7ret);
+EXPORT_SYMBOL(plpar_hcall_9arg_9ret);
extern void pSeries_find_serial_port(void);

@@ -72,7 +73,7 @@ static void udbg_hvsi_putc(char c)

do {
rc = plpar_put_term_char(vtermno, sizeof(packet), packet);
-} while (rc == H_Busy);
+} while (rc == H_BUSY);
}

static long hvsi_udbg_buf_len;

@@ -85,7 +86,7 @@ static int udbg_hvsi_getc_poll(void)

if (hvsi_udbg_buf_len == 0) {
rc = plpar_get_term_char(vtermno, &hvsi_udbg_buf_len, hvsi_udbg_buf);
-if (rc != H_Success || hvsi_udbg_buf[0] != 0xff) {
+if (rc != H_SUCCESS || hvsi_udbg_buf[0] != 0xff) {
/* bad read or non-data packet */
hvsi_udbg_buf_len = 0;
} else {

@@ -139,7 +140,7 @@ static void udbg_putcLP(char c)
buf[0] = c;
do {
rc = plpar_put_term_char(vtermno, 1, buf);
-} while(rc == H_Busy);
+} while(rc == H_BUSY);
}

/* Buffered chars getc */

@@ -158,7 +159,7 @@ static int udbg_getc_pollLP(void)
/* get some more chars. */
inbuflen = 0;
rc = plpar_get_term_char(vtermno, &inbuflen, buf);
-if (rc != H_Success)
+if (rc != H_SUCCESS)
inbuflen = 0; /* otherwise inbuflen is garbage */
}
if (inbuflen <= 0 || inbuflen > 16) {

@@ -304,7 +305,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,

lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v,
hpte_r, &slot, &dummy0, &dummy1);
-if (unlikely(lpar_rc == H_PTEG_Full)) {
+if (unlikely(lpar_rc == H_PTEG_FULL)) {
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" full\n");
return -1;

@@ -315,7 +316,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
* will fail. However we must catch the failure in hash_page
* or we will loop forever, so return -2 in this case.
*/
-if (unlikely(lpar_rc != H_Success)) {
+if (unlikely(lpar_rc != H_SUCCESS)) {
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" lpar err %d\n", lpar_rc);
return -2;

@@ -346,9 +347,9 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
/* don't remove a bolted entry */
lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
(0x1UL << 4), &dummy1, &dummy2);
-if (lpar_rc == H_Success)
+if (lpar_rc == H_SUCCESS)
return i;
-BUG_ON(lpar_rc != H_Not_Found);
+BUG_ON(lpar_rc != H_NOT_FOUND);

slot_offset++;
slot_offset &= 0x7;

@@ -391,14 +392,14 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,

lpar_rc = plpar_pte_protect(flags, slot, want_v & HPTE_V_AVPN);

-if (lpar_rc == H_Not_Found) {
+if (lpar_rc == H_NOT_FOUND) {
DBG_LOW("not found !\n");
return -1;
}

DBG_LOW("ok\n");

-BUG_ON(lpar_rc != H_Success);
+BUG_ON(lpar_rc != H_SUCCESS);

return 0;
}

@@ -417,7 +418,7 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)

lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

-BUG_ON(lpar_rc != H_Success);
+BUG_ON(lpar_rc != H_SUCCESS);

return dword0;
}

@@ -468,7 +469,7 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
flags = newpp & 7;
lpar_rc = plpar_pte_protect(flags, slot, 0);

-BUG_ON(lpar_rc != H_Success);
+BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,

@@ -484,10 +485,10 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
want_v = hpte_encode_v(va, psize);
lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v & HPTE_V_AVPN,
&dummy1, &dummy2);
-if (lpar_rc == H_Not_Found)
+if (lpar_rc == H_NOT_FOUND)
return;

-BUG_ON(lpar_rc != H_Success);
+BUG_ON(lpar_rc != H_SUCCESS);
}

/*

@@ -463,7 +463,7 @@ static void pseries_dedicated_idle_sleep(void)
* very low priority. The cede enables interrupts, which
* doesn't matter here.
*/
-if (!lppaca[cpu ^ 1].idle || poll_pending() == H_Pending)
+if (!lppaca[cpu ^ 1].idle || poll_pending() == H_PENDING)
cede_processor();

out:

@@ -258,7 +258,7 @@ EXPORT_SYMBOL(vio_find_node);
int vio_enable_interrupts(struct vio_dev *dev)
{
int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
-if (rc != H_Success)
+if (rc != H_SUCCESS)
printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
return rc;
}

@@ -267,7 +267,7 @@ EXPORT_SYMBOL(vio_enable_interrupts);
int vio_disable_interrupts(struct vio_dev *dev)
{
int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
-if (rc != H_Success)
+if (rc != H_SUCCESS)
printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
return rc;
}

@@ -168,7 +168,7 @@ static int pSeriesLP_xirr_info_get(int n_cpu)
unsigned long return_value;

lpar_rc = plpar_xirr(&return_value);
-if (lpar_rc != H_Success)
+if (lpar_rc != H_SUCCESS)
panic(" bad return code xirr - rc = %lx \n", lpar_rc);
return (int)return_value;
}

@@ -179,7 +179,7 @@ static void pSeriesLP_xirr_info_set(int n_cpu, int value)
unsigned long val64 = value & 0xffffffff;

lpar_rc = plpar_eoi(val64);
-if (lpar_rc != H_Success)
+if (lpar_rc != H_SUCCESS)
panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
val64);
}

@@ -189,7 +189,7 @@ void pSeriesLP_cppr_info(int n_cpu, u8 value)
unsigned long lpar_rc;

lpar_rc = plpar_cppr(value);
-if (lpar_rc != H_Success)
+if (lpar_rc != H_SUCCESS)
panic("bad return code cppr - rc = %lx\n", lpar_rc);
}

@@ -198,7 +198,7 @@ static void pSeriesLP_qirr_info(int n_cpu , u8 value)
unsigned long lpar_rc;

lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
-if (lpar_rc != H_Success)
+if (lpar_rc != H_SUCCESS)
panic("bad return code qirr - rc = %lx\n", lpar_rc);
}

@@ -904,7 +904,7 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
* It is possible the vty-server was removed after the irq was
* requested but before we have time to enable interrupts.
*/
-if (vio_enable_interrupts(vdev) == H_Success)
+if (vio_enable_interrupts(vdev) == H_SUCCESS)
return 0;
else {
printk(KERN_ERR "HVCS: int enable failed for"

@@ -235,7 +235,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc

lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

-if(lpar_rc != H_Success) {
+if(lpar_rc != H_SUCCESS) {
pool->free_map[free_index] = index;
pool->skbuff[index] = NULL;
pool->consumer_index--;

@@ -373,7 +373,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)

lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

-if(lpar_rc != H_Success) {
+if(lpar_rc != H_SUCCESS) {
ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
}

@@ -511,7 +511,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->filter_list_dma,
mac_address);

-if(lpar_rc != H_Success) {
+if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
adapter->buffer_list_dma,

@@ -527,7 +527,7 @@ static int ibmveth_open(struct net_device *netdev)
ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
do {
rc = h_free_logical_lan(adapter->vdev->unit_address);
-} while (H_isLongBusy(rc) || (rc == H_Busy));
+} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

ibmveth_cleanup(adapter);
return rc;

@@ -556,9 +556,9 @@ static int ibmveth_close(struct net_device *netdev)

do {
lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
-} while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
+} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

-if(lpar_rc != H_Success)
+if(lpar_rc != H_SUCCESS)
{
ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
lpar_rc);

@@ -693,9 +693,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
desc[4].desc,
desc[5].desc,
correlator);
-} while ((lpar_rc == H_Busy) && (retry_count--));
+} while ((lpar_rc == H_BUSY) && (retry_count--));

-if(lpar_rc != H_Success && lpar_rc != H_Dropped) {
+if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
int i;
ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
for(i = 0; i < 6; i++) {

@@ -786,14 +786,14 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
/* we think we are done - reenable interrupts, then check once more to make sure we are done */
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);

-ibmveth_assert(lpar_rc == H_Success);
+ibmveth_assert(lpar_rc == H_SUCCESS);

netif_rx_complete(netdev);

if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
{
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
-ibmveth_assert(lpar_rc == H_Success);
+ibmveth_assert(lpar_rc == H_SUCCESS);
more_work = 1;
goto restart_poll;
}

@@ -813,7 +813,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs

if(netif_rx_schedule_prep(netdev)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
-ibmveth_assert(lpar_rc == H_Success);
+ibmveth_assert(lpar_rc == H_SUCCESS);
__netif_rx_schedule(netdev);
}
return IRQ_HANDLED;

@@ -835,7 +835,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
IbmVethMcastEnableRecv |
IbmVethMcastDisableFiltering,
0);
-if(lpar_rc != H_Success) {
+if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
}
} else {

@@ -847,7 +847,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
IbmVethMcastDisableFiltering |
IbmVethMcastClearFilterTable,
0);
-if(lpar_rc != H_Success) {
+if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */

@@ -858,7 +858,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
-if(lpar_rc != H_Success) {
+if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
}
}

@@ -867,7 +867,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableFiltering,
0);
-if(lpar_rc != H_Success) {
+if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
}
}

@@ -80,7 +80,7 @@ void ibmvscsi_release_crq_queue(struct crq_queue *queue,
tasklet_kill(&hostdata->srp_task);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
-} while ((rc == H_Busy) || (H_isLongBusy(rc)));
+} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
dma_unmap_single(hostdata->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);

@@ -230,7 +230,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
rc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
-if (rc == H_Resource)
+if (rc == H_RESOURCE)
/* maybe kexecing and resource is busy. try a reset */
rc = ibmvscsi_reset_crq_queue(queue,
hostdata);

@@ -269,7 +269,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
req_irq_failed:
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
-} while ((rc == H_Busy) || (H_isLongBusy(rc)));
+} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
reg_crq_failed:
dma_unmap_single(hostdata->dev,
queue->msg_token,

@@ -295,7 +295,7 @@ int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
/* Re-enable the CRQ */
do {
rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
-} while ((rc == H_InProgress) || (rc == H_Busy) || (H_isLongBusy(rc)));
+} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

if (rc)
printk(KERN_ERR "ibmvscsi: Error %d enabling adapter\n", rc);

@@ -317,7 +317,7 @@ int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
/* Close the CRQ */
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
-} while ((rc == H_Busy) || (H_isLongBusy(rc)));
+} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

/* Clean out the queue */
memset(queue->msgs, 0x00, PAGE_SIZE);

@@ -60,23 +60,9 @@ void __init pci_addr_cache_build(void);
* device (including config space i/o). Call eeh_add_device_late
* to finish the eeh setup for this device.
*/
-void eeh_add_device_early(struct device_node *);
-void eeh_add_device_late(struct pci_dev *dev);
void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_tree_late(struct pci_bus *);

-/**
-* eeh_remove_device - undo EEH setup for the indicated pci device
-* @dev: pci device to be removed
-*
-* This routine should be called when a device is removed from
-* a running system (e.g. by hotplug or dlpar). It unregisters
-* the PCI device from the EEH subsystem. I/O errors affecting
-* this device will no longer be detected after this call; thus,
-* i/o errors affecting this slot may leave this device unusable.
-*/
-void eeh_remove_device(struct pci_dev *);
-
/**
* eeh_remove_device_recursive - undo EEH for device & children.
* @dev: pci device to be removed

@@ -116,12 +102,6 @@ static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *d

static inline void pci_addr_cache_build(void) { }

-static inline void eeh_add_device_early(struct device_node *dn) { }
-
-static inline void eeh_add_device_late(struct pci_dev *dev) { }
-
-static inline void eeh_remove_device(struct pci_dev *dev) { }
-
static inline void eeh_add_device_tree_early(struct device_node *dn) { }

static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }

@@ -4,47 +4,88 @@

#define HVSC .long 0x44000022

-#define H_Success 0
-#define H_Busy 1 /* Hardware busy -- retry later */
-#define H_Closed 2 /* Resource closed */
-#define H_Constrained 4 /* Resource request constrained to max allowed */
-#define H_InProgress 14 /* Kind of like busy */
-#define H_Pending 17 /* returned from H_POLL_PENDING */
-#define H_Continue 18 /* Returned from H_Join on success */
-#define H_LongBusyStartRange 9900 /* Start of long busy range */
-#define H_LongBusyOrder1msec 9900 /* Long busy, hint that 1msec is a good time to retry */
-#define H_LongBusyOrder10msec 9901 /* Long busy, hint that 10msec is a good time to retry */
-#define H_LongBusyOrder100msec 9902 /* Long busy, hint that 100msec is a good time to retry */
-#define H_LongBusyOrder1sec 9903 /* Long busy, hint that 1sec is a good time to retry */
-#define H_LongBusyOrder10sec 9904 /* Long busy, hint that 10sec is a good time to retry */
-#define H_LongBusyOrder100sec 9905 /* Long busy, hint that 100sec is a good time to retry */
-#define H_LongBusyEndRange 9905 /* End of long busy range */
-#define H_Hardware -1 /* Hardware error */
-#define H_Function -2 /* Function not supported */
-#define H_Privilege -3 /* Caller not privileged */
-#define H_Parameter -4 /* Parameter invalid, out-of-range or conflicting */
-#define H_Bad_Mode -5 /* Illegal msr value */
-#define H_PTEG_Full -6 /* PTEG is full */
-#define H_Not_Found -7 /* PTE was not found" */
-#define H_Reserved_DABR -8 /* DABR address is reserved by the hypervisor on this processor" */
-#define H_NoMem -9
-#define H_Authority -10
-#define H_Permission -11
-#define H_Dropped -12
-#define H_SourceParm -13
-#define H_DestParm -14
-#define H_RemoteParm -15
-#define H_Resource -16
+#define H_SUCCESS 0
+#define H_BUSY 1 /* Hardware busy -- retry later */
+#define H_CLOSED 2 /* Resource closed */
+#define H_NOT_AVAILABLE 3
+#define H_CONSTRAINED 4 /* Resource request constrained to max allowed */
+#define H_PARTIAL 5
+#define H_IN_PROGRESS 14 /* Kind of like busy */
+#define H_PAGE_REGISTERED 15
+#define H_PARTIAL_STORE 16
+#define H_PENDING 17 /* returned from H_POLL_PENDING */
+#define H_CONTINUE 18 /* Returned from H_Join on success */
+#define H_LONG_BUSY_START_RANGE 9900 /* Start of long busy range */
+#define H_LONG_BUSY_ORDER_1_MSEC 9900 /* Long busy, hint that 1msec \
+is a good time to retry */
+#define H_LONG_BUSY_ORDER_10_MSEC 9901 /* Long busy, hint that 10msec \
+is a good time to retry */
+#define H_LONG_BUSY_ORDER_100_MSEC 9902 /* Long busy, hint that 100msec \
+is a good time to retry */
+#define H_LONG_BUSY_ORDER_1_SEC 9903 /* Long busy, hint that 1sec \
+is a good time to retry */
+#define H_LONG_BUSY_ORDER_10_SEC 9904 /* Long busy, hint that 10sec \
+is a good time to retry */
+#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \
+is a good time to retry */
+#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */
+#define H_HARDWARE -1 /* Hardware error */
+#define H_FUNCTION -2 /* Function not supported */
+#define H_PRIVILEGE -3 /* Caller not privileged */
+#define H_PARAMETER -4 /* Parameter invalid, out-of-range or conflicting */
+#define H_BAD_MODE -5 /* Illegal msr value */
+#define H_PTEG_FULL -6 /* PTEG is full */
+#define H_NOT_FOUND -7 /* PTE was not found" */
+#define H_RESERVED_DABR -8 /* DABR address is reserved by the hypervisor on this processor" */
+#define H_NO_MEM -9
+#define H_AUTHORITY -10
+#define H_PERMISSION -11
+#define H_DROPPED -12
+#define H_SOURCE_PARM -13
+#define H_DEST_PARM -14
+#define H_REMOTE_PARM -15
+#define H_RESOURCE -16
+#define H_ADAPTER_PARM -17
+#define H_RH_PARM -18
+#define H_RCQ_PARM -19
+#define H_SCQ_PARM -20
+#define H_EQ_PARM -21
+#define H_RT_PARM -22
+#define H_ST_PARM -23
+#define H_SIGT_PARM -24
+#define H_TOKEN_PARM -25
+#define H_MLENGTH_PARM -27
+#define H_MEM_PARM -28
+#define H_MEM_ACCESS_PARM -29
+#define H_ATTR_PARM -30
+#define H_PORT_PARM -31
+#define H_MCG_PARM -32
+#define H_VL_PARM -33
+#define H_TSIZE_PARM -34
+#define H_TRACE_PARM -35
+
+#define H_MASK_PARM -37
+#define H_MCG_FULL -38
+#define H_ALIAS_EXIST -39
+#define H_P_COUNTER -40
+#define H_TABLE_FULL -41
+#define H_ALT_TABLE -42
+#define H_MR_CONDITION -43
+#define H_NOT_ENOUGH_RESOURCES -44
+#define H_R_STATE -45
+#define H_RESCINDEND -46

/* Long Busy is a condition that can be returned by the firmware
* when a call cannot be completed now, but the identical call
* should be retried later. This prevents calls blocking in the
-* firmware for long periods of time. Annoyingly the firmware can return
+* firmware for long periods of time. Annoyingly the firmware can return
* a range of return codes, hinting at how long we should wait before
* retrying. If you don't care for the hint, the macro below is a good
* way to check for the long_busy return codes
*/
-#define H_isLongBusy(x) ((x >= H_LongBusyStartRange) && (x <= H_LongBusyEndRange))
+#define H_IS_LONG_BUSY(x) ((x >= H_LONG_BUSY_START_RANGE) \
+&& (x <= H_LONG_BUSY_END_RANGE))

/* Flags */
#define H_LARGE_PAGE (1UL<<(63-16))

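For reference, the long-busy codes above are consumed by retry loops elsewhere in this merge (see the ibmveth and ibmvscsi hunks). A minimal sketch of the pattern against the renamed macros; the H_FREE_CRQ call and vdev pointer are taken from the ibmvscsi hunks, and the -EIO mapping is only illustrative:

long rc;

/* retry while firmware says it is busy, as hinted by the long-busy range */
do {
	rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || H_IS_LONG_BUSY(rc));

if (rc != H_SUCCESS)
	return -EIO;	/* map the hcall status to an errno, as hvcs_convert() does */
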
@@ -66,6 +107,9 @@
#define H_DABRX_KERNEL (1UL<<(63-62))
#define H_DABRX_USER (1UL<<(63-63))

+/* Each control block has to be on a 4K bondary */
+#define H_CB_ALIGNMENT 4096
+
/* pSeries hypervisor opcodes */
#define H_REMOVE 0x04
#define H_ENTER 0x08

@@ -99,25 +143,52 @@
#define H_PERFMON 0x7c
#define H_MIGRATE_DMA 0x78
#define H_REGISTER_VPA 0xDC
-#define H_CEDE 0xE0
+#define H_CEDE 0xE0
#define H_CONFER 0xE4
-#define H_PROD 0xE8
+#define H_PROD 0xE8
#define H_GET_PPP 0xEC
#define H_SET_PPP 0xF0
#define H_PURR 0xF4
-#define H_PIC 0xF8
+#define H_PIC 0xF8
#define H_REG_CRQ 0xFC
#define H_FREE_CRQ 0x100
#define H_VIO_SIGNAL 0x104
#define H_SEND_CRQ 0x108
-#define H_COPY_RDMA 0x110
+#define H_COPY_RDMA 0x110
#define H_SET_XDABR 0x134
#define H_STUFF_TCE 0x138
#define H_PUT_TCE_INDIRECT 0x13C
#define H_VTERM_PARTNER_INFO 0x150
#define H_REGISTER_VTERM 0x154
#define H_FREE_VTERM 0x158
-#define H_POLL_PENDING 0x1D8
+#define H_RESET_EVENTS 0x15C
+#define H_ALLOC_RESOURCE 0x160
+#define H_FREE_RESOURCE 0x164
+#define H_MODIFY_QP 0x168
+#define H_QUERY_QP 0x16C
+#define H_REREGISTER_PMR 0x170
+#define H_REGISTER_SMR 0x174
+#define H_QUERY_MR 0x178
+#define H_QUERY_MW 0x17C
+#define H_QUERY_HCA 0x180
+#define H_QUERY_PORT 0x184
+#define H_MODIFY_PORT 0x188
+#define H_DEFINE_AQP1 0x18C
+#define H_GET_TRACE_BUFFER 0x190
+#define H_DEFINE_AQP0 0x194
+#define H_RESIZE_MR 0x198
+#define H_ATTACH_MCQP 0x19C
+#define H_DETACH_MCQP 0x1A0
+#define H_CREATE_RPT 0x1A4
+#define H_REMOVE_RPT 0x1A8
+#define H_REGISTER_RPAGES 0x1AC
+#define H_DISABLE_AND_GETC 0x1B0
+#define H_ERROR_DATA 0x1B4
+#define H_GET_HCA_INFO 0x1B8
+#define H_GET_PERF_COUNT 0x1BC
+#define H_MANAGE_TRACE 0x1C0
+#define H_QUERY_INT_STATE 0x1E4
+#define H_POLL_PENDING 0x1D8
#define H_JOIN 0x298
#define H_ENABLE_CRQ 0x2B0

@@ -152,7 +223,7 @@ long plpar_hcall_norets(unsigned long opcode, ...);
*/
long plpar_hcall_8arg_2ret(unsigned long opcode,
unsigned long arg1,
-unsigned long arg2,
+unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5,

@@ -176,6 +247,42 @@ long plpar_hcall_4out(unsigned long opcode,
unsigned long *out3,
unsigned long *out4);

+long plpar_hcall_7arg_7ret(unsigned long opcode,
+unsigned long arg1,
+unsigned long arg2,
+unsigned long arg3,
+unsigned long arg4,
+unsigned long arg5,
+unsigned long arg6,
+unsigned long arg7,
+unsigned long *out1,
+unsigned long *out2,
+unsigned long *out3,
+unsigned long *out4,
+unsigned long *out5,
+unsigned long *out6,
+unsigned long *out7);
+
+long plpar_hcall_9arg_9ret(unsigned long opcode,
+unsigned long arg1,
+unsigned long arg2,
+unsigned long arg3,
+unsigned long arg4,
+unsigned long arg5,
+unsigned long arg6,
+unsigned long arg7,
+unsigned long arg8,
+unsigned long arg9,
+unsigned long *out1,
+unsigned long *out2,
+unsigned long *out3,
+unsigned long *out4,
+unsigned long *out5,
+unsigned long *out6,
+unsigned long *out7,
+unsigned long *out8,
+unsigned long *out9);
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HVCALL_H */

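For orientation, a rough sketch of how the new 9-in/9-out wrapper is called, following the prototype added above; the choice of H_QUERY_HCA and the adapter_handle argument are illustrative only and not taken from this diff:

unsigned long out[9] = { 0 };
long rc;

/* opcode, nine input arguments, then nine pointers for the returned registers */
rc = plpar_hcall_9arg_9ret(H_QUERY_HCA,
			adapter_handle, 0, 0, 0, 0, 0, 0, 0, 0,
			&out[0], &out[1], &out[2], &out[3], &out[4],
			&out[5], &out[6], &out[7], &out[8]);
if (rc != H_SUCCESS)
	return -EIO;
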