Auto-update from upstream
commit 3e356b2660
@@ -1636,6 +1636,15 @@ L: ldm-devel@lists.sourceforge.net
W: http://ldm.sourceforge.net
S: Maintained

LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
P: Eric Moore
M: Eric.Moore@lsil.com
M: support@lsil.com
L: mpt_linux_developer@lsil.com
L: linux-scsi@vger.kernel.org
W: http://www.lsilogic.com/support
S: Supported

LSILOGIC/SYMBIOS/NCR 53C8XX and 53C1010 PCI-SCSI drivers
P: Matthew Wilcox
M: matthew@wil.cx
Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 15
EXTRAVERSION =-rc4
EXTRAVERSION =-rc5
NAME=Affluent Albatross

# *DOCUMENTATION*
@@ -5,7 +5,7 @@
# Rewritten to use lists instead of if-statements.
#

obj-$(CONFIG_PCI) += pci/ usb/
obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/
obj-y += video/

@@ -49,6 +49,7 @@ obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
obj-$(CONFIG_PARIDE) += block/paride/
obj-$(CONFIG_TC) += tc/
obj-$(CONFIG_USB) += usb/
obj-$(CONFIG_PCI) += usb/
obj-$(CONFIG_USB_GADGET) += usb/gadget/
obj-$(CONFIG_GAMEPORT) += input/gameport/
obj-$(CONFIG_INPUT) += input/
@@ -169,15 +169,11 @@ acpi_processor_power_activate(struct acpi_processor *pr,

static void acpi_safe_halt(void)
{
int polling = test_thread_flag(TIF_POLLING_NRFLAG);
if (polling) {
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
}
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
if (!need_resched())
safe_halt();
if (polling)
set_thread_flag(TIF_POLLING_NRFLAG);
set_thread_flag(TIF_POLLING_NRFLAG);
}

static atomic_t c3_cpu_count;

@@ -295,6 +291,16 @@ static void acpi_processor_idle(void)
* ------
* Invoke the current Cx state to put the processor to sleep.
*/
if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
if (need_resched()) {
set_thread_flag(TIF_POLLING_NRFLAG);
local_irq_enable();
return;
}
}

switch (cx->type) {

case ACPI_STATE_C1:

@@ -327,6 +333,7 @@ static void acpi_processor_idle(void)
t2 = inl(acpi_fadt.xpm_tmr_blk.address);
/* Re-enable interrupts */
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
/* Compute time (ticks) that we were actually asleep */
sleep_ticks =
ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;

@@ -366,6 +373,7 @@ static void acpi_processor_idle(void)

/* Re-enable interrupts */
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
/* Compute time (ticks) that we were actually asleep */
sleep_ticks =
ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
@@ -567,6 +567,7 @@ struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,

@@ -711,6 +712,7 @@ struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -1118,6 +1118,65 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
return -1;
}

int
mpt_alt_ioc_wait(MPT_ADAPTER *ioc)
{
int loop_count = 30 * 4; /* Wait 30 seconds */
int status = -1; /* -1 means failed to get board READY */

do {
spin_lock(&ioc->initializing_hba_lock);
if (ioc->initializing_hba_lock_flag == 0) {
ioc->initializing_hba_lock_flag=1;
spin_unlock(&ioc->initializing_hba_lock);
status = 0;
break;
}
spin_unlock(&ioc->initializing_hba_lock);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ/4);
} while (--loop_count);

return status;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mpt_bringup_adapter - This is a wrapper function for mpt_do_ioc_recovery
* @ioc: Pointer to MPT adapter structure
* @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
*
* This routine performs all the steps necessary to bring the IOC
* to a OPERATIONAL state.
*
* Special Note: This function was added with spin lock's so as to allow
* the dv(domain validation) work thread to succeed on the other channel
* that maybe occuring at the same time when this function is called.
* Without this lock, the dv would fail when message frames were
* requested during hba bringup on the alternate ioc.
*/
static int
mpt_bringup_adapter(MPT_ADAPTER *ioc, int sleepFlag)
{
int r;

if(ioc->alt_ioc) {
if((r=mpt_alt_ioc_wait(ioc->alt_ioc)!=0))
return r;
}

r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
CAN_SLEEP);

if(ioc->alt_ioc) {
spin_lock(&ioc->alt_ioc->initializing_hba_lock);
ioc->alt_ioc->initializing_hba_lock_flag=0;
spin_unlock(&ioc->alt_ioc->initializing_hba_lock);
}

return r;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mpt_attach - Install a PCI intelligent MPT adapter.

@@ -1186,6 +1245,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->pcidev = pdev;
ioc->diagPending = 0;
spin_lock_init(&ioc->diagLock);
spin_lock_init(&ioc->initializing_hba_lock);

/* Initialize the event logging.
*/

@@ -1408,8 +1468,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
*/
mpt_detect_bound_ports(ioc, pdev);

if ((r = mpt_do_ioc_recovery(ioc,
MPT_HOSTEVENT_IOC_BRINGUP, CAN_SLEEP)) != 0) {
if ((r = mpt_bringup_adapter(ioc, CAN_SLEEP)) != 0){
printk(KERN_WARNING MYNAM
": WARNING - %s did not initialize properly! (%d)\n",
ioc->name, r);

@@ -6298,6 +6357,7 @@ EXPORT_SYMBOL(mpt_read_ioc_pg_3);
EXPORT_SYMBOL(mpt_alloc_fw_memory);
EXPORT_SYMBOL(mpt_free_fw_memory);
EXPORT_SYMBOL(mptbase_sas_persist_operation);
EXPORT_SYMBOL(mpt_alt_ioc_wait);


/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -611,6 +611,8 @@ typedef struct _MPT_ADAPTER
int DoneCtx;
int TaskCtx;
int InternalCtx;
spinlock_t initializing_hba_lock;
int initializing_hba_lock_flag;
struct list_head list;
struct net_device *netdev;
struct list_head sas_topology;

@@ -1001,6 +1003,7 @@ extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
extern int mpt_alt_ioc_wait(MPT_ADAPTER *ioc);

/*
* Public data decl's...
@@ -4162,6 +4162,12 @@ mptscsih_domainValidation(void *arg)
}
}

if(mpt_alt_ioc_wait(hd->ioc)!=0) {
ddvprintk((MYIOC_s_WARN_FMT "alt_ioc busy!\n",
hd->ioc->name));
continue;
}

if (mptscsih_doDv(hd, 0, id) == 1) {
/* Untagged device was busy, try again
*/

@@ -4173,6 +4179,10 @@ mptscsih_domainValidation(void *arg)
hd->ioc->spi_data.dvStatus[id] &= ~(MPT_SCSICFG_DV_NOT_DONE | MPT_SCSICFG_DV_PENDING);
}

spin_lock(&hd->ioc->initializing_hba_lock);
hd->ioc->initializing_hba_lock_flag=0;
spin_unlock(&hd->ioc->initializing_hba_lock);

if (isPhysDisk) {
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
if (hd->ioc->raid_data.isRaid & (1 << ii)) {
@@ -996,6 +996,20 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
spin_lock_init(&adapter->fsf_req_list_lock);
INIT_LIST_HEAD(&adapter->fsf_req_list_head);

/* initialize debug locks */

spin_lock_init(&adapter->erp_dbf_lock);
spin_lock_init(&adapter->hba_dbf_lock);
spin_lock_init(&adapter->san_dbf_lock);
spin_lock_init(&adapter->scsi_dbf_lock);

/* initialize error recovery stuff */

rwlock_init(&adapter->erp_lock);
sema_init(&adapter->erp_ready_sem, 0);
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);

/* initialize abort lock */
rwlock_init(&adapter->abort_lock);

@@ -926,7 +926,6 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
char dbf_name[DEBUG_MAX_NAME_LEN];

/* debug feature area which records recovery activity */
spin_lock_init(&adapter->erp_dbf_lock);
sprintf(dbf_name, "zfcp_%s_erp", zfcp_get_busid_by_adapter(adapter));
adapter->erp_dbf = debug_register(dbf_name, dbfsize, 2,
sizeof(struct zfcp_erp_dbf_record));

@@ -936,7 +935,6 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
debug_set_level(adapter->erp_dbf, 3);

/* debug feature area which records HBA (FSF and QDIO) conditions */
spin_lock_init(&adapter->hba_dbf_lock);
sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter));
adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1,
sizeof(struct zfcp_hba_dbf_record));

@@ -947,7 +945,6 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
debug_set_level(adapter->hba_dbf, 3);

/* debug feature area which records SAN command failures and recovery */
spin_lock_init(&adapter->san_dbf_lock);
sprintf(dbf_name, "zfcp_%s_san", zfcp_get_busid_by_adapter(adapter));
adapter->san_dbf = debug_register(dbf_name, dbfsize, 1,
sizeof(struct zfcp_san_dbf_record));

@@ -958,7 +955,6 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
debug_set_level(adapter->san_dbf, 6);

/* debug feature area which records SCSI command failures and recovery */
spin_lock_init(&adapter->scsi_dbf_lock);
sprintf(dbf_name, "zfcp_%s_scsi", zfcp_get_busid_by_adapter(adapter));
adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1,
sizeof(struct zfcp_scsi_dbf_record));
@@ -1071,11 +1071,6 @@ zfcp_erp_thread_setup(struct zfcp_adapter *adapter)

atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);

rwlock_init(&adapter->erp_lock);
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);
sema_init(&adapter->erp_ready_sem, 0);

retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
if (retval < 0) {
ZFCP_LOG_NORMAL("error: creation of erp thread failed for "

@@ -2248,29 +2243,26 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
return retval;
}

/*
* function: zfcp_fsf_init
*
* purpose: initializes FSF operation for the specified adapter
*
* returns: 0 - succesful initialization of FSF operation
* !0 - failed to initialize FSF operation
*/
static int
zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
{
int xconfig, xport;
int retval;

if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&erp_action->adapter->status)) {
if ((atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&erp_action->adapter->status)) &&
(erp_action->adapter->adapter_features &
FSF_FEATURE_HBAAPI_MANAGEMENT)) {
zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
atomic_set(&erp_action->adapter->erp_counter, 0);
return ZFCP_ERP_FAILED;
}

xconfig = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
xport = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
if ((xconfig == ZFCP_ERP_FAILED) || (xport == ZFCP_ERP_FAILED))
retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
if (retval == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;

retval = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
if (retval == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;

return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);

@@ -2359,41 +2351,29 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
static int
zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
{
int retval = ZFCP_ERP_SUCCEEDED;
int ret;
int retries;
int sleep;
struct zfcp_adapter *adapter = erp_action->adapter;

atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);

for (retries = 0; ; retries++) {
ZFCP_LOG_DEBUG("Doing exchange port data\n");
retries = 0;
do {
write_lock(&adapter->erp_lock);
zfcp_erp_action_to_running(erp_action);
write_unlock(&adapter->erp_lock);
zfcp_erp_timeout_init(erp_action);
if (zfcp_fsf_exchange_port_data(erp_action, adapter, NULL)) {
retval = ZFCP_ERP_FAILED;
debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
ZFCP_LOG_INFO("error: initiation of exchange of "
"port data failed for adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
break;
ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL);
if (ret == -EOPNOTSUPP) {
debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp");
return ZFCP_ERP_SUCCEEDED;
} else if (ret) {
debug_text_event(adapter->erp_dbf, 3, "a_xport_failed");
return ZFCP_ERP_FAILED;
}
debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok");
ZFCP_LOG_DEBUG("Xchange underway\n");
debug_text_event(adapter->erp_dbf, 6, "a_xport_ok");

/*
* Why this works:
* Both the normal completion handler as well as the timeout
* handler will do an 'up' when the 'exchange port data'
* request completes or times out. Thus, the signal to go on
* won't be lost utilizing this semaphore.
* Furthermore, this 'adapter_reopen' action is
* guaranteed to be the only action being there (highest action
* which prevents other actions from being created).
* Resulting from that, the wake signal recognized here
* _must_ be the one belonging to the 'exchange port
* data' request.
*/
down(&adapter->erp_ready_sem);
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
ZFCP_LOG_INFO("error: exchange of port data "

@@ -2401,29 +2381,19 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
zfcp_get_busid_by_adapter(adapter));
break;
}

if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&adapter->status))
break;

ZFCP_LOG_DEBUG("host connection still initialising... "
"waiting and retrying...\n");
/* sleep a little bit before retry */
sleep = retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES ?
ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP :
ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP;
msleep(jiffies_to_msecs(sleep));
}
if (retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES) {
sleep = ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP;
retries++;
} else
sleep = ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP;
schedule_timeout(sleep);
} while (1);

if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&adapter->status)) {
ZFCP_LOG_INFO("error: exchange of port data for "
"adapter %s failed\n",
zfcp_get_busid_by_adapter(adapter));
retval = ZFCP_ERP_FAILED;
}

return retval;
return ZFCP_ERP_SUCCEEDED;
}

/*
@@ -554,6 +554,17 @@ static void
zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
struct fsf_link_down_info *link_down)
{
if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&adapter->status))
return;

atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);

if (link_down == NULL) {
zfcp_erp_adapter_reopen(adapter, 0);
return;
}

switch (link_down->error_code) {
case FSF_PSQ_LINK_NO_LIGHT:
ZFCP_LOG_NORMAL("The local link to adapter %s is down "

@@ -634,20 +645,15 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
link_down->explanation_code,
link_down->vendor_specific_code);

if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&adapter->status)) {
atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&adapter->status);
switch (link_down->error_code) {
case FSF_PSQ_LINK_NO_LIGHT:
case FSF_PSQ_LINK_WRAP_PLUG:
case FSF_PSQ_LINK_NO_FCP:
case FSF_PSQ_LINK_FIRMWARE_UPDATE:
zfcp_erp_adapter_reopen(adapter, 0);
break;
default:
zfcp_erp_adapter_failed(adapter);
}
switch (link_down->error_code) {
case FSF_PSQ_LINK_NO_LIGHT:
case FSF_PSQ_LINK_WRAP_PLUG:
case FSF_PSQ_LINK_NO_FCP:
case FSF_PSQ_LINK_FIRMWARE_UPDATE:
zfcp_erp_adapter_reopen(adapter, 0);
break;
default:
zfcp_erp_adapter_failed(adapter);
}
}

@@ -919,30 +925,36 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
ZFCP_LOG_INFO("Physical link to adapter %s is down\n",
zfcp_get_busid_by_adapter(adapter));
zfcp_fsf_link_down_info_eval(adapter,
(struct fsf_link_down_info *)
&status_buffer->payload);
break;
case FSF_STATUS_READ_SUB_FDISC_FAILED:
ZFCP_LOG_INFO("Local link to adapter %s is down "
"due to failed FDISC login\n",
zfcp_get_busid_by_adapter(adapter));
zfcp_get_busid_by_adapter(adapter));
zfcp_fsf_link_down_info_eval(adapter,
(struct fsf_link_down_info *)
&status_buffer->payload);
break;
case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
ZFCP_LOG_INFO("Local link to adapter %s is down "
"due to firmware update on adapter\n",
zfcp_get_busid_by_adapter(adapter));
zfcp_fsf_link_down_info_eval(adapter, NULL);
break;
default:
ZFCP_LOG_INFO("Local link to adapter %s is down "
"due to unknown reason\n",
zfcp_get_busid_by_adapter(adapter));
zfcp_fsf_link_down_info_eval(adapter, NULL);
};
zfcp_fsf_link_down_info_eval(adapter,
(struct fsf_link_down_info *) &status_buffer->payload);
break;

case FSF_STATUS_READ_LINK_UP:
ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. "
"Restarting operations on this adapter\n",
zfcp_get_busid_by_adapter(adapter));
"Restarting operations on this adapter\n",
zfcp_get_busid_by_adapter(adapter));
/* All ports should be marked as ready to run again */
zfcp_erp_modify_adapter_status(adapter,
ZFCP_STATUS_COMMON_RUNNING,

@@ -2191,13 +2203,10 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
return -EOPNOTSUPP;
}

timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
if (!timer)
return -ENOMEM;

/* setup new FSF request */
retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
0, 0, &lock_flags, &fsf_req);
erp_action ? ZFCP_REQ_AUTO_CLEANUP : 0,
0, &lock_flags, &fsf_req);
if (retval < 0) {
ZFCP_LOG_INFO("error: Out of resources. Could not create an "
"exchange port data request for"

@@ -2205,25 +2214,33 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
zfcp_get_busid_by_adapter(adapter));
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
goto out;
}

if (erp_action) {
erp_action->fsf_req = fsf_req;
fsf_req->erp_action = erp_action;
return retval;
}

if (data)
fsf_req->data = (unsigned long) data;
fsf_req->data = (unsigned long) data;

sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

init_timer(timer);
timer->function = zfcp_fsf_request_timeout_handler;
timer->data = (unsigned long) adapter;
timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
if (erp_action) {
erp_action->fsf_req = fsf_req;
fsf_req->erp_action = erp_action;
timer = &erp_action->timer;
} else {
timer = kmalloc(sizeof(struct timer_list), GFP_ATOMIC);
if (!timer) {
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
zfcp_fsf_req_free(fsf_req);
return -ENOMEM;
}
init_timer(timer);
timer->function = zfcp_fsf_request_timeout_handler;
timer->data = (unsigned long) adapter;
timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
}

retval = zfcp_fsf_req_send(fsf_req, timer);
if (retval) {

@@ -2233,23 +2250,22 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
zfcp_fsf_req_free(fsf_req);
if (erp_action)
erp_action->fsf_req = NULL;
else
kfree(timer);
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
goto out;
return retval;
}

ZFCP_LOG_DEBUG("Exchange Port Data request initiated (adapter %s)\n",
zfcp_get_busid_by_adapter(adapter));
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);

write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);

wait_event(fsf_req->completion_wq,
fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
del_timer_sync(timer);
zfcp_fsf_req_free(fsf_req);
out:
kfree(timer);
if (!erp_action) {
wait_event(fsf_req->completion_wq,
fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
del_timer_sync(timer);
zfcp_fsf_req_free(fsf_req);
kfree(timer);
}
return retval;
}

@@ -179,7 +179,7 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp)
struct zfcp_adapter *adapter;
struct zfcp_unit *unit;
unsigned long flags;
int retval = -ENODEV;
int retval = -ENXIO;

adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
if (!adapter)
@@ -325,6 +325,8 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
* translations ( 64/32, 128/32, 255/63 ).
*/
buf = scsi_bios_ptable(bdev);
if (!buf)
return 0;
if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
struct partition *first = (struct partition * )buf;
struct partition *entry = first;
@@ -2105,7 +2105,7 @@ ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
scmd_id(cmd),
scmd_channel(cmd) + 'A',
CAM_LUN_WILDCARD,
SCB_LIST_NULL, ROLE_INITIATOR) == 0)
SCB_LIST_NULL, ROLE_INITIATOR))
break;
}
}

@@ -2169,7 +2169,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
scmd_channel(cmd) + 'A',
CAM_LUN_WILDCARD,
SCB_LIST_NULL, ROLE_INITIATOR) == 0)
SCB_LIST_NULL, ROLE_INITIATOR))
break;
}
}
@@ -2239,7 +2239,7 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
struct scsi_cmnd *cmd = qc->scsicmd;

if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN)
return 1;
goto invalid_fld;

/*
* 12 and 16 byte CDBs use different offsets to

@@ -2301,7 +2301,7 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
*/
if ((tf->command == ATA_CMD_SET_FEATURES)
&& (tf->feature == SETFEATURES_XFER))
return 1;
goto invalid_fld;

/*
* Set flags so that all registers will be written,

@@ -2322,6 +2322,11 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
qc->nsect = cmd->bufflen / ATA_SECT_SIZE;

return 0;

invalid_fld:
ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x00);
/* "Invalid field in cdb" */
return 1;
}

/**
@@ -542,17 +542,10 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)

void scsi_next_command(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct request_queue *q = sdev->request_queue;

/* need to hold a reference on the device before we let go of the cmd */
get_device(&sdev->sdev_gendev);
struct request_queue *q = cmd->device->request_queue;

scsi_put_command(cmd);
scsi_run_queue(q);

/* ok to remove device now */
put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
@@ -812,12 +812,10 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
if (!scsi_device_sync(sdev) && !scsi_device_dt(sdev))
return;

/* see if the device has an echo buffer. If it does we can
* do the SPI pattern write tests */

len = 0;
if (scsi_device_dt(sdev))
len = spi_dv_device_get_echo_buffer(sdev, buffer);
/* len == -1 is the signal that we need to ascertain the
* presence of an echo buffer before trying to use it. len ==
* 0 means we don't have an echo buffer */
len = -1;

retry:

@@ -840,11 +838,23 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
if (spi_min_period(starget) == 8)
DV_SET(pcomp_en, 1);
}
/* Do the read only INQUIRY tests */
spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
spi_dv_device_compare_inquiry);
/* See if we actually managed to negotiate and sustain DT */
if (i->f->get_dt)
i->f->get_dt(starget);

if (len == 0) {
/* see if the device has an echo buffer. If it does we can do
* the SPI pattern write tests. Because of some broken
* devices, we *only* try this on a device that has actually
* negotiated DT */

if (len == -1 && spi_dt(starget))
len = spi_dv_device_get_echo_buffer(sdev, buffer);

if (len <= 0) {
starget_printk(KERN_INFO, starget, "Domain Validation skipping write tests\n");
spi_dv_retrain(sdev, buffer, buffer + len,
spi_dv_device_compare_inquiry);
return;
}

@@ -1860,9 +1860,11 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
unlock_page(pages[j]); */
res = 0;
out_unmap:
if (res > 0)
if (res > 0) {
for (j=0; j < res; j++)
page_cache_release(pages[j]);
res = 0;
}
kfree(pages);
return res;
}

@@ -1878,8 +1880,6 @@ st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
for (i=0; i < nr_pages; i++) {
struct page *page = sgl[i].page;

/* XXX: just for debug. Remove when PageReserved is removed */
BUG_ON(PageReserved(page));
if (dirtied)
SetPageDirty(page);
/* unlock_page(page); */
@@ -4509,6 +4509,7 @@ static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pa
if (res > 0) {
for (j=0; j < res; j++)
page_cache_release(pages[j]);
res = 0;
}
kfree(pages);
return res;

@@ -4524,8 +4525,6 @@ static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_p
for (i=0; i < nr_pages; i++) {
struct page *page = sgl[i].page;

/* XXX: just for debug. Remove when PageReserved is removed */
BUG_ON(PageReserved(page));
if (dirtied)
SetPageDirty(page);
/* FIXME: cache flush missing for rw==READ
@@ -2086,6 +2086,7 @@ static void sym2_set_dt(struct scsi_target *starget, int dt)
tp->tgoal.check_nego = 1;
}

#if 0
static void sym2_set_iu(struct scsi_target *starget, int iu)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

@@ -2111,7 +2112,7 @@ static void sym2_set_qas(struct scsi_target *starget, int qas)
tp->tgoal.qas = 0;
tp->tgoal.check_nego = 1;
}

#endif

static struct spi_function_template sym2_transport_functions = {
.set_offset = sym2_set_offset,

@@ -2122,10 +2123,12 @@ static struct spi_function_template sym2_transport_functions = {
.show_width = 1,
.set_dt = sym2_set_dt,
.show_dt = 1,
#if 0
.set_iu = sym2_set_iu,
.show_iu = 1,
.set_qas = sym2_set_qas,
.show_qas = 1,
#endif
.get_signalling = sym2_get_signalling,
};

@@ -1287,6 +1287,7 @@ dentry->d_parent->d_name.name, dentry->d_name.name);
nfs_begin_data_update(dentry->d_inode);
error = NFS_PROTO(dir)->rename(dir, &dentry->d_name,
dir, &qsilly);
nfs_mark_for_revalidate(dentry->d_inode);
nfs_end_data_update(dentry->d_inode);
} else
error = NFS_PROTO(dir)->rename(dir, &dentry->d_name,

@@ -1334,6 +1335,7 @@ static int nfs_safe_remove(struct dentry *dentry)
/* The VFS may want to delete this inode */
if (error == 0)
inode->i_nlink--;
nfs_mark_for_revalidate(inode);
nfs_end_data_update(inode);
} else
error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);

@@ -1556,6 +1558,7 @@ go_ahead:
nfs_begin_data_update(old_inode);
error = NFS_PROTO(old_dir)->rename(old_dir, &old_dentry->d_name,
new_dir, &new_dentry->d_name);
nfs_mark_for_revalidate(old_inode);
nfs_end_data_update(old_inode);
nfs_end_data_update(new_dir);
nfs_end_data_update(old_dir);
@@ -54,7 +54,7 @@
#define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1)

static void nfs_invalidate_inode(struct inode *);
static int nfs_update_inode(struct inode *, struct nfs_fattr *, unsigned long);
static int nfs_update_inode(struct inode *, struct nfs_fattr *);

static struct inode *nfs_alloc_inode(struct super_block *sb);
static void nfs_destroy_inode(struct inode *);

@@ -1080,8 +1080,6 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
int status = -ESTALE;
struct nfs_fattr fattr;
struct nfs_inode *nfsi = NFS_I(inode);
unsigned long verifier;
unsigned long cache_validity;

dfprintk(PAGECACHE, "NFS: revalidating (%s/%Ld)\n",
inode->i_sb->s_id, (long long)NFS_FILEID(inode));

@@ -1106,8 +1104,6 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
}
}

/* Protect against RPC races by saving the change attribute */
verifier = nfs_save_change_attribute(inode);
status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), &fattr);
if (status != 0) {
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) getattr failed, error=%d\n",

@@ -1122,7 +1118,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
}

spin_lock(&inode->i_lock);
status = nfs_update_inode(inode, &fattr, verifier);
status = nfs_update_inode(inode, &fattr);
if (status) {
spin_unlock(&inode->i_lock);
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) refresh failed, error=%d\n",

@@ -1130,20 +1126,11 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
(long long)NFS_FILEID(inode), status);
goto out;
}
cache_validity = nfsi->cache_validity;
nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;

/*
* We may need to keep the attributes marked as invalid if
* we raced with nfs_end_attr_update().
*/
if (time_after_eq(verifier, nfsi->cache_change_attribute))
nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);
spin_unlock(&inode->i_lock);

nfs_revalidate_mapping(inode, inode->i_mapping);

if (cache_validity & NFS_INO_INVALID_ACL)
if (nfsi->cache_validity & NFS_INO_INVALID_ACL)
nfs_zap_acl_cache(inode);

dfprintk(PAGECACHE, "NFS: (%s/%Ld) revalidation complete\n",

@@ -1346,10 +1333,8 @@ int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
return 0;
spin_lock(&inode->i_lock);
nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
if (nfs_verify_change_attribute(inode, fattr->time_start))
nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);
if (time_after(fattr->time_start, nfsi->last_updated))
status = nfs_update_inode(inode, fattr, fattr->time_start);
status = nfs_update_inode(inode, fattr);
else
status = nfs_check_inode_attributes(inode, fattr);

@@ -1375,10 +1360,7 @@ int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfsi->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
goto out;
}
status = nfs_update_inode(inode, fattr, fattr->time_start);
if (time_after_eq(fattr->time_start, nfsi->cache_change_attribute))
nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME|NFS_INO_REVAL_PAGECACHE);
nfsi->cache_change_attribute = jiffies;
status = nfs_update_inode(inode, fattr);
out:
spin_unlock(&inode->i_lock);
return status;

@@ -1396,12 +1378,12 @@ out:
*
* A very similar scenario holds for the dir cache.
*/
static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsigned long verifier)
static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
{
struct nfs_inode *nfsi = NFS_I(inode);
loff_t cur_isize, new_isize;
unsigned int invalid = 0;
int data_unstable;
int data_stable;

dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n",
__FUNCTION__, inode->i_sb->s_id, inode->i_ino,

@@ -1432,8 +1414,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
nfsi->last_updated = jiffies;

/* Are we racing with known updates of the metadata on the server? */
data_unstable = ! (nfs_verify_change_attribute(inode, verifier) ||
(nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE));
data_stable = nfs_verify_change_attribute(inode, fattr->time_start);
if (data_stable)
nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);

/* Check if our cached file size is stale */
new_isize = nfs_size_to_loff_t(fattr->size);

@@ -1442,7 +1425,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
/* Do we perhaps have any outstanding writes? */
if (nfsi->npages == 0) {
/* No, but did we race with nfs_end_data_update()? */
if (time_after_eq(verifier, nfsi->cache_change_attribute)) {
if (data_stable) {
inode->i_size = new_isize;
invalid |= NFS_INO_INVALID_DATA;
}

@@ -1451,6 +1434,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
inode->i_size = new_isize;
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
}
nfsi->cache_change_attribute = jiffies;
dprintk("NFS: isize change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
}

@@ -1460,8 +1444,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
dprintk("NFS: mtime change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
if (!data_unstable)
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
nfsi->cache_change_attribute = jiffies;
}

if ((fattr->valid & NFS_ATTR_FATTR_V4)

@@ -1469,15 +1453,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
nfsi->change_attr = fattr->change_attr;
if (!data_unstable)
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
nfsi->cache_change_attribute = jiffies;
}

/* If ctime has changed we should definitely clear access+acl caches */
if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) {
if (!data_unstable)
invalid |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
invalid |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
nfsi->cache_change_attribute = jiffies;
}
memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));

@@ -1515,6 +1499,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
|| S_ISLNK(inode->i_mode)))
invalid &= ~NFS_INO_INVALID_DATA;
if (data_stable)
invalid &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME|NFS_INO_REVAL_PAGECACHE);
if (!nfs_have_delegation(inode, FMODE_READ))
nfsi->cache_validity |= invalid;

@@ -2066,6 +2052,7 @@ static struct inode *nfs_alloc_inode(struct super_block *sb)
return NULL;
nfsi->flags = 0UL;
nfsi->cache_validity = 0UL;
nfsi->cache_change_attribute = jiffies;
#ifdef CONFIG_NFS_V3_ACL
nfsi->acl_access = ERR_PTR(-EAGAIN);
nfsi->acl_default = ERR_PTR(-EAGAIN);
@@ -1506,10 +1506,15 @@ static int _nfs4_proc_write(struct nfs_write_data *wdata)
dprintk("NFS call write %d @ %Ld\n", wdata->args.count,
(long long) wdata->args.offset);

wdata->args.bitmask = server->attr_bitmask;
wdata->res.server = server;
nfs_fattr_init(fattr);
status = rpc_call_sync(server->client, &msg, rpcflags);
dprintk("NFS reply write: %d\n", status);
return status;
if (status < 0)
return status;
nfs_post_op_update_inode(inode, fattr);
return wdata->res.count;
}

static int nfs4_proc_write(struct nfs_write_data *wdata)

@@ -1540,9 +1545,13 @@ static int _nfs4_proc_commit(struct nfs_write_data *cdata)
dprintk("NFS call commit %d @ %Ld\n", cdata->args.count,
(long long) cdata->args.offset);

cdata->args.bitmask = server->attr_bitmask;
cdata->res.server = server;
nfs_fattr_init(fattr);
status = rpc_call_sync(server->client, &msg, 0);
dprintk("NFS reply commit: %d\n", status);
if (status >= 0)
nfs_post_op_update_inode(inode, fattr);
return status;
}

@@ -375,6 +375,7 @@ nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)

dprintk("NFS call link %s\n", name->name);
status = rpc_call(NFS_CLIENT(inode), NFSPROC_LINK, &arg, NULL, 0);
nfs_mark_for_revalidate(inode);
nfs_mark_for_revalidate(dir);
dprintk("NFS reply link: %d\n", status);
return status;
@@ -189,6 +189,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
(long long)NFS_FILEID(inode),
count, (long long)(page_offset(page) + offset));

set_page_writeback(page);
nfs_begin_data_update(inode);
do {
if (count < wsize)

@@ -221,6 +222,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,

io_error:
nfs_end_data_update(inode);
end_page_writeback(page);
nfs_writedata_free(wdata);
return written ? written : result;
}

@@ -929,7 +931,7 @@ static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
atomic_set(&req->wb_complete, requests);

ClearPageError(page);
SetPageWriteback(page);
set_page_writeback(page);
offset = 0;
nbytes = req->wb_bytes;
do {

@@ -992,7 +994,7 @@ static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
nfs_list_remove_request(req);
nfs_list_add_request(req, &data->pages);
ClearPageError(req->wb_page);
SetPageWriteback(req->wb_page);
set_page_writeback(req->wb_page);
*pages++ = req->wb_page;
count += req->wb_bytes;
}
@@ -1223,7 +1223,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
return -EINVAL;
return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(vm_insert_page);
EXPORT_SYMBOL(vm_insert_page);

/*
* Somebody does a pfn remapping that doesn't actually work as a vma.
@@ -1772,16 +1772,16 @@ static int __devinit zone_batchsize(struct zone *zone)
batch = 1;

/*
* We will be trying to allcoate bigger chunks of contiguous
* memory of the order of fls(batch). This should result in
* better cache coloring.
* Clamp the batch to a 2^n - 1 value. Having a power
* of 2 value was found to be more likely to have
* suboptimal cache aliasing properties in some cases.
*
* A sanity check also to ensure that batch is still in limits.
* For example if 2 tasks are alternately allocating
* batches of pages, one task can end up with a lot
* of pages of one half of the possible page colors
* and the other with pages of the other colors.
*/
batch = (1 << fls(batch + batch/2));

if (fls(batch) >= (PAGE_SHIFT + MAX_ORDER - 2))
batch = PAGE_SHIFT + ((MAX_ORDER - 1 - PAGE_SHIFT)/2);
batch = (1 << (fls(batch + batch/2)-1)) - 1;

return batch;
}
@@ -228,13 +228,14 @@ static int inet_create(struct socket *sock, int protocol)
unsigned char answer_flags;
char answer_no_check;
int try_loading_module = 0;
int err = -ESOCKTNOSUPPORT;
int err;

sock->state = SS_UNCONNECTED;

/* Look for the requested type/protocol pair. */
answer = NULL;
lookup_protocol:
err = -ESOCKTNOSUPPORT;
rcu_read_lock();
list_for_each_rcu(p, &inetsw[sock->type]) {
answer = list_entry(p, struct inet_protosw, list);

@@ -252,6 +253,7 @@ lookup_protocol:
if (IPPROTO_IP == answer->protocol)
break;
}
err = -EPROTONOSUPPORT;
answer = NULL;
}

@@ -280,9 +282,6 @@ lookup_protocol:
err = -EPERM;
if (answer->capability > 0 && !capable(answer->capability))
goto out_rcu_unlock;
err = -EPROTONOSUPPORT;
if (!protocol)
goto out_rcu_unlock;

sock->ops = answer->ops;
answer_prot = answer->prot;
@@ -897,7 +897,10 @@ int igmp_rcv(struct sk_buff *skb)
/* Is it our report looped back? */
if (((struct rtable*)skb->dst)->fl.iif == 0)
break;
igmp_heard_report(in_dev, ih->group);
/* don't rely on MC router hearing unicast reports */
if (skb->pkt_type == PACKET_MULTICAST ||
skb->pkt_type == PACKET_BROADCAST)
igmp_heard_report(in_dev, ih->group);
break;
case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
@@ -92,10 +92,13 @@ static int inet6_create(struct socket *sock, int protocol)
struct proto *answer_prot;
unsigned char answer_flags;
char answer_no_check;
int rc;
int try_loading_module = 0;
int err;

/* Look for the requested type/protocol pair. */
answer = NULL;
lookup_protocol:
err = -ESOCKTNOSUPPORT;
rcu_read_lock();
list_for_each_rcu(p, &inetsw6[sock->type]) {
answer = list_entry(p, struct inet_protosw, list);

@@ -113,21 +116,37 @@ static int inet6_create(struct socket *sock, int protocol)
if (IPPROTO_IP == answer->protocol)
break;
}
err = -EPROTONOSUPPORT;
answer = NULL;
}

rc = -ESOCKTNOSUPPORT;
if (!answer)
goto out_rcu_unlock;
rc = -EPERM;
if (!answer) {
if (try_loading_module < 2) {
rcu_read_unlock();
/*
* Be more specific, e.g. net-pf-10-proto-132-type-1
* (net-pf-PF_INET6-proto-IPPROTO_SCTP-type-SOCK_STREAM)
*/
if (++try_loading_module == 1)
request_module("net-pf-%d-proto-%d-type-%d",
PF_INET6, protocol, sock->type);
/*
* Fall back to generic, e.g. net-pf-10-proto-132
* (net-pf-PF_INET6-proto-IPPROTO_SCTP)
*/
else
request_module("net-pf-%d-proto-%d",
PF_INET6, protocol);
goto lookup_protocol;
} else
goto out_rcu_unlock;
}

err = -EPERM;
if (answer->capability > 0 && !capable(answer->capability))
goto out_rcu_unlock;
rc = -EPROTONOSUPPORT;
if (!protocol)
goto out_rcu_unlock;

sock->ops = answer->ops;

answer_prot = answer->prot;
answer_no_check = answer->no_check;
answer_flags = answer->flags;

@@ -135,14 +154,14 @@ static int inet6_create(struct socket *sock, int protocol)

BUG_TRAP(answer_prot->slab != NULL);

rc = -ENOBUFS;
err = -ENOBUFS;
sk = sk_alloc(PF_INET6, GFP_KERNEL, answer_prot, 1);
if (sk == NULL)
goto out;

sock_init_data(sock, sk);

rc = 0;
err = 0;
sk->sk_no_check = answer_no_check;
if (INET_PROTOSW_REUSE & answer_flags)
sk->sk_reuse = 1;

@@ -202,14 +221,14 @@ static int inet6_create(struct socket *sock, int protocol)
sk->sk_prot->hash(sk);
}
if (sk->sk_prot->init) {
rc = sk->sk_prot->init(sk);
if (rc) {
err = sk->sk_prot->init(sk);
if (err) {
sk_common_release(sk);
goto out;
}
}
out:
return rc;
return err;
out_rcu_unlock:
rcu_read_unlock();
goto out;
@@ -1231,6 +1231,11 @@ int igmp6_event_report(struct sk_buff *skb)
if (skb->pkt_type == PACKET_LOOPBACK)
return 0;

/* send our report if the MC router may not have heard this report */
if (skb->pkt_type != PACKET_MULTICAST &&
skb->pkt_type != PACKET_BROADCAST)
return 0;

if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
return -EINVAL;

@@ -4743,11 +4743,6 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
struct sk_buff *skb;
long timeo;

/* Caller is allowed not to check sk->sk_err before calling. */
error = sock_error(sk);
if (error)
goto no_packet;

timeo = sock_rcvtimeo(sk, noblock);

SCTP_DEBUG_PRINTK("Timeout: timeo: %ld, MAX: %ld.\n",

@@ -4774,6 +4769,11 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
if (skb)
return skb;

/* Caller is allowed not to check sk->sk_err before calling. */
error = sock_error(sk);
if (error)
goto no_packet;

if (sk->sk_shutdown & RCV_SHUTDOWN)
break;

@@ -261,7 +261,8 @@ void sctp_transport_route(struct sctp_transport *transport,
* association's active path for getsockname().
*/
if (asoc && (transport == asoc->peer.active_path))
af->to_sk_saddr(&transport->saddr, asoc->base.sk);
opt->pf->af->to_sk_saddr(&transport->saddr,
asoc->base.sk);
} else
transport->pmtu = SCTP_DEFAULT_MAXSEGMENT;
}
@@ -59,7 +59,6 @@ __rpc_purge_upcall(struct inode *inode, int err)
struct rpc_inode *rpci = RPC_I(inode);

__rpc_purge_list(rpci, &rpci->pipe, err);
__rpc_purge_list(rpci, &rpci->in_upcall, err);
rpci->pipelen = 0;
wake_up(&rpci->waitq);
}

@@ -119,6 +118,7 @@ rpc_close_pipes(struct inode *inode)
down(&inode->i_sem);
if (rpci->ops != NULL) {
rpci->nreaders = 0;
__rpc_purge_list(rpci, &rpci->in_upcall, -EPIPE);
__rpc_purge_upcall(inode, -EPIPE);
rpci->nwriters = 0;
if (rpci->ops->release_pipe)