Merge branches 'cxgb4', 'misc', 'mlx4', 'nes' and 'qib' into for-next

This commit is contained in:
Roland Dreier 2010-05-25 09:54:03 -07:00
73 changed files with 50980 additions and 5070 deletions

View File

@ -43,6 +43,7 @@ config INFINIBAND_ADDR_TRANS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ipath/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"

View File

@ -1,6 +1,7 @@
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
obj-$(CONFIG_INFINIBAND_QIB) += hw/qib/
obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/

View File

@ -38,7 +38,9 @@
#include <rdma/ib_verbs.h>
int ib_device_register_sysfs(struct ib_device *device);
int ib_device_register_sysfs(struct ib_device *device,
int (*port_callback)(struct ib_device *,
u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);
int ib_sysfs_setup(void);

View File

@ -267,7 +267,9 @@ out:
* callback for each device that is added. @device must be allocated
* with ib_alloc_device().
*/
int ib_register_device(struct ib_device *device)
int ib_register_device(struct ib_device *device,
int (*port_callback)(struct ib_device *,
u8, struct kobject *))
{
int ret;
@ -296,7 +298,7 @@ int ib_register_device(struct ib_device *device)
goto out;
}
ret = ib_device_register_sysfs(device);
ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
printk(KERN_WARNING "Couldn't register device %s with driver model\n",
device->name);

View File

@ -47,8 +47,8 @@ MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");

View File

@ -475,7 +475,9 @@ err:
return NULL;
}
static int add_port(struct ib_device *device, int port_num)
static int add_port(struct ib_device *device, int port_num,
int (*port_callback)(struct ib_device *,
u8, struct kobject *))
{
struct ib_port *p;
struct ib_port_attr attr;
@ -522,11 +524,20 @@ static int add_port(struct ib_device *device, int port_num)
if (ret)
goto err_free_pkey;
if (port_callback) {
ret = port_callback(device, port_num, &p->kobj);
if (ret)
goto err_remove_pkey;
}
list_add_tail(&p->kobj.entry, &device->port_list);
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
err_remove_pkey:
sysfs_remove_group(&p->kobj, &p->pkey_group);
err_free_pkey:
for (i = 0; i < attr.pkey_tbl_len; ++i)
kfree(p->pkey_group.attrs[i]);
@ -754,7 +765,9 @@ static struct attribute_group iw_stats_group = {
.attrs = iw_proto_stats_attrs,
};
int ib_device_register_sysfs(struct ib_device *device)
int ib_device_register_sysfs(struct ib_device *device,
int (*port_callback)(struct ib_device *,
u8, struct kobject *))
{
struct device *class_dev = &device->dev;
int ret;
@ -785,12 +798,12 @@ int ib_device_register_sysfs(struct ib_device *device)
}
if (device->node_type == RDMA_NODE_IB_SWITCH) {
ret = add_port(device, 0);
ret = add_port(device, 0, port_callback);
if (ret)
goto err_put;
} else {
for (i = 1; i <= device->phys_port_cnt; ++i) {
ret = add_port(device, i);
ret = add_port(device, i, port_callback);
if (ret)
goto err_put;
}

View File

@ -865,7 +865,7 @@ int c2_register_device(struct c2_dev *dev)
dev->ibdev.iwcm->create_listen = c2_service_create;
dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
ret = ib_register_device(&dev->ibdev);
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto out_free_iwcm;

View File

@ -1428,7 +1428,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
dev->ibdev.iwcm->get_qp = iwch_get_qp;
ret = ib_register_device(&dev->ibdev);
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto bail1;

View File

@ -486,7 +486,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
ret = ib_register_device(&dev->ibdev);
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto bail1;

View File

@ -798,7 +798,7 @@ static int __devinit ehca_probe(struct of_device *dev,
goto probe5;
}
ret = ib_register_device(&shca->ib_device);
ret = ib_register_device(&shca->ib_device, NULL);
if (ret) {
ehca_err(&shca->ib_device,
"ib_register_device() failed ret=%i", ret);

View File

@ -1,9 +1,11 @@
config INFINIBAND_IPATH
tristate "QLogic InfiniPath Driver"
depends on 64BIT && NET
tristate "QLogic HTX HCA support"
depends on 64BIT && NET && HT_IRQ
---help---
This is a driver for QLogic InfiniPath host channel adapters,
This is a driver for the obsolete QLogic Hyper-Transport
IB host channel adapter (model QHT7140),
including InfiniBand verbs support. This driver allows these
devices to be used with both kernel upper level protocols such
as IP-over-InfiniBand as well as with userspace applications
(in conjunction with InfiniBand userspace access).
For QLogic PCIe QLE based cards, use the QIB driver instead.

View File

@ -29,13 +29,9 @@ ib_ipath-y := \
ipath_user_pages.o \
ipath_user_sdma.o \
ipath_verbs_mcast.o \
ipath_verbs.o \
ipath_iba7220.o \
ipath_sd7220.o \
ipath_sd7220_img.o
ipath_verbs.o
ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o

View File

@ -132,18 +132,13 @@ static int __devinit ipath_init_one(struct pci_dev *,
/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
#define PCI_VENDOR_ID_QLOGIC 0x1077
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
/* Number of seconds before our card status check... */
#define STATUS_TIMEOUT 60
static const struct pci_device_id ipath_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
{ 0, }
};
@ -521,30 +516,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
/* setup the chip-specific functions, as early as possible. */
switch (ent->device) {
case PCI_DEVICE_ID_INFINIPATH_HT:
#ifdef CONFIG_HT_IRQ
ipath_init_iba6110_funcs(dd);
break;
#else
ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
"CONFIG_HT_IRQ is not enabled\n", ent->device);
return -ENODEV;
#endif
case PCI_DEVICE_ID_INFINIPATH_PE800:
#ifdef CONFIG_PCI_MSI
ipath_init_iba6120_funcs(dd);
break;
#else
ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
"CONFIG_PCI_MSI is not enabled\n", ent->device);
return -ENODEV;
#endif
case PCI_DEVICE_ID_INFINIPATH_7220:
#ifndef CONFIG_PCI_MSI
ipath_dbg("CONFIG_PCI_MSI is not enabled, "
"using INTx for unit %u\n", dd->ipath_unit);
#endif
ipath_init_iba7220_funcs(dd);
break;
default:
ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
"failing\n", ent->device);

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1030,8 +1030,6 @@ void ipath_free_data(struct ipath_portdata *dd);
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
unsigned len, int avail);
void ipath_init_iba7220_funcs(struct ipath_devdata *);
void ipath_init_iba6120_funcs(struct ipath_devdata *);
void ipath_init_iba6110_funcs(struct ipath_devdata *);
void ipath_get_eeprom_info(struct ipath_devdata *);
int ipath_update_eeprom_log(struct ipath_devdata *dd);

View File

@ -2182,7 +2182,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
snprintf(dev->node_desc, sizeof(dev->node_desc),
IPATH_IDSTR " %s", init_utsname()->nodename);
ret = ib_register_device(dev);
ret = ib_register_device(dev, NULL);
if (ret)
goto err_reg;

View File

@ -662,7 +662,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
if (ib_register_device(&ibdev->ib_dev))
if (ib_register_device(&ibdev->ib_dev, NULL))
goto err_map;
if (mlx4_ib_mad_init(ibdev))

View File

@ -1403,7 +1403,7 @@ int mthca_register_device(struct mthca_dev *dev)
mutex_init(&dev->cap_mask_mutex);
ret = ib_register_device(&dev->ib_dev);
ret = ib_register_device(&dev->ib_dev, NULL);
if (ret)
return ret;

View File

@ -2584,7 +2584,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
break;
}
}
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
if (phy_data & 0x0004) {
if (wide_ppm_offset &&
@ -2639,6 +2638,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
}
}
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
}
@ -3422,6 +3423,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 aeq_info;
u32 next_iwarp_state = 0;
u32 aeqe_cq_id;
u16 async_event_id;
u8 tcp_state;
u8 iwarp_state;
@ -3449,6 +3451,14 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
if (aeq_info & NES_AEQE_QP) {
if ((!nes_is_resource_allocated(nesadapter, nesadapter->allocated_qps,
aeqe_cq_id)) ||
(atomic_read(&nesqp->close_timer_started)))
return;
}
switch (async_event_id) {
case NES_AEQE_AEID_LLP_FIN_RECEIVED:
if (nesqp->term_flags)

View File

@ -1002,6 +1002,7 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Link Change Interrupts",
"Linearized SKBs",
@ -1016,11 +1017,15 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Rx Jabber Errors",
"Rx Oversized Frames",
"Rx Short Frames",
"Rx Length Errors",
"Rx CRC Errors",
"Rx Port Discard",
"Endnode Rx Discards",
"Endnode Rx Octets",
"Endnode Rx Frames",
"Endnode Tx Octets",
"Endnode Tx Frames",
"Tx Errors",
"mh detected",
"mh pauses",
"Retransmission Count",
@ -1049,19 +1054,13 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"CM Nodes Destroyed",
"CM Accel Drops",
"CM Resets Received",
"Free 4Kpbls",
"Free 256pbls",
"Timer Inits",
"CQ Depth 1",
"CQ Depth 4",
"CQ Depth 16",
"CQ Depth 24",
"CQ Depth 32",
"CQ Depth 128",
"CQ Depth 256",
"LRO aggregated",
"LRO flushed",
"LRO no_desc",
};
#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
/**
@ -1121,12 +1120,14 @@ static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
/**
* nes_netdev_get_ethtool_stats
*/
static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
{
u64 u64temp;
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 nic_count;
u32 u32temp;
u32 index = 0;
@ -1155,6 +1156,46 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
nesvnic->nesdev->port_tx_discards += u32temp;
nesvnic->netstats.tx_dropped += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_short_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_length_errors += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_crc_errors += u32temp;
nesvnic->netstats.rx_crc_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_tx_errors += u32temp;
nesvnic->netstats.tx_errors += u32temp;
for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
if (nesvnic->qp_nic_index[nic_count] == 0xf)
break;
@ -1219,11 +1260,15 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
target_stat_values[++index] = nesvnic->netstats.rx_length_errors;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors;
target_stat_values[++index] = nesvnic->nesdev->port_rx_discards;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors;
target_stat_values[++index] = mh_detected;
target_stat_values[++index] = mh_pauses_sent;
target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
@ -1252,21 +1297,14 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
target_stat_values[++index] = atomic_read(&cm_resets_recvd);
target_stat_values[++index] = nesadapter->free_4kpbl;
target_stat_values[++index] = nesadapter->free_256pbl;
target_stat_values[++index] = int_mod_timer_init;
target_stat_values[++index] = int_mod_cq_depth_1;
target_stat_values[++index] = int_mod_cq_depth_4;
target_stat_values[++index] = int_mod_cq_depth_16;
target_stat_values[++index] = int_mod_cq_depth_24;
target_stat_values[++index] = int_mod_cq_depth_32;
target_stat_values[++index] = int_mod_cq_depth_128;
target_stat_values[++index] = int_mod_cq_depth_256;
target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
}
/**
* nes_netdev_get_drvinfo
*/

View File

@ -3962,7 +3962,7 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
struct nes_adapter *nesadapter = nesdev->nesadapter;
int i, ret;
ret = ib_register_device(&nesvnic->nesibdev->ibdev);
ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL);
if (ret) {
return ret;
}

View File

@ -0,0 +1,7 @@
config INFINIBAND_QIB
tristate "QLogic PCIe HCA support"
depends on 64BIT && NET
---help---
This is a low-level driver for QLogic PCIe QLE InfiniBand host
channel adapters. This driver does not support the QLogic
HyperTransport card (model QHT7140).

View File

@ -0,0 +1,15 @@
obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \
qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
qib_sd7220.o qib_sd7220_img.o qib_iba7322.o qib_verbs.o
# 6120 has no fallback if no MSI interrupts, others can do INTx
ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o
ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o
ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,977 @@
/*
* Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
#define QIB_6120_Revision_OFFS 0x0
#define QIB_6120_Revision_R_Simulator_LSB 0x3F
#define QIB_6120_Revision_R_Simulator_RMASK 0x1
#define QIB_6120_Revision_Reserved_LSB 0x28
#define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF
#define QIB_6120_Revision_BoardID_LSB 0x20
#define QIB_6120_Revision_BoardID_RMASK 0xFF
#define QIB_6120_Revision_R_SW_LSB 0x18
#define QIB_6120_Revision_R_SW_RMASK 0xFF
#define QIB_6120_Revision_R_Arch_LSB 0x10
#define QIB_6120_Revision_R_Arch_RMASK 0xFF
#define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8
#define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF
#define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0
#define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF
#define QIB_6120_Control_OFFS 0x8
#define QIB_6120_Control_TxLatency_LSB 0x4
#define QIB_6120_Control_TxLatency_RMASK 0x1
#define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3
#define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1
#define QIB_6120_Control_LinkEn_LSB 0x2
#define QIB_6120_Control_LinkEn_RMASK 0x1
#define QIB_6120_Control_FreezeMode_LSB 0x1
#define QIB_6120_Control_FreezeMode_RMASK 0x1
#define QIB_6120_Control_SyncReset_LSB 0x0
#define QIB_6120_Control_SyncReset_RMASK 0x1
#define QIB_6120_PageAlign_OFFS 0x10
#define QIB_6120_PortCnt_OFFS 0x18
#define QIB_6120_SendRegBase_OFFS 0x30
#define QIB_6120_UserRegBase_OFFS 0x38
#define QIB_6120_CntrRegBase_OFFS 0x40
#define QIB_6120_Scratch_OFFS 0x48
#define QIB_6120_Scratch_TopHalf_LSB 0x20
#define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF
#define QIB_6120_Scratch_BottomHalf_LSB 0x0
#define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF
#define QIB_6120_IntBlocked_OFFS 0x60
#define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F
#define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E
#define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D
#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C
#define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_Reserved_LSB 0xF
#define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF
#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10
#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF
#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE
#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD
#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC
#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_Reserved1_LSB 0x5
#define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F
#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4
#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3
#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2
#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1
#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1
#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0
#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1
#define QIB_6120_IntMask_OFFS 0x68
#define QIB_6120_IntMask_ErrorIntMask_LSB 0x1F
#define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1
#define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E
#define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1
#define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D
#define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1
#define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C
#define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1
#define QIB_6120_IntMask_Reserved_LSB 0x11
#define QIB_6120_IntMask_Reserved_RMASK 0x7FF
#define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10
#define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1
#define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF
#define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1
#define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE
#define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1
#define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD
#define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1
#define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC
#define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1
#define QIB_6120_IntMask_Reserved1_LSB 0x5
#define QIB_6120_IntMask_Reserved1_RMASK 0x7F
#define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4
#define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1
#define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3
#define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1
#define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2
#define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1
#define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1
#define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1
#define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0
#define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1
#define QIB_6120_IntStatus_OFFS 0x70
#define QIB_6120_IntStatus_Error_LSB 0x1F
#define QIB_6120_IntStatus_Error_RMASK 0x1
#define QIB_6120_IntStatus_PioSent_LSB 0x1E
#define QIB_6120_IntStatus_PioSent_RMASK 0x1
#define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D
#define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1
#define QIB_6120_IntStatus_assertGPIO_LSB 0x1C
#define QIB_6120_IntStatus_assertGPIO_RMASK 0x1
#define QIB_6120_IntStatus_Reserved_LSB 0xF
#define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF
#define QIB_6120_IntStatus_RcvAvail4_LSB 0x10
#define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1
#define QIB_6120_IntStatus_RcvAvail3_LSB 0xF
#define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1
#define QIB_6120_IntStatus_RcvAvail2_LSB 0xE
#define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1
#define QIB_6120_IntStatus_RcvAvail1_LSB 0xD
#define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1
#define QIB_6120_IntStatus_RcvAvail0_LSB 0xC
#define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1
#define QIB_6120_IntStatus_Reserved1_LSB 0x5
#define QIB_6120_IntStatus_Reserved1_RMASK 0x7F
#define QIB_6120_IntStatus_RcvUrg4_LSB 0x4
#define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1
#define QIB_6120_IntStatus_RcvUrg3_LSB 0x3
#define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1
#define QIB_6120_IntStatus_RcvUrg2_LSB 0x2
#define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1
#define QIB_6120_IntStatus_RcvUrg1_LSB 0x1
#define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1
#define QIB_6120_IntStatus_RcvUrg0_LSB 0x0
#define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1
#define QIB_6120_IntClear_OFFS 0x78
#define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F
#define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1
#define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E
#define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1
#define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D
#define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1
#define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C
#define QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1
#define QIB_6120_IntClear_Reserved_LSB 0xF
#define QIB_6120_IntClear_Reserved_RMASK 0x1FFF
#define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10
#define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1
#define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF
#define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1
#define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE
#define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1
#define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD
#define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1
#define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC
#define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1
#define QIB_6120_IntClear_Reserved1_LSB 0x5
#define QIB_6120_IntClear_Reserved1_RMASK 0x7F
#define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4
#define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1
#define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3
#define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1
#define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2
#define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1
#define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1
#define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1
#define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0
#define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1
#define QIB_6120_ErrMask_OFFS 0x80
#define QIB_6120_ErrMask_Reserved_LSB 0x34
#define QIB_6120_ErrMask_Reserved_RMASK 0xFFF
#define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33
#define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1
#define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32
#define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1
#define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31
#define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1
#define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30
#define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1
#define QIB_6120_ErrMask_Reserved1_LSB 0x26
#define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF
#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
#define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20
#define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1
#define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F
#define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1
#define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
#define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
#define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D
#define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1
#define QIB_6120_ErrMask_Reserved2_LSB 0x12
#define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF
#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10
#define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF
#define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE
#define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD
#define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC
#define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB
#define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA
#define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9
#define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6
#define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5
#define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3
#define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2
#define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1
#define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1
#define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0
#define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1
#define QIB_6120_ErrStatus_OFFS 0x88
#define QIB_6120_ErrStatus_Reserved_LSB 0x34
#define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF
#define QIB_6120_ErrStatus_HardwareErr_LSB 0x33
#define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1
#define QIB_6120_ErrStatus_ResetNegated_LSB 0x32
#define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1
#define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31
#define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1
#define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30
#define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1
#define QIB_6120_ErrStatus_Reserved1_LSB 0x26
#define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF
#define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25
#define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
#define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23
#define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
#define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22
#define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
#define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20
#define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1
#define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F
#define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1
#define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E
#define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1
#define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D
#define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1
#define QIB_6120_ErrStatus_Reserved2_LSB 0x12
#define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF
#define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11
#define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10
#define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF
#define QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE
#define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD
#define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC
#define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB
#define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA
#define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9
#define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6
#define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5
#define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4
#define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3
#define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2
#define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1
#define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1
#define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0
#define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1
#define QIB_6120_ErrClear_OFFS 0x90
#define QIB_6120_ErrClear_Reserved_LSB 0x34
#define QIB_6120_ErrClear_Reserved_RMASK 0xFFF
#define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33
#define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1
#define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32
#define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1
#define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31
#define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1
#define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30
#define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1
#define QIB_6120_ErrClear_Reserved1_LSB 0x26
#define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF
#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
#define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20
#define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1
#define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F
#define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1
#define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
#define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
#define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D
#define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1
#define QIB_6120_ErrClear_Reserved2_LSB 0x12
#define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF
#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10
#define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF
#define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE
#define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD
#define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC
#define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB
#define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA
#define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9
#define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6
#define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5
#define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3
#define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2
#define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1
#define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1
#define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0
#define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1
#define QIB_6120_HwErrMask_OFFS 0x98
#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
#define QIB_6120_HwErrMask_Reserved_LSB 0x3D
#define QIB_6120_HwErrMask_Reserved_RMASK 0x1
#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B
#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A
#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
#define QIB_6120_HwErrMask_Reserved1_LSB 0x39
#define QIB_6120_HwErrMask_Reserved1_RMASK 0x1
#define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38
#define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1
#define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37
#define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1
#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
#define QIB_6120_HwErrMask_Reserved2_LSB 0x33
#define QIB_6120_HwErrMask_Reserved2_RMASK 0x7
#define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C
#define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
#define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28
#define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF
#define QIB_6120_HwErrMask_Reserved3_LSB 0x22
#define QIB_6120_HwErrMask_Reserved3_RMASK 0x3F
#define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
#define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
#define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
#define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
#define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D
#define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1
#define QIB_6120_HwErrMask_Reserved4_LSB 0x6
#define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF
#define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0
#define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F
#define QIB_6120_HwErrStatus_OFFS 0xA0
#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
#define QIB_6120_HwErrStatus_Reserved_LSB 0x3D
#define QIB_6120_HwErrStatus_Reserved_RMASK 0x1
#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B
#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A
#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
#define QIB_6120_HwErrStatus_Reserved1_LSB 0x39
#define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1
#define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38
#define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1
#define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37
#define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1
#define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36
#define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
#define QIB_6120_HwErrStatus_Reserved2_LSB 0x33
#define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7
#define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C
#define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F
#define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28
#define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF
#define QIB_6120_HwErrStatus_Reserved3_LSB 0x22
#define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F
#define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F
#define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7
#define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E
#define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1
/*
 * NOTE: "Poisened" (sic) is the spelling in the auto-generated register
 * definitions; the matching HwErrMask/HwErrClear fields spell "Poisoned".
 * Do not rename — driver code references this generated name verbatim.
 */
#define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D
#define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1
#define QIB_6120_HwErrStatus_Reserved4_LSB 0x6
#define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF
#define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0
#define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F
#define QIB_6120_HwErrClear_OFFS 0xA8
#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
#define QIB_6120_HwErrClear_Reserved_LSB 0x3D
#define QIB_6120_HwErrClear_Reserved_RMASK 0x1
#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B
#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A
#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
#define QIB_6120_HwErrClear_Reserved1_LSB 0x39
#define QIB_6120_HwErrClear_Reserved1_RMASK 0x1
#define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38
#define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1
#define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37
#define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1
#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
#define QIB_6120_HwErrClear_Reserved2_LSB 0x33
#define QIB_6120_HwErrClear_Reserved2_RMASK 0x7
#define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C
#define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F
#define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28
#define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF
#define QIB_6120_HwErrClear_Reserved3_LSB 0x22
#define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F
#define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F
#define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7
#define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
#define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
#define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D
#define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1
#define QIB_6120_HwErrClear_Reserved4_LSB 0x6
#define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF
#define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0
#define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F
#define QIB_6120_HwDiagCtrl_OFFS 0xB0
#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
#define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D
#define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1
#define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C
#define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1
#define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33
#define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF
#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
#define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23
#define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F
#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
#define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6
#define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF
#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F
#define QIB_6120_IBCStatus_OFFS 0xC0
#define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F
#define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1
#define QIB_6120_IBCStatus_TxReady_LSB 0x1E
#define QIB_6120_IBCStatus_TxReady_RMASK 0x1
#define QIB_6120_IBCStatus_Reserved_LSB 0x7
#define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF
#define QIB_6120_IBCStatus_LinkState_LSB 0x4
#define QIB_6120_IBCStatus_LinkState_RMASK 0x7
#define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0
#define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF
#define QIB_6120_IBCCtrl_OFFS 0xC8
#define QIB_6120_IBCCtrl_Loopback_LSB 0x3F
#define QIB_6120_IBCCtrl_Loopback_RMASK 0x1
#define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E
#define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1
#define QIB_6120_IBCCtrl_Reserved_LSB 0x2B
#define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF
#define QIB_6120_IBCCtrl_CreditScale_LSB 0x28
#define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7
#define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24
#define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF
#define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20
#define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF
#define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F
#define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1
#define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14
#define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF
#define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12
#define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3
#define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10
#define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3
#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
#define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0
#define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
#define QIB_6120_EXTStatus_OFFS 0xD0
#define QIB_6120_EXTStatus_GPIOIn_LSB 0x30
#define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF
#define QIB_6120_EXTStatus_Reserved_LSB 0x20
#define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF
#define QIB_6120_EXTStatus_Reserved1_LSB 0x10
#define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF
#define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF
#define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1
#define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE
#define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1
#define QIB_6120_EXTStatus_Reserved2_LSB 0x0
#define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF
#define QIB_6120_EXTCtrl_OFFS 0xD8
#define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30
#define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF
#define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20
#define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF
#define QIB_6120_EXTCtrl_Reserved_LSB 0x4
#define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF
#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
#define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0
#define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
#define QIB_6120_GPIOOut_OFFS 0xE0
#define QIB_6120_GPIOMask_OFFS 0xE8
#define QIB_6120_GPIOStatus_OFFS 0xF0
#define QIB_6120_GPIOClear_OFFS 0xF8
#define QIB_6120_RcvCtrl_OFFS 0x100
#define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F
#define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1
#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E
#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
#define QIB_6120_RcvCtrl_Reserved_LSB 0x15
#define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF
#define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10
#define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F
#define QIB_6120_RcvCtrl_Reserved1_LSB 0x9
#define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F
#define QIB_6120_RcvCtrl_Reserved2_LSB 0x5
#define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF
#define QIB_6120_RcvCtrl_PortEnable_LSB 0x0
#define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F
#define QIB_6120_RcvBTHQP_OFFS 0x108
#define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E
#define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3
#define QIB_6120_RcvBTHQP_Reserved_LSB 0x18
#define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F
#define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0
#define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
#define QIB_6120_RcvHdrSize_OFFS 0x110
#define QIB_6120_RcvHdrCnt_OFFS 0x118
#define QIB_6120_RcvHdrEntSize_OFFS 0x120
#define QIB_6120_RcvTIDBase_OFFS 0x128
#define QIB_6120_RcvTIDCnt_OFFS 0x130
#define QIB_6120_RcvEgrBase_OFFS 0x138
#define QIB_6120_RcvEgrCnt_OFFS 0x140
#define QIB_6120_RcvBufBase_OFFS 0x148
#define QIB_6120_RcvBufSize_OFFS 0x150
#define QIB_6120_RxIntMemBase_OFFS 0x158
#define QIB_6120_RxIntMemSize_OFFS 0x160
#define QIB_6120_RcvPartitionKey_OFFS 0x168
#define QIB_6120_RcvPktLEDCnt_OFFS 0x178
#define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20
#define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
#define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0
#define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
#define QIB_6120_SendCtrl_OFFS 0x1C0
#define QIB_6120_SendCtrl_Disarm_LSB 0x1F
#define QIB_6120_SendCtrl_Disarm_RMASK 0x1
#define QIB_6120_SendCtrl_Reserved_LSB 0x17
#define QIB_6120_SendCtrl_Reserved_RMASK 0xFF
#define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10
#define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F
#define QIB_6120_SendCtrl_Reserved1_LSB 0x4
#define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF
#define QIB_6120_SendCtrl_PIOEnable_LSB 0x3
#define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1
#define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2
#define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1
#define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1
#define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1
#define QIB_6120_SendCtrl_Abort_LSB 0x0
#define QIB_6120_SendCtrl_Abort_RMASK 0x1
#define QIB_6120_SendPIOBufBase_OFFS 0x1C8
#define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35
#define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF
#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20
#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
#define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15
#define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF
#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0
#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
#define QIB_6120_SendPIOSize_OFFS 0x1D0
#define QIB_6120_SendPIOSize_Reserved_LSB 0x2D
#define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF
#define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20
#define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF
#define QIB_6120_SendPIOSize_Reserved1_LSB 0xC
#define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF
#define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0
#define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF
#define QIB_6120_SendPIOBufCnt_OFFS 0x1D8
#define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24
#define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF
#define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20
#define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF
#define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9
#define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF
#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0
#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF
#define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0
#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6
#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF
#define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0
#define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F
#define QIB_6120_SendBufErr0_OFFS 0x240
#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0
#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0
#define QIB_6120_RcvHdrAddr0_OFFS 0x280
#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
#define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0
#define QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3
#define QIB_6120_RcvHdrTailAddr0_OFFS 0x300
#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
#define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0
#define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3
#define QIB_6120_SerdesCfg0_OFFS 0x3C0
#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F
#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1
#define QIB_6120_SerdesCfg0_Reserved_LSB 0x38
#define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F
#define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36
#define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3
#define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34
#define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3
#define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32
#define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3
#define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31
#define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1
#define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30
#define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1
#define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F
#define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1
#define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E
#define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1
#define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D
#define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1
#define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C
#define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1
#define QIB_6120_SerdesCfg0_PW_LSB 0x2B
#define QIB_6120_SerdesCfg0_PW_RMASK 0x1
#define QIB_6120_SerdesCfg0_RefSel_LSB 0x29
#define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3
#define QIB_6120_SerdesCfg0_ParReset_LSB 0x28
#define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1
#define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27
#define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1
#define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26
#define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1
#define QIB_6120_SerdesCfg0_Offset_LSB 0x1E
#define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF
#define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D
#define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1
#define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C
#define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1
#define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18
#define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF
#define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14
#define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF
#define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10
#define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF
#define QIB_6120_SerdesCfg0_TxIdeEnX_LSB 0xC
#define QIB_6120_SerdesCfg0_TxIdeEnX_RMASK 0xF
#define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8
#define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF
#define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7
#define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1
#define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6
#define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1
#define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5
#define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1
#define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4
#define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1
#define QIB_6120_SerdesCfg0_ResetA_LSB 0x3
#define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1
#define QIB_6120_SerdesCfg0_ResetB_LSB 0x2
#define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1
#define QIB_6120_SerdesCfg0_ResetC_LSB 0x1
#define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1
#define QIB_6120_SerdesCfg0_ResetD_LSB 0x0
#define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1
#define QIB_6120_SerdesStat_OFFS 0x3D0
#define QIB_6120_SerdesStat_Reserved_LSB 0xC
#define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF
#define QIB_6120_SerdesStat_BeaconDetA_LSB 0xB
#define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1
#define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA
#define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1
#define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9
#define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1
#define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8
#define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1
#define QIB_6120_SerdesStat_RxDetA_LSB 0x7
#define QIB_6120_SerdesStat_RxDetA_RMASK 0x1
#define QIB_6120_SerdesStat_RxDetB_LSB 0x6
#define QIB_6120_SerdesStat_RxDetB_RMASK 0x1
#define QIB_6120_SerdesStat_RxDetC_LSB 0x5
#define QIB_6120_SerdesStat_RxDetC_RMASK 0x1
#define QIB_6120_SerdesStat_RxDetD_LSB 0x4
#define QIB_6120_SerdesStat_RxDetD_RMASK 0x1
#define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3
#define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1
#define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2
#define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1
#define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1
#define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1
#define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0
#define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1
#define QIB_6120_XGXSCfg_OFFS 0x3D8
#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F
#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1
#define QIB_6120_XGXSCfg_Reserved_LSB 0x17
#define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF
#define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13
#define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF
#define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9
#define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF
#define QIB_6120_XGXSCfg_port_addr_LSB 0x4
#define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F
#define QIB_6120_XGXSCfg_mdd_30_LSB 0x3
#define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1
#define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2
#define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1
#define QIB_6120_XGXSCfg_Reserved1_LSB 0x1
#define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1
#define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0
#define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1
#define QIB_6120_LBIntCnt_OFFS 0x12000
#define QIB_6120_LBFlowStallCnt_OFFS 0x12008
#define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018
#define QIB_6120_TxDataPktCnt_OFFS 0x12020
#define QIB_6120_TxFlowPktCnt_OFFS 0x12028
#define QIB_6120_TxDwordCnt_OFFS 0x12030
#define QIB_6120_TxLenErrCnt_OFFS 0x12038
#define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040
#define QIB_6120_TxUnderrunCnt_OFFS 0x12048
#define QIB_6120_TxFlowStallCnt_OFFS 0x12050
#define QIB_6120_TxDroppedPktCnt_OFFS 0x12058
#define QIB_6120_RxDroppedPktCnt_OFFS 0x12060
#define QIB_6120_RxDataPktCnt_OFFS 0x12068
#define QIB_6120_RxFlowPktCnt_OFFS 0x12070
#define QIB_6120_RxDwordCnt_OFFS 0x12078
#define QIB_6120_RxLenErrCnt_OFFS 0x12080
#define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088
#define QIB_6120_RxICRCErrCnt_OFFS 0x12090
#define QIB_6120_RxVCRCErrCnt_OFFS 0x12098
#define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0
#define QIB_6120_RxBadFormatCnt_OFFS 0x120A8
#define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0
#define QIB_6120_RxEBPCnt_OFFS 0x120B8
#define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0
#define QIB_6120_RxBufOvflCnt_OFFS 0x120C8
#define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0
#define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8
#define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0
#define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8
#define QIB_6120_IBStatusChangeCnt_OFFS 0x12140
#define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148
#define QIB_6120_IBLinkDownedCnt_OFFS 0x12150
#define QIB_6120_IBSymbolErrCnt_OFFS 0x12158
#define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170
#define QIB_6120_RcvEgrArray0_OFFS 0x14000
#define QIB_6120_RcvTIDArray0_OFFS 0x54000
#define QIB_6120_PIOLaunchFIFO_OFFS 0x64000
#define QIB_6120_SendPIOpbcCache_OFFS 0x64800
#define QIB_6120_RcvBuf1_OFFS 0x72000
#define QIB_6120_RcvBuf2_OFFS 0x75000
#define QIB_6120_RcvFlags_OFFS 0x77000
#define QIB_6120_RcvLookupBuf1_OFFS 0x79000
#define QIB_6120_RcvDMABuf_OFFS 0x7B000
#define QIB_6120_MiscRXEIntMem_OFFS 0x7C000
#define QIB_6120_PCIERcvBuf_OFFS 0x80000
#define QIB_6120_PCIERetryBuf_OFFS 0x82000
#define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000
#define QIB_6120_PIOBuf0_MA_OFFS 0x100000

View File

@ -0,0 +1,156 @@
#ifndef _QIB_7220_H
#define _QIB_7220_H
/*
* Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* grab register-defs auto-generated by HW */
#include "qib_7220_regs.h"
/* The number of eager receive TIDs for context zero. */
#define IBA7220_KRCVEGRCNT 2048U

/*
 * IBA7220-specific IB link-training substate codes.  Presumably these are
 * values of the IBCStatus LinkTrainingState field beyond the common set —
 * TODO confirm against qib_iba7220.c usage.
 */
#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
#define IB_7220_LT_STATE_TXREVLANES 0x0d
#define IB_7220_LT_STATE_CFGENH 0x10
/*
 * Per-device (chip-wide) private state for the IBA7220, carrying counter
 * snapshot buffers, IRQ bookkeeping, and software shadows of registers
 * that are updated read-modify-write (GPIO, ExtCtrl, error/hwerror masks)
 * so updates need not read the hardware first.
 */
struct qib_chip_specific {
u64 __iomem *cregbase; /* MMIO base of the counter registers */
u64 *cntrs; /* snapshot buffer for device counters */
u64 *portcntrs; /* snapshot buffer for port counters */
spinlock_t sdepb_lock; /* serdes EPB bus */
spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
u64 hwerrmask; /* shadow of the hardware-error mask register */
u64 errormask; /* shadow of the error mask register */
u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
u64 gpio_mask; /* shadow the gpio mask register */
u64 extctrl; /* shadow the gpio output enable, etc... */
u32 ncntrs; /* number of entries in cntrs[] */
u32 nportcntrs; /* number of entries in portcntrs[] */
u32 cntrnamelen; /* total length of the device counter name strings */
u32 portcntrnamelen; /* total length of the port counter name strings */
u32 numctxts; /* number of receive contexts */
u32 rcvegrcnt; /* eager receive TID count — presumably per context */
u32 autoneg_tries; /* autonegotiation attempts so far */
u32 serdes_first_init_done; /* nonzero once first SERDES init completed */
u32 sdmabufcnt; /* number of buffers reserved for send DMA */
u32 lastbuf_for_pio; /* highest buffer index usable for PIO */
u32 updthresh; /* current AvailUpdThld */
u32 updthresh_dflt; /* default AvailUpdThld */
int irq; /* assigned IRQ number */
u8 presets_needed; /* nonzero if SERDES presets must be (re)applied */
u8 relock_timer_active; /* nonzero while relock_timer is pending */
char emsgbuf[128]; /* scratch for formatting error messages */
char sdmamsgbuf[192]; /* scratch for formatting send DMA messages */
char bitsmsgbuf[64]; /* scratch for formatting bit-name messages */
struct timer_list relock_timer; /* periodic link-relock poll timer */
unsigned int relock_interval; /* in jiffies */
};
/*
 * Per-port private state for the IBA7220.  Must begin with the generic
 * qib_pportdata so the two can be converted with container-of style casts.
 */
struct qib_chippport_specific {
struct qib_pportdata pportdata; /* generic per-port data; must be first */
wait_queue_head_t autoneg_wait; /* woken when autonegotiation finishes */
struct delayed_work autoneg_work; /* deferred autonegotiation steps */
struct timer_list chase_timer; /* times out link "chase" attempts */
/*
 * these 5 fields are used to establish deltas for IB symbol
 * errors and linkrecovery errors. They can be reported on
 * some chips during link negotiation prior to INIT, and with
 * DDR when faking DDR negotiations with non-IBTA switches.
 * The chip counters are adjusted at driver unload if there is
 * a non-zero delta.
 */
u64 ibdeltainprog; /* nonzero while a delta capture is in progress */
u64 ibsymdelta; /* accumulated symbol-error adjustment */
u64 ibsymsnap; /* symbol-error counter snapshot */
u64 iblnkerrdelta; /* accumulated link-error-recovery adjustment */
u64 iblnkerrsnap; /* link-error-recovery counter snapshot */
u64 ibcctrl; /* kr_ibcctrl shadow */
u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
u64 chase_end; /* time (jiffies? — TODO confirm) when chase gives up */
u32 last_delay_mult; /* last delay multiplier used — confirm units */
};
/*
 * This header file provides the declarations and common definitions
 * for (mostly) manipulation of the SerDes blocks within the IBA7220.
 * The functions declared should only be called from within other
 * 7220-related files such as qib_iba7220.c or qib_sd7220.c.
 */
/* apply SERDES preset values; returns 0 on success or a negative errno */
int qib_sd7220_presets(struct qib_devdata *dd);
/* full SERDES initialization; returns 0 on success or a negative errno */
int qib_sd7220_init(struct qib_devdata *dd);
/* program @len bytes of firmware image @img at @offset into SERDES @sdnum */
int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, u8 *img,
int len, int offset);
/* verify @len bytes of previously programmed image against @img at @offset */
int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, const u8 *img,
int len, int offset);
/* clear IB SERDES parity error state — confirm exact semantics in qib_sd7220.c */
void qib_sd7220_clr_ibpar(struct qib_devdata *);
/*
 * Below used for sdnum parameter, selecting one of the two sections
 * used for PCIe, or the single SerDes used for IB, which is the
 * only one currently used
 */
#define IB_7220_SERDES 2
/* load the IB SERDES firmware image; returns 0 on success */
int qib_sd7220_ib_load(struct qib_devdata *dd);
/* verify the IB SERDES firmware image; returns 0 on success */
int qib_sd7220_ib_vfy(struct qib_devdata *dd);
/*
 * Read a 32-bit kernel register.  Returns all-ones if the register
 * space is unmapped or the chip is not flagged present, so callers
 * can recognize a dead/absent device.
 */
static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
				  const u16 regno)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		return readl((u32 __iomem *)&dd->kregbase[regno]);
	return -1;
}
/*
 * Read a 64-bit kernel register.  Returns all-ones if the register
 * space is unmapped or the chip is not flagged present.
 */
static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
				  const u16 regno)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		return readq(&dd->kregbase[regno]);
	return -1;
}
/*
 * Write a 64-bit kernel register; silently does nothing if the
 * register space is not mapped.
 */
static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u16 regno, u64 value)
{
	if (!dd->kregbase)
		return;
	writeq(value, &dd->kregbase[regno]);
}
void set_7220_relock_poll(struct qib_devdata *, int);
void shutdown_7220_relock_poll(struct qib_devdata *);
void toggle_7220_rclkrls(struct qib_devdata *);
#endif /* _QIB_7220_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,758 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _QIB_COMMON_H
#define _QIB_COMMON_H
/*
* This file contains defines, structures, etc. that are used
* to communicate between kernel and user code.
*/
/* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */
#define QIB_SRC_OUI_1 0x00
#define QIB_SRC_OUI_2 0x11
#define QIB_SRC_OUI_3 0x75
/* version of protocol header (known to chip also). In the long run,
* we should be able to generate and accept a range of version numbers;
* for now we only accept one, and it's compiled in.
*/
#define IPS_PROTO_VERSION 2
/*
* These are compile time constants that you may want to enable or disable
* if you are trying to debug problems with code or performance.
* QIB_VERBOSE_TRACING define as 1 if you want additional tracing in
* fastpath code
* QIB_TRACE_REGWRITES define as 1 if you want register writes to be
* traced in faspath code
* _QIB_TRACING define as 0 if you want to remove all tracing in a
* compilation unit
*/
/*
* The value in the BTH QP field that QLogic_IB uses to differentiate
* an qlogic_ib protocol IB packet vs standard IB transport
* This it needs to be even (0x656b78), because the LSB is sometimes
* used for the MSB of context. The change may cause a problem
* interoperating with older software.
*/
#define QIB_KD_QP 0x656b78
/*
* These are the status bits readable (in ascii form, 64bit value)
* from the "status" sysfs file. For binary compatibility, values
* must remain as is; removed states can be reused for different
* purposes.
*/
#define QIB_STATUS_INITTED 0x1 /* basic initialization done */
/* Chip has been found and initted */
#define QIB_STATUS_CHIP_PRESENT 0x20
/* IB link is at ACTIVE, usable for data traffic */
#define QIB_STATUS_IB_READY 0x40
/* link is configured, LID, MTU, etc. have been set */
#define QIB_STATUS_IB_CONF 0x80
/* A Fatal hardware error has occurred. */
#define QIB_STATUS_HWERROR 0x200
/*
* The list of usermode accessible registers. Also see Reg_* later in file.
*/
/*
 * NOTE: this header is shared with user code (see file top comment),
 * so these register indices are ABI; do not renumber.
 */
enum qib_ureg {
	/* (RO) DMA RcvHdr to be used next. */
	ur_rcvhdrtail = 0,
	/* (RW) RcvHdr entry to be processed next by host. */
	ur_rcvhdrhead = 1,
	/* (RO) Index of next Eager index to use. */
	ur_rcvegrindextail = 2,
	/* (RW) Eager TID to be processed next */
	ur_rcvegrindexhead = 3,
	/* For internal use only; max register number. */
	_QIB_UregMax
};
/* bit values for spi_runtime_flags */
#define QIB_RUNTIME_PCIE 0x0002
#define QIB_RUNTIME_FORCE_WC_ORDER 0x0004
#define QIB_RUNTIME_RCVHDR_COPY 0x0008
#define QIB_RUNTIME_MASTER 0x0010
#define QIB_RUNTIME_RCHK 0x0020
#define QIB_RUNTIME_NODMA_RTAIL 0x0080
#define QIB_RUNTIME_SPECIAL_TRIGGER 0x0100
#define QIB_RUNTIME_SDMA 0x0200
#define QIB_RUNTIME_FORCE_PIOAVAIL 0x0400
#define QIB_RUNTIME_PIO_REGSWAPPED 0x0800
#define QIB_RUNTIME_CTXT_MSB_IN_QP 0x1000
#define QIB_RUNTIME_CTXT_REDIRECT 0x2000
#define QIB_RUNTIME_HDRSUPP 0x4000
/*
* This structure is returned by qib_userinit() immediately after
* open to get implementation-specific info, and info specific to this
* instance.
*
* This struct must have explict pad fields where type sizes
* may result in different alignments between 32 and 64 bit
* programs, since the 64 bit * bit kernel requires the user code
* to have matching offsets
*/
struct qib_base_info {
	/* version of hardware, for feature checking. */
	__u32 spi_hw_version;
	/* version of software, for feature checking. */
	__u32 spi_sw_version;
	/* QLogic_IB context assigned, goes into sent packets */
	__u16 spi_ctxt;
	/* subcontext within spi_ctxt, when contexts are shared */
	__u16 spi_subctxt;
	/*
	 * IB MTU, packets IB data must be less than this.
	 * The MTU is in bytes, and will be a multiple of 4 bytes.
	 */
	__u32 spi_mtu;
	/*
	 * Size of a PIO buffer.  Any given packet's total size must be less
	 * than this (in words).  Included is the starting control word, so
	 * if 513 is returned, then total pkt size is 512 words or less.
	 */
	__u32 spi_piosize;
	/* size of the TID cache in qlogic_ib, in entries */
	__u32 spi_tidcnt;
	/* size of the TID Eager list in qlogic_ib, in entries */
	__u32 spi_tidegrcnt;
	/* size of a single receive header queue entry in words. */
	__u32 spi_rcvhdrent_size;
	/*
	 * Count of receive header queue entries allocated.
	 * This may be less than the spu_rcvhdrcnt passed in!.
	 */
	__u32 spi_rcvhdr_cnt;
	/* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */
	__u32 spi_runtime_flags;
	/* address where hardware receive header queue is mapped */
	__u64 spi_rcvhdr_base;
	/*
	 * base address of eager TID receive buffers used by hardware;
	 * allocated by initialization code, not by protocol or the
	 * user program.
	 */
	__u64 spi_rcv_egrbufs;
	/*
	 * Size of each TID buffer in host memory, starting at
	 * spi_rcv_egrbufs.  The buffers are virtually contiguous.
	 */
	__u32 spi_rcv_egrbufsize;
	/*
	 * The special QP (queue pair) value that identifies an qlogic_ib
	 * protocol packet from standard IB packets.  More, probably much
	 * more, to be added.
	 */
	__u32 spi_qpair;
	/*
	 * User register base for init code, not to be used directly by
	 * protocol or applications.  Always points to chip registers,
	 * for normal or shared context.
	 */
	__u64 spi_uregbase;
	/*
	 * Maximum buffer size in bytes that can be used in a single TID
	 * entry (assuming the buffer is aligned to this boundary).  This is
	 * the minimum of what the hardware and software support Guaranteed
	 * to be a power of 2.
	 */
	__u32 spi_tid_maxsize;
	/*
	 * alignment of each pio send buffer (byte count
	 * to add to spi_piobufbase to get to second buffer)
	 */
	__u32 spi_pioalign;
	/*
	 * The index of the first pio buffer available to this process;
	 * needed to do lookup in spi_pioavailaddr; not added to
	 * spi_piobufbase.
	 */
	__u32 spi_pioindex;
	/* number of buffers mapped for this process */
	__u32 spi_piocnt;
	/*
	 * Base address of writeonly pio buffers for this process.
	 * Each buffer has spi_piosize words, and is aligned on spi_pioalign
	 * boundaries.  spi_piocnt buffers are mapped from this address
	 */
	__u64 spi_piobufbase;
	/*
	 * Base address of readonly memory copy of the pioavail registers.
	 * There are 2 bits for each buffer.
	 */
	__u64 spi_pioavailaddr;
	/*
	 * Address where driver updates a copy of the interface and driver
	 * status (QIB_STATUS_*) as a 64 bit value.  It's followed by a
	 * link status qword (formerly combined with driver status), then a
	 * string indicating hardware error, if there was one.
	 */
	__u64 spi_status;
	/* number of chip ctxts available to user processes */
	__u32 spi_nctxts;
	__u16 spi_unit; /* unit number of chip we are using */
	__u16 spi_port; /* IB port number we are using */
	/* num bufs in each contiguous set */
	__u32 spi_rcv_egrperchunk;
	/* size in bytes of each contiguous set */
	__u32 spi_rcv_egrchunksize;
	/* total size of mmap to cover full rcvegrbuffers */
	__u32 spi_rcv_egrbuftotlen;
	__u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */
	/* address of readonly memory copy of the rcvhdrq tail register. */
	__u64 spi_rcvhdr_tailaddr;
	/*
	 * shared memory pages for subctxts if ctxt is shared; these cover
	 * all the processes in the group sharing a single context.
	 * all have enough space for the num_subcontexts value on this job.
	 */
	__u64 spi_subctxt_uregbase;
	__u64 spi_subctxt_rcvegrbuf;
	__u64 spi_subctxt_rcvhdr_base;
	/* shared memory page for send buffer disarm status */
	__u64 spi_sendbuf_status;
} __attribute__ ((aligned(8)));
/*
* This version number is given to the driver by the user code during
* initialization in the spu_userversion field of qib_user_info, so
* the driver can check for compatibility with user code.
*
* The major version changes when data structures
* change in an incompatible way. The driver must be the same or higher
* for initialization to succeed. In some cases, a higher version
* driver will not interoperate with older software, and initialization
* will return an error.
*/
#define QIB_USER_SWMAJOR 1
/*
* Minor version differences are always compatible
* a within a major version, however if user software is larger
* than driver software, some new features and/or structure fields
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
#define QIB_USER_SWMINOR 10
#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
#ifndef QIB_KERN_TYPE
#define QIB_KERN_TYPE 0
#define QIB_IDSTR "QLogic kernel.org driver"
#endif
/*
* Similarly, this is the kernel version going back to the user. It's
* slightly different, in that we want to tell if the driver was built as
* part of a QLogic release, or from the driver from openfabrics.org,
* kernel.org, or a standard distribution, for support reasons.
* The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
*
* It's returned by the driver to the user code during initialization in the
* spi_sw_version field of qib_base_info, so the user code can in turn
* check for compatibility with the kernel.
*/
#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
/*
* This structure is passed to qib_userinit() to tell the driver where
* user code buffers are, sizes, etc. The offsets and sizes of the
* fields must remain unchanged, for binary compatibility. It can
* be extended, if userversion is changed so user code can tell, if needed
*/
struct qib_user_info {
	/*
	 * version of user software, to detect compatibility issues.
	 * Should be set to QIB_USER_SWVERSION.
	 */
	__u32 spu_userversion;

	/* reserved/padding; keeps 64-bit fields naturally aligned */
	__u32 _spu_unused2;

	/* size of struct base_info to write to */
	__u32 spu_base_info_size;

	/* reserved/padding */
	__u32 _spu_unused3;

	/*
	 * If two or more processes wish to share a context, each process
	 * must set the spu_subctxt_cnt and spu_subctxt_id to the same
	 * values.  The only restriction on the spu_subctxt_id is that
	 * it be unique for a given node.
	 */
	__u16 spu_subctxt_cnt;
	__u16 spu_subctxt_id;

	__u32 spu_port; /* IB port requested by user if > 0 */

	/*
	 * address of struct base_info to write to
	 */
	__u64 spu_base_info;

} __attribute__ ((aligned(8)));
/* User commands. */
/* 16 available, was: old set up userspace (for old user code) */
#define QIB_CMD_CTXT_INFO 17 /* find out what resources we got */
#define QIB_CMD_RECV_CTRL 18 /* control receipt of packets */
#define QIB_CMD_TID_UPDATE 19 /* update expected TID entries */
#define QIB_CMD_TID_FREE 20 /* free expected TID entries */
#define QIB_CMD_SET_PART_KEY 21 /* add partition key */
/* 22 available, was: return info on slave processes (for old user code) */
#define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */
#define QIB_CMD_USER_INIT 24 /* set up userspace */
#define QIB_CMD_UNUSED_1 25
#define QIB_CMD_UNUSED_2 26
#define QIB_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
#define QIB_CMD_POLL_TYPE 28 /* set the kind of polling we want */
#define QIB_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
/* 30 is unused */
#define QIB_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
#define QIB_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
/* 33 available, was a testing feature */
#define QIB_CMD_DISARM_BUFS 34 /* disarm send buffers w/ errors */
#define QIB_CMD_ACK_EVENT 35 /* ack & clear bits */
#define QIB_CMD_CPUS_LIST 36 /* list of cpus allocated, for pinned
* processes: qib_cpus_list */
/*
* QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for
* compatibility with libraries from previous release. The ACK_EVENT
* will take appropriate driver action (if any, just DISARM for now),
* then clear the bits passed in as part of the mask. These bits are
* in the first 64bit word at spi_sendbuf_status, and are passed to
* the driver in the event_mask union as well.
*/
#define _QIB_EVENT_DISARM_BUFS_BIT 0
#define _QIB_EVENT_LINKDOWN_BIT 1
#define _QIB_EVENT_LID_CHANGE_BIT 2
#define _QIB_EVENT_LMC_CHANGE_BIT 3
#define _QIB_EVENT_SL2VL_CHANGE_BIT 4
#define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT
#define QIB_EVENT_DISARM_BUFS_BIT (1UL << _QIB_EVENT_DISARM_BUFS_BIT)
#define QIB_EVENT_LINKDOWN_BIT (1UL << _QIB_EVENT_LINKDOWN_BIT)
#define QIB_EVENT_LID_CHANGE_BIT (1UL << _QIB_EVENT_LID_CHANGE_BIT)
#define QIB_EVENT_LMC_CHANGE_BIT (1UL << _QIB_EVENT_LMC_CHANGE_BIT)
#define QIB_EVENT_SL2VL_CHANGE_BIT (1UL << _QIB_EVENT_SL2VL_CHANGE_BIT)
/*
* Poll types
*/
#define QIB_POLL_TYPE_ANYRCV 0x0
#define QIB_POLL_TYPE_URGENT 0x1
/* Result written back to userspace for QIB_CMD_CTXT_INFO. */
struct qib_ctxt_info {
	__u16 num_active;	/* number of active units */
	__u16 unit;		/* unit (chip) assigned to caller */
	__u16 port;		/* IB port assigned to caller (1-based) */
	__u16 ctxt;		/* ctxt on unit assigned to caller */
	__u16 subctxt;		/* subctxt on unit assigned to caller */
	__u16 num_ctxts;	/* number of ctxts available on unit */
	__u16 num_subctxts;	/* number of subctxts opened on ctxt */
	__u16 rec_cpu;		/* cpu # for affinity (ffff if none) */
};
/* Argument block for QIB_CMD_TID_UPDATE / QIB_CMD_TID_FREE. */
struct qib_tid_info {
	__u32 tidcnt;
	/* make structure same size in 32 and 64 bit */
	__u32 tid__unused;
	/* virtual address of first page in transfer */
	__u64 tidvaddr;
	/* pointer (same size 32/64 bit) to __u16 tid array */
	__u64 tidlist;

	/*
	 * pointer (same size 32/64 bit) to bitmap of TIDs used
	 * for this call; checked for being large enough at open
	 */
	__u64 tidmap;
};
/*
 * Command block written by user code to the driver; @type is one of
 * the QIB_CMD_* values and selects which union member is meaningful.
 */
struct qib_cmd {
	__u32 type;			/* command type */
	union {
		struct qib_tid_info tid_info;
		struct qib_user_info user_info;

		/*
		 * address in userspace where we should put the sdma
		 * inflight counter
		 */
		__u64 sdma_inflight;
		/*
		 * address in userspace where we should put the sdma
		 * completion counter
		 */
		__u64 sdma_complete;
		/* address in userspace of struct qib_ctxt_info to
		   write result to */
		__u64 ctxt_info;
		/* enable/disable receipt of packets */
		__u32 recv_ctrl;
		/* enable/disable armlaunch errors (non-zero to enable) */
		__u32 armlaunch_ctrl;
		/* partition key to set */
		__u16 part_key;
		/* user address of __u32 bitmask of active slaves */
		__u64 slave_mask_addr;
		/* type of polling we want */
		__u16 poll_type;
		/* back pressure enable bit for one particular context */
		__u8 ctxt_bp;
		/* qib_user_event_ack(), IPATH_EVENT_* bits */
		__u64 event_mask;
	} cmd;
};
/* Fixed-size (32/64-bit identical) analogue of struct iovec. */
struct qib_iovec {
	/* Pointer to data, but same size 32 and 64 bit */
	__u64 iov_base;

	/*
	 * Length of data; don't need 64 bits, but want
	 * qib_sendpkt to remain same size as before 32 bit changes, so...
	 */
	__u64 iov_len;
};
/*
* Describes a single packet for send. Each packet can have one or more
* buffers, but the total length (exclusive of IB headers) must be less
* than the MTU, and if using the PIO method, entire packet length,
* including IB headers, must be less than the qib_piosize value (words).
* Use of this necessitates including sys/uio.h
*/
/* Describes one packet to send as a gather list of qib_iovec entries. */
struct __qib_sendpkt {
	__u32 sps_flags;	/* flags for packet (TBD) */
	__u32 sps_cnt;		/* number of entries to use in sps_iov */
	/* array of iov's describing packet. TEMPORARY */
	struct qib_iovec sps_iov[4];
};
/*
* Diagnostics can send a packet by "writing" the following
* structs to the diag data special file.
* This allows a custom
* pbc (+ static rate) qword, so that special modes and deliberate
* changes to CRCs can be used. The elements were also re-ordered
* for better alignment and to avoid padding issues.
*/
#define _DIAG_XPKT_VERS 3
struct qib_diag_xpkt {
	__u16 version;	/* structure layout version (_DIAG_XPKT_VERS) */
	__u16 unit;	/* unit (chip) to send on */
	__u16 port;	/* IB port to send on */
	__u16 len;	/* packet length; units (bytes vs words) — TODO confirm */
	__u64 data;	/* user address of packet payload — TODO confirm */
	__u64 pbc_wd;	/* custom PBC (+ static rate) qword, see comment above */
};
/*
* Data layout in I2C flash (for GUID, etc.)
* All fields are little-endian binary unless otherwise stated
*/
#define QIB_FLASH_VERSION 2
/* NOTE: on-flash layout; field order and sizes are fixed by the format. */
struct qib_flash {
	/* flash layout version (QIB_FLASH_VERSION) */
	__u8 if_fversion;
	/* checksum protecting if_length bytes */
	__u8 if_csum;
	/*
	 * valid length (in use, protected by if_csum), including
	 * if_fversion and if_csum themselves)
	 */
	__u8 if_length;
	/* the GUID, in network order */
	__u8 if_guid[8];
	/* number of GUIDs to use, starting from if_guid */
	__u8 if_numguid;
	/* the (last 10 characters of) board serial number, in ASCII */
	char if_serial[12];
	/* board mfg date (YYYYMMDD ASCII) */
	char if_mfgdate[8];
	/* last board rework/test date (YYYYMMDD ASCII) */
	char if_testdate[8];
	/* logging of error counts, TBD */
	__u8 if_errcntp[4];
	/* powered on hours, updated at driver unload */
	__u8 if_powerhour[2];
	/* ASCII free-form comment field */
	char if_comment[32];
	/* Backwards compatible prefix for longer QLogic Serial Numbers */
	char if_sprefix[4];
	/* 82 bytes used, min flash size is 128 bytes */
	__u8 if_future[46];
};
/*
* These are the counters implemented in the chip, and are listed in order.
* The InterCaps naming is taken straight from the chip spec.
*/
struct qlogic_ib_counters {
	/*
	 * Field order mirrors the chip's counter layout (see comment
	 * above); do not reorder or insert fields.
	 */
	__u64 LBIntCnt;
	__u64 LBFlowStallCnt;
	__u64 TxSDmaDescCnt;	/* was Reserved1 */
	__u64 TxUnsupVLErrCnt;
	__u64 TxDataPktCnt;
	__u64 TxFlowPktCnt;
	__u64 TxDwordCnt;
	__u64 TxLenErrCnt;
	__u64 TxMaxMinLenErrCnt;
	__u64 TxUnderrunCnt;
	__u64 TxFlowStallCnt;
	__u64 TxDroppedPktCnt;
	__u64 RxDroppedPktCnt;
	__u64 RxDataPktCnt;
	__u64 RxFlowPktCnt;
	__u64 RxDwordCnt;
	__u64 RxLenErrCnt;
	__u64 RxMaxMinLenErrCnt;
	__u64 RxICRCErrCnt;
	__u64 RxVCRCErrCnt;
	__u64 RxFlowCtrlErrCnt;
	__u64 RxBadFormatCnt;
	__u64 RxLinkProblemCnt;
	__u64 RxEBPCnt;
	__u64 RxLPCRCErrCnt;
	__u64 RxBufOvflCnt;
	__u64 RxTIDFullErrCnt;
	__u64 RxTIDValidErrCnt;
	__u64 RxPKeyMismatchCnt;
	/* per-port header-queue / eager-buffer overflow counters */
	__u64 RxP0HdrEgrOvflCnt;
	__u64 RxP1HdrEgrOvflCnt;
	__u64 RxP2HdrEgrOvflCnt;
	__u64 RxP3HdrEgrOvflCnt;
	__u64 RxP4HdrEgrOvflCnt;
	__u64 RxP5HdrEgrOvflCnt;
	__u64 RxP6HdrEgrOvflCnt;
	__u64 RxP7HdrEgrOvflCnt;
	__u64 RxP8HdrEgrOvflCnt;
	__u64 RxP9HdrEgrOvflCnt;
	__u64 RxP10HdrEgrOvflCnt;
	__u64 RxP11HdrEgrOvflCnt;
	__u64 RxP12HdrEgrOvflCnt;
	__u64 RxP13HdrEgrOvflCnt;
	__u64 RxP14HdrEgrOvflCnt;
	__u64 RxP15HdrEgrOvflCnt;
	__u64 RxP16HdrEgrOvflCnt;
	__u64 IBStatusChangeCnt;
	__u64 IBLinkErrRecoveryCnt;
	__u64 IBLinkDownedCnt;
	__u64 IBSymbolErrCnt;
	__u64 RxVL15DroppedPktCnt;
	__u64 RxOtherLocalPhyErrCnt;
	__u64 PcieRetryBufDiagQwordCnt;
	__u64 ExcessBufferOvflCnt;
	__u64 LocalLinkIntegrityErrCnt;
	__u64 RxVlErrCnt;
	__u64 RxDlidFltrCnt;
};
/*
* The next set of defines are for packet headers, and chip register
* and memory bits that are visible to and/or used by user-mode software.
*/
/* RcvHdrFlags bits */
#define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF
#define QLOGIC_IB_RHF_LENGTH_SHIFT 0
#define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7
#define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11
#define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF
#define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16
#define QLOGIC_IB_RHF_SEQ_MASK 0xF
#define QLOGIC_IB_RHF_SEQ_SHIFT 0
#define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF
#define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4
#define QLOGIC_IB_RHF_H_ICRCERR 0x80000000
#define QLOGIC_IB_RHF_H_VCRCERR 0x40000000
#define QLOGIC_IB_RHF_H_PARITYERR 0x20000000
#define QLOGIC_IB_RHF_H_LENERR 0x10000000
#define QLOGIC_IB_RHF_H_MTUERR 0x08000000
#define QLOGIC_IB_RHF_H_IHDRERR 0x04000000
#define QLOGIC_IB_RHF_H_TIDERR 0x02000000
#define QLOGIC_IB_RHF_H_MKERR 0x01000000
#define QLOGIC_IB_RHF_H_IBERR 0x00800000
#define QLOGIC_IB_RHF_H_ERR_MASK 0xFF800000
#define QLOGIC_IB_RHF_L_USE_EGR 0x80000000
#define QLOGIC_IB_RHF_L_SWA 0x00008000
#define QLOGIC_IB_RHF_L_SWB 0x00004000
/* qlogic_ib header fields */
#define QLOGIC_IB_I_VERS_MASK 0xF
#define QLOGIC_IB_I_VERS_SHIFT 28
#define QLOGIC_IB_I_CTXT_MASK 0xF
#define QLOGIC_IB_I_CTXT_SHIFT 24
#define QLOGIC_IB_I_TID_MASK 0x7FF
#define QLOGIC_IB_I_TID_SHIFT 13
#define QLOGIC_IB_I_OFFSET_MASK 0x1FFF
#define QLOGIC_IB_I_OFFSET_SHIFT 0
/* K_PktFlags bits */
#define QLOGIC_IB_KPF_INTR 0x1
#define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3
#define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1
#define QLOGIC_IB_MAX_SUBCTXT 4
/* SendPIO per-buffer control */
#define QLOGIC_IB_SP_TEST 0x40
#define QLOGIC_IB_SP_TESTEBP 0x20
#define QLOGIC_IB_SP_TRIGGER_SHIFT 15
/* SendPIOAvail bits */
#define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1
#define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0
/* qlogic_ib header format */
struct qib_header {
	/*
	 * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset -
	 * 14 bits before ECO change ~28 Dec 03.  After that, Vers 4,
	 * Context 4, TID 11, offset 13.
	 */
	__le32 ver_ctxt_tid_offset;
	__le16 chksum;		/* checksum; coverage — TODO confirm */
	__le16 pkt_flags;	/* K_PktFlags (QLOGIC_IB_KPF_* bits) */
};
/*
* qlogic_ib user message header format.
* This structure contains the first 4 fields common to all protocols
* that employ qlogic_ib.
*/
struct qib_message_header {
	__be16 lrh[4];		/* IB local route header (network order) */
	__be32 bth[3];		/* IB base transport header (network order) */
	/* fields below this point are in host byte order */
	struct qib_header iph;	/* qlogic_ib-specific header */
	__u8 sub_opcode;	/* protocol sub-opcode — TODO confirm semantics */
};
/* IB - LRH header consts */
#define QIB_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
#define QIB_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
/* misc. */
#define SIZE_OF_CRC 1
#define QIB_DEFAULT_P_KEY 0xFFFF
#define QIB_PERMISSIVE_LID 0xFFFF
#define QIB_AETH_CREDIT_SHIFT 24
#define QIB_AETH_CREDIT_MASK 0x1F
#define QIB_AETH_CREDIT_INVAL 0x1F
#define QIB_PSN_MASK 0xFFFFFF
#define QIB_MSN_MASK 0xFFFFFF
#define QIB_QPN_MASK 0xFFFFFF
#define QIB_MULTICAST_LID_BASE 0xC000
#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
#define QIB_MULTICAST_QPN 0xFFFFFF
/* Receive Header Queue: receive type (from qlogic_ib) */
#define RCVHQ_RCV_TYPE_EXPECTED 0
#define RCVHQ_RCV_TYPE_EAGER 1
#define RCVHQ_RCV_TYPE_NON_KD 2
#define RCVHQ_RCV_TYPE_ERROR 3
#define QIB_HEADER_QUEUE_WORDS 9
/* functions for extracting fields from rcvhdrq entries for the driver.
*/
/* Return just the error-flag bits from the second RHF word. */
static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf)
{
	__u32 rhf1 = __le32_to_cpu(rbuf[1]);

	return rhf1 & QLOGIC_IB_RHF_H_ERR_MASK;
}
/* Extract the receive type (RCVHQ_RCV_TYPE_*) from the first RHF word. */
static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf)
{
	__u32 rhf0 = __le32_to_cpu(rbuf[0]);

	return (rhf0 >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) &
		QLOGIC_IB_RHF_RCVTYPE_MASK;
}
/* Packet length from the first RHF word, converted from dwords to bytes. */
static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf)
{
	__u32 rhf0 = __le32_to_cpu(rbuf[0]);
	__u32 dwords = (rhf0 >> QLOGIC_IB_RHF_LENGTH_SHIFT) &
		QLOGIC_IB_RHF_LENGTH_MASK;

	return dwords << 2;
}
/* Extract the eager-buffer index from the first RHF word. */
static inline __u32 qib_hdrget_index(const __le32 *rbuf)
{
	__u32 rhf0 = __le32_to_cpu(rbuf[0]);

	return (rhf0 >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) &
		QLOGIC_IB_RHF_EGRINDEX_MASK;
}
/* Extract the sequence number from the second RHF word. */
static inline __u32 qib_hdrget_seq(const __le32 *rbuf)
{
	__u32 rhf1 = __le32_to_cpu(rbuf[1]);

	return (rhf1 >> QLOGIC_IB_RHF_SEQ_SHIFT) & QLOGIC_IB_RHF_SEQ_MASK;
}
/* Extract the header-queue offset from the second RHF word. */
static inline __u32 qib_hdrget_offset(const __le32 *rbuf)
{
	__u32 rhf1 = __le32_to_cpu(rbuf[1]);

	return (rhf1 >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) &
		QLOGIC_IB_RHF_HDRQ_OFFSET_MASK;
}
/* Nonzero when the RHF indicates the payload used an eager buffer. */
static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
{
	__u32 rhf0 = __le32_to_cpu(rbuf[0]);

	return rhf0 & QLOGIC_IB_RHF_L_USE_EGR;
}
/* Extract the qlogic_ib header version field from a header word. */
static inline __u32 qib_hdrget_qib_ver(__le32 hdrword)
{
	__u32 w = __le32_to_cpu(hdrword);

	return (w >> QLOGIC_IB_I_VERS_SHIFT) & QLOGIC_IB_I_VERS_MASK;
}
#endif /* _QIB_COMMON_H */

View File

@ -0,0 +1,484 @@
/*
* Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "qib_verbs.h"
/**
 * qib_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
{
	struct qib_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned) cq->ibcq.cqe) {
		/* clamp an out-of-range (possibly user-corrupted) head */
		head = cq->ibcq.cqe;
		next = 0;
	} else
		next = head + 1;
	/* queue full: report IB_EVENT_CQ_ERR instead of overwriting */
	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	if (cq->ip) {
		/*
		 * CQ is mmap'ed to userspace: copy field-by-field into the
		 * ABI-stable ib_uverbs_wc layout (not a struct assignment).
		 */
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data =
			(__u32 __force)entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = entry->slid;
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else
		wc->kqueue[head] = *entry;
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		/* one-shot notification: disarm before scheduling callback */
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		queue_work(qib_cq_wq, &cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);
}
/**
 * qib_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled, or -EINVAL for a
 * user-mapped CQ (userspace polls those directly).
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct qib_cq *cq = to_icq(ibcq);
	struct qib_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip) {
		npolled = -EINVAL;
		goto bail;
	}

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
	tail = wc->tail;
	/* clamp a tail that is somehow out of range before indexing */
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;	/* queue empty */
		/* The kernel doesn't need a RMB since it has the lock. */
		*entry = wc->kqueue[tail];
		/* advance tail with wraparound at cqe */
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

bail:
	return npolled;
}
/*
 * Workqueue handler scheduled by qib_cq_enter(); invokes the CQ's
 * completion callback, repeating while new completions arrive.
 */
static void send_complete(struct work_struct *work)
{
	struct qib_cq *cq = container_of(work, struct qib_cq, comptask);

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, queue_work()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		/* snapshot before the callback; compared after it returns */
		u8 triggered = cq->triggered;

		/*
		 * IPoIB connected mode assumes the callback is from a
		 * soft IRQ.  We simulate this by blocking "bottom halves".
		 * See the implementation for ipoib_cm_handle_tx_wc(),
		 * netif_tx_lock_bh() and netif_tx_lock().
		 */
		local_bh_disable();
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		local_bh_enable();

		if (cq->triggered == triggered)
			return;
	}
}
/**
 * qib_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @comp_vector: unused by the QLogic_IB driver
 * @context: unused by the QLogic_IB driver
 * @udata: user data for libibverbs.so
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
			    int comp_vector, struct ib_ucontext *context,
			    struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_cq *cq;
	struct qib_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;

	if (entries < 1 || entries > ib_qib_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	/* user CQs hold ib_uverbs_wc entries, kernel CQs hold ib_wc */
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = qib_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	/* enforce the device-wide cap on number of CQs */
	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_qib_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	/* make the mmap info visible to qib_mmap() */
	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	INIT_WORK(&cq->comptask, send_complete);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

/* error unwind: release resources in reverse order of acquisition */
bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
/**
 * qib_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int qib_destroy_cq(struct ib_cq *ibcq)
{
	struct qib_ibdev *dev = to_idev(ibcq->device);
	struct qib_cq *cq = to_icq(ibcq);

	/* Wait for any queued completion callback work to finish first. */
	flush_work(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
	if (cq->ip)
		/* Queue memory is freed when the last mmap reference drops. */
		kref_put(&cq->ip->ref, qib_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}
/**
 * qib_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 1 if IB_CQ_REPORT_MISSED_EVENTS was requested and there are
 * completions already queued, 0 otherwise.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct qib_cq *cq = to_icq(ibcq);
	unsigned long irq_flags;
	int missed_events = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		missed_events = 1;

	spin_unlock_irqrestore(&cq->lock, irq_flags);

	return missed_events;
}
/**
 * qib_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the minimum number of entries the resized queue must hold
 * @udata: user data for libibverbs.so (new mmap offset returned here)
 *
 * Returns 0 for success.
 */
int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct qib_cq *cq = to_icq(ibcq);
	struct qib_cq_wc *old_wc;
	struct qib_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > ib_qib_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	/* n = number of entries currently queued in the old ring. */
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
	/* Copy the queued entries to the start of the new ring. */
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct qib_ibdev *dev = to_idev(ibcq->device);
		struct qib_mmap_info *ip = cq->ip;

		qib_update_mmap_info(dev, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See qib_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
bail:
	return ret;
}

View File

@ -0,0 +1,894 @@
/*
* Copyright (c) 2010 QLogic Corporation. All rights reserved.
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file contains support for diagnostic functions. It is accessed by
* opening the qib_diag device, normally minor number 129. Diagnostic use
* of the QLogic_IB chip may render the chip or board unusable until the
* driver is unloaded, or in some cases, until the system is rebooted.
*
* Accesses to the chip through this interface are not similar to going
* through the /sys/bus/pci resource mmap interface.
*/
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include "qib.h"
#include "qib_common.h"
/*
* Each client that opens the diag device must read then write
* offset 0, to prevent lossage from random cat or od. diag_state
* sequences this "handshake".
*/
/* Handshake progress for one client; sequenced as described above. */
enum diag_state { UNUSED = 0, OPENED, INIT, READY };

/* State for an individual client. PID so children cannot abuse handshake */
static struct qib_diag_client {
	struct qib_diag_client *next;	/* open-client list or free-pool link */
	struct qib_devdata *dd;		/* unit this client has open */
	pid_t pid;			/* opener's pid; enforced on each I/O */
	enum diag_state state;
} *client_pool;		/* recycled structs; protected by qib_mutex */

/*
 * Get a client struct. Recycled if possible, else kmalloc.
 * Must be called with qib_mutex held
 */
static struct qib_diag_client *get_client(struct qib_devdata *dd)
{
	struct qib_diag_client *dc;

	dc = client_pool;
	if (dc)
		/* got from pool remove it and use */
		client_pool = dc->next;
	else
		/* None in pool, alloc and init */
		dc = kmalloc(sizeof *dc, GFP_KERNEL);

	if (dc) {
		dc->next = NULL;
		dc->dd = dd;
		dc->pid = current->pid;
		dc->state = OPENED;	/* handshake not yet started */
	}
	return dc;
}

/*
 * Return to pool. Must be called with qib_mutex held
 */
static void return_client(struct qib_diag_client *dc)
{
	struct qib_devdata *dd = dc->dd;
	struct qib_diag_client *tdc, *rdc;

	rdc = NULL;
	/* Unlink dc from the device's open-client list. */
	if (dc == dd->diag_client) {
		dd->diag_client = dc->next;
		rdc = dc;
	} else {
		tdc = dc->dd->diag_client;
		while (tdc) {
			if (dc == tdc->next) {
				tdc->next = dc->next;
				rdc = dc;
				break;
			}
			tdc = tdc->next;
		}
	}
	/* If found, scrub the struct and push it onto the free pool. */
	if (rdc) {
		rdc->state = UNUSED;
		rdc->dd = NULL;
		rdc->pid = 0;
		rdc->next = client_pool;
		client_pool = rdc;
	}
}
static int qib_diag_open(struct inode *in, struct file *fp);
static int qib_diag_release(struct inode *in, struct file *fp);
static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off);
static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off);

/* Per-unit diag device: raw read/write access to chip address space. */
static const struct file_operations diag_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_diag_write,
	.read = qib_diag_read,
	.open = qib_diag_open,
	.release = qib_diag_release
};

/* One shared diagpkt cdev for all units, refcounted by diagpkt_count. */
static atomic_t diagpkt_count = ATOMIC_INIT(0);
static struct cdev *diagpkt_cdev;
static struct device *diagpkt_device;

static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
				 size_t count, loff_t *off);

/* Write-only device used to inject raw IB packets. */
static const struct file_operations diagpkt_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_diagpkt_write,
};
/*
 * qib_diag_add - create the diag char devices for one unit
 * @dd: the qlogic_ib device
 *
 * The shared "ipath_diagpkt" cdev is created once, when the first unit
 * registers; each unit additionally gets its own "ipath_diag<N>" cdev.
 * (The "ipath_" prefix is kept, presumably for compatibility with
 * existing diagnostic tools -- confirm before renaming.)
 */
int qib_diag_add(struct qib_devdata *dd)
{
	char name[16];
	int ret = 0;

	if (atomic_inc_return(&diagpkt_count) == 1) {
		ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
				    &diagpkt_file_ops, &diagpkt_cdev,
				    &diagpkt_device);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
	ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
			    &diag_file_ops, &dd->diag_cdev,
			    &dd->diag_device);
done:
	return ret;
}

static void qib_unregister_observers(struct qib_devdata *dd);

/*
 * qib_diag_remove - tear down the diag devices and per-unit diag state
 * @dd: the qlogic_ib device
 */
void qib_diag_remove(struct qib_devdata *dd)
{
	struct qib_diag_client *dc;

	/* Last unit out removes the shared diagpkt device. */
	if (atomic_dec_and_test(&diagpkt_count))
		qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);

	qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);

	/*
	 * Return all diag_clients of this device. There should be none,
	 * as we are "guaranteed" that no clients are still open
	 */
	while (dd->diag_client)
		return_client(dd->diag_client);

	/* Now clean up all unused client structs */
	while (client_pool) {
		dc = client_pool;
		client_pool = dc->next;
		kfree(dc);
	}
	/* Clean up observer list */
	qib_unregister_observers(dd);
}
/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
 *
 * @dd: the qlogic_ib device
 * @offset: the offset in chip-space
 * @cntp: Pointer to max (byte) count for transfer starting at offset
 * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
 * mapping. It is needed because with the use of PAT for control of
 * write-combining, the logically contiguous address-space of the chip
 * may be split into virtually non-contiguous spaces, with different
 * attributes, which are then mapped to contiguous physical space
 * based from the first BAR.
 *
 * The code below makes the same assumptions as were made in
 * init_chip_wc_pat() (qib_init.c), copied here:
 * Assumes chip address space looks like:
 *		- kregs + sregs + cregs + uregs (in any order)
 *		- piobufs (2K and 4K bufs in either order)
 *	or:
 *		- kregs + sregs + cregs (in any order)
 *		- piobufs (2K and 4K bufs in either order)
 *		- uregs
 *
 * If cntp is non-NULL, returns how many bytes from offset can be accessed
 * Returns 0 if the offset is not mapped.
 */
static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
				       u32 *cntp)
{
	u32 kreglen;
	u32 snd_bottom, snd_lim = 0;
	u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
	u32 __iomem *map = NULL;
	u32 cnt = 0;

	/* First, simplest case, offset is within the first map. */
	kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
	if (offset < kreglen) {
		map = krb32 + (offset / sizeof(u32));
		cnt = kreglen - offset;
		goto mapped;
	}

	/*
	 * Next check for user regs, the next most common case,
	 * and a cheap check because if they are not in the first map
	 * they are last in chip.
	 */
	if (dd->userbase) {
		/* If user regs mapped, they are after send, so set limit. */
		u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
		snd_lim = dd->uregbase;
		krb32 = (u32 __iomem *)dd->userbase;
		if (offset >= dd->uregbase && offset < ulim) {
			map = krb32 + (offset - dd->uregbase) / sizeof(u32);
			cnt = ulim - offset;
			goto mapped;
		}
	}

	/*
	 * Lastly, check for offset within Send Buffers.
	 * This is gnarly because struct devdata is deliberately vague
	 * about things like 7322 VL15 buffers, and we are not in
	 * chip-specific code here, so should not make many assumptions.
	 * The one we _do_ make is that the only chip that has more sndbufs
	 * than we admit is the 7322, and it has userregs above that, so
	 * we know the snd_lim.
	 */
	/* Assume 2K buffers are first. */
	snd_bottom = dd->pio2k_bufbase;
	if (snd_lim == 0) {
		u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
		snd_lim = snd_bottom + tot2k;
	}
	/* If 4k buffers exist, account for them by bumping
	 * appropriate limit.
	 */
	if (dd->piobcnt4k) {
		u32 tot4k = dd->piobcnt4k * dd->align4k;
		/* 4k region base lives in the high half of piobufbase. */
		u32 offs4k = dd->piobufbase >> 32;
		if (snd_bottom > offs4k)
			snd_bottom = offs4k;
		else {
			/* 4k above 2k. Bump snd_lim, if needed*/
			if (!dd->userbase)
				snd_lim = offs4k + tot4k;
		}
	}
	/*
	 * Judgement call: can we ignore the space between SendBuffs and
	 * UserRegs, where we would like to see vl15 buffs, but not more?
	 */
	if (offset >= snd_bottom && offset < snd_lim) {
		offset -= snd_bottom;
		map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
		cnt = snd_lim - offset;
	}

mapped:
	if (cntp)
		*cntp = cnt;
	return map;
}
/*
 * qib_read_umem64 - read a 64-bit quantity from the chip into user space
 * @dd: the qlogic_ib device
 * @uaddr: the location to store the data in user memory
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @count: number of bytes to copy (multiple of 32 bits)
 *
 * This function also localizes all chip memory accesses.
 * The copy should be written such that we read full cacheline packets
 * from the chip.  This is usually used for a single qword
 *
 * NOTE:  This assumes the chip address is 64-bit aligned.
 *
 * Returns 0 on success, -EINVAL if the offset is unmapped or the chip
 * is absent, -EFAULT on a user copy failure.  count is clamped to the
 * mapped limit reported by qib_remap_ioaddr32().
 */
static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
			   u32 regoffs, size_t count)
{
	const u64 __iomem *reg_addr;
	const u64 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u64));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u64 data = readq(reg_addr);

		if (copy_to_user(uaddr, &data, sizeof(u64))) {
			ret = -EFAULT;
			goto bail;
		}
		reg_addr++;
		uaddr += sizeof(u64);
	}
	ret = 0;
bail:
	return ret;
}

/*
 * qib_write_umem64 - write a 64-bit quantity to the chip from user space
 * @dd: the qlogic_ib device
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @uaddr: the source of the data in user memory
 * @count: the number of bytes to copy (multiple of 32 bits)
 *
 * This is usually used for a single qword
 * NOTE:  This assumes the chip address is 64-bit aligned.
 *
 * Returns 0 on success, -EINVAL or -EFAULT as qib_read_umem64().
 */
static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
			    const void __user *uaddr, size_t count)
{
	u64 __iomem *reg_addr;
	const u64 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u64));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u64 data;
		if (copy_from_user(&data, uaddr, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}
		writeq(data, reg_addr);

		reg_addr++;
		uaddr += sizeof(u64);
	}
	ret = 0;
bail:
	return ret;
}

/*
 * qib_read_umem32 - read a 32-bit quantity from the chip into user space
 * @dd: the qlogic_ib device
 * @uaddr: the location to store the data in user memory
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @count: number of bytes to copy
 *
 * read 32 bit values, not 64 bit; for memories that only
 * support 32 bit reads; usually a single dword.
 */
static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
			   u32 regoffs, size_t count)
{
	const u32 __iomem *reg_addr;
	const u32 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u32));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u32 data = readl(reg_addr);

		if (copy_to_user(uaddr, &data, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}

		reg_addr++;
		uaddr += sizeof(u32);
	}
	ret = 0;
bail:
	return ret;
}

/*
 * qib_write_umem32 - write a 32-bit quantity to the chip from user space
 * @dd: the qlogic_ib device
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @uaddr: the source of the data in user memory
 * @count: number of bytes to copy
 *
 * write 32 bit values, not 64 bit; for memories that only
 * support 32 bit write; usually a single dword.
 */
static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
			    const void __user *uaddr, size_t count)
{
	u32 __iomem *reg_addr;
	const u32 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u32));

	while (reg_addr < reg_end) {
		u32 data;

		if (copy_from_user(&data, uaddr, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}
		writel(data, reg_addr);

		reg_addr++;
		uaddr += sizeof(u32);
	}
	ret = 0;
bail:
	return ret;
}
/*
 * qib_diag_open - open the per-unit diag device
 *
 * Allocates a client struct and links it at the head of the unit's
 * open-client list.  The client must still complete the read-then-write
 * handshake on offset 0 (see diag_state) before arbitrary I/O works.
 */
static int qib_diag_open(struct inode *in, struct file *fp)
{
	int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
	struct qib_devdata *dd;
	struct qib_diag_client *dc;
	int ret;

	mutex_lock(&qib_mutex);

	dd = qib_lookup(unit);

	if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
	    !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}

	dc = get_client(dd);
	if (!dc) {
		ret = -ENOMEM;
		goto bail;
	}
	dc->next = dd->diag_client;
	dd->diag_client = dc;
	fp->private_data = dc;
	ret = 0;
bail:
	mutex_unlock(&qib_mutex);

	return ret;
}
/**
 * qib_diagpkt_write - write an IB packet
 * @fp: the diag data device file pointer
 * @data: qib_diag_pkt structure saying where to get the packet
 * @count: size of data to write
 * @off: unused by this code
 *
 * Returns sizeof(struct qib_diag_xpkt) on success or a negative errno.
 */
static ssize_t qib_diagpkt_write(struct file *fp,
				 const char __user *data,
				 size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, clen, pbufn;
	struct qib_diag_xpkt dp;
	u32 *tmpbuf = NULL;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	ssize_t ret = 0;

	/* The write must be exactly one descriptor struct. */
	if (count != sizeof(dp)) {
		ret = -EINVAL;
		goto bail;
	}
	if (copy_from_user(&dp, data, sizeof(dp))) {
		ret = -EFAULT;
		goto bail;
	}

	dd = qib_lookup(dp.unit);
	if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}
	if (!(dd->flags & QIB_INITTED)) {
		/* no hardware, freeze, etc. */
		ret = -ENODEV;
		goto bail;
	}

	if (dp.version != _DIAG_XPKT_VERS) {
		qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
			    dp.version);
		ret = -EINVAL;
		goto bail;
	}
	/* send count must be an exact number of dwords */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}
	if (!dp.port || dp.port > dd->num_pports) {
		ret = -EINVAL;
		goto bail;
	}
	ppd = &dd->pport[dp.port - 1];

	/* need total length before first word written */
	/* +1 word is for the qword padding */
	plen = sizeof(u32) + dp.len;
	clen = dp.len >> 2;	/* payload length in dwords */

	if ((plen + 4) > ppd->ibmaxlen) {
		ret = -EINVAL;
		goto bail;      /* before writing pbc */
	}
	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, "
			 "failing\n");
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   (const void __user *) (unsigned long) dp.data,
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	plen >>= 2;             /* in dwords */

	/* A zero PBC word means "use the computed packet length". */
	if (dp.pbc_wd == 0)
		dp.pbc_wd = plen;

	piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
	if (!piobuf) {
		ret = -EBUSY;
		goto bail;
	}
	/* disarm it just to be extra sure */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));

	/* disable header check on pbufn for this packet */
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);

	/* PBC word goes first, then payload starting at piobuf + 2. */
	writeq(dp.pbc_wd, piobuf);
	/*
	 * Copy all but the trigger word, then flush, so it's written
	 * to chip before trigger word, then write trigger word, then
	 * flush again, so packet is sent.
	 */
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, tmpbuf, clen - 1);
		qib_flush_wc();
		__raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
	} else
		qib_pio_copy(piobuf + 2, tmpbuf, clen);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}
	/*
	 * Ensure buffer is written to the chip, then re-enable
	 * header checks (if supported by chip).  The txchk
	 * code will ensure seen by chip before returning.
	 */
	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);
	return ret;
}
/* Release the client struct back to the pool on last close. */
static int qib_diag_release(struct inode *in, struct file *fp)
{
	mutex_lock(&qib_mutex);
	return_client(fp->private_data);
	fp->private_data = NULL;
	mutex_unlock(&qib_mutex);
	return 0;
}

/*
 * Chip-specific code calls to register its interest in
 * a specific range.
 */
struct diag_observer_list_elt {
	struct diag_observer_list_elt *next;	/* LIFO stack link */
	const struct diag_observer *op;		/* address range + hook */
};
/*
 * qib_register_observer - register a diag-transaction observer
 * @dd: the qlogic_ib device
 * @op: observer descriptor giving the address range and hook
 *
 * Pushes the observer onto the head of the device's observer stack
 * (protected by dd->qib_diag_trans_lock).
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM on
 * allocation failure.
 *
 * Note: the previous version re-tested "if (olp)" immediately after
 * the "!olp" failure path had already bailed out; that dead check is
 * removed here with no behavior change.
 */
int qib_register_observer(struct qib_devdata *dd,
			  const struct diag_observer *op)
{
	struct diag_observer_list_elt *olp;
	unsigned long flags;

	if (!dd || !op)
		return -EINVAL;

	/* vmalloc to match the vfree() in qib_unregister_observers(). */
	olp = vmalloc(sizeof *olp);
	if (!olp) {
		printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
	olp->op = op;
	olp->next = dd->diag_observer_list;
	dd->diag_observer_list = olp;
	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);

	return 0;
}
/* Remove all registered observers when device is closed */
static void qib_unregister_observers(struct qib_devdata *dd)
{
	struct diag_observer_list_elt *olp;
	unsigned long flags;

	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
	olp = dd->diag_observer_list;
	while (olp) {
		/* Pop one observer, let go of lock */
		dd->diag_observer_list = olp->next;
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		/* freed outside the spinlock; pop-then-free one at a time */
		vfree(olp);
		/* try again. */
		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		olp = dd->diag_observer_list;
	}
	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
}
/*
 * Walk the observer stack (newest first) and return the first observer
 * whose [bottom, top] range contains @addr, or NULL if none matches.
 * Caller must hold the diag transaction lock.
 */
static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
						     u32 addr)
{
	struct diag_observer_list_elt *elt;

	for (elt = dd->diag_observer_list; elt; elt = elt->next) {
		const struct diag_observer *op = elt->op;

		if (addr >= op->bottom && addr <= op->top)
			return op;
	}

	return NULL;
}
/*
 * qib_diag_read - read from chip address space on behalf of diag client
 *
 * Enforces the opener's pid, 32-bit alignment, and the offset-0
 * handshake (first read must be 8 bytes at offset 0).  An observer
 * registered for the target range intercepts the access; otherwise
 * the chip is read directly via qib_read_umem32/64.
 *
 * Returns the byte count on success or a negative errno.
 *
 * Note: removed the local "kreg_base", which was assigned from
 * dd->kregbase but never used (dead code; no behavior change).
 */
static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off)
{
	struct qib_diag_client *dc = fp->private_data;
	struct qib_devdata *dd = dc->dd;
	ssize_t ret;

	/* Only the process that opened the device may use it. */
	if (dc->pid != current->pid) {
		ret = -EPERM;
		goto bail;
	}

	if (count == 0)
		ret = 0;
	else if ((count % 4) || (*off % 4))
		/* address or length is not 32-bit aligned, hence invalid */
		ret = -EINVAL;
	else if (dc->state < READY && (*off || count != 8))
		ret = -EINVAL; /* prevent cat /dev/qib_diag* */
	else {
		unsigned long flags;
		u64 data64 = 0;
		int use_32;
		const struct diag_observer *op;

		use_32 = (count % 8) || (*off % 8);
		ret = -1;
		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		/*
		 * Check for observer on this address range.
		 * we only support a single 32 or 64-bit read
		 * via observer, currently.
		 */
		op = diag_get_observer(dd, *off);
		if (op) {
			u32 offset = *off;

			ret = op->hook(dd, op, offset, &data64, 0, use_32);
		}
		/*
		 * We need to release lock before any copy_to_user(),
		 * whether implicit in qib_read_umem* or explicit below.
		 */
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		if (!op) {
			if (use_32)
				/*
				 * Address or length is not 64-bit aligned;
				 * do 32-bit rd
				 */
				ret = qib_read_umem32(dd, data, (u32) *off,
						      count);
			else
				ret = qib_read_umem64(dd, data, (u32) *off,
						      count);
		} else if (ret == count) {
			/* Below finishes case where observer existed */
			ret = copy_to_user(data, &data64, use_32 ?
					   sizeof(u32) : sizeof(u64));
			if (ret)
				ret = -EFAULT;
		}
	}

	if (ret >= 0) {
		*off += count;
		ret = count;
		if (dc->state == OPENED)
			dc->state = INIT;	/* read half of handshake done */
	}
bail:
	return ret;
}
/*
 * qib_diag_write - write to chip address space on behalf of diag client
 *
 * Mirrors qib_diag_read(): pid check, 32-bit alignment, and the
 * handshake rule that the first write must be the 8 bytes at offset 0
 * while in INIT state.  For single 32/64-bit writes an observer hook
 * may intercept; otherwise the chip is written directly.
 *
 * Returns the byte count on success or a negative errno.
 *
 * Note: removed the local "kreg_base", which was assigned from
 * dd->kregbase but never used (dead code; no behavior change).
 */
static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off)
{
	struct qib_diag_client *dc = fp->private_data;
	struct qib_devdata *dd = dc->dd;
	ssize_t ret;

	/* Only the process that opened the device may use it. */
	if (dc->pid != current->pid) {
		ret = -EPERM;
		goto bail;
	}

	if (count == 0)
		ret = 0;
	else if ((count % 4) || (*off % 4))
		/* address or length is not 32-bit aligned, hence invalid */
		ret = -EINVAL;
	else if (dc->state < READY &&
		 ((*off || count != 8) || dc->state != INIT))
		/* No writes except second-step of init seq */
		ret = -EINVAL; /* before any other write allowed */
	else {
		unsigned long flags;
		const struct diag_observer *op = NULL;
		int use_32 = (count % 8) || (*off % 8);

		/*
		 * Check for observer on this address range.
		 * We only support a single 32 or 64-bit write
		 * via observer, currently. This helps, because
		 * we would otherwise have to jump through hoops
		 * to make "diag transaction" meaningful when we
		 * cannot do a copy_from_user while holding the lock.
		 */
		if (count == 4 || count == 8) {
			u64 data64;
			u32 offset = *off;

			ret = copy_from_user(&data64, data, count);
			if (ret) {
				ret = -EFAULT;
				goto bail;
			}
			spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
			op = diag_get_observer(dd, *off);
			if (op)
				ret = op->hook(dd, op, offset, &data64, ~0Ull,
					       use_32);
			spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		}

		if (!op) {
			if (use_32)
				/*
				 * Address or length is not 64-bit aligned;
				 * do 32-bit write
				 */
				ret = qib_write_umem32(dd, (u32) *off, data,
						       count);
			else
				ret = qib_write_umem64(dd, (u32) *off, data,
						       count);
		}
	}

	if (ret >= 0) {
		*off += count;
		ret = count;
		if (dc->state == INIT)
			dc->state = READY; /* all read/write OK now */
	}
bail:
	return ret;
}

View File

@ -0,0 +1,182 @@
/*
* Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/types.h>
#include <linux/scatterlist.h>
#include "qib_verbs.h"
#define BAD_DMA_ADDRESS ((u64) 0)
/*
* The following functions implement driver specific replacements
* for the ib_dma_*() functions.
*
* These functions return kernel virtual addresses instead of
* device bus addresses since the driver uses the CPU to copy
* data instead of using hardware DMA.
*/
/* A "mapping" fails only when the page had no kernel virtual address. */
static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == BAD_DMA_ADDRESS;
}

/* "Map" a kernel buffer: the CPU virtual address doubles as the handle. */
static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
			      size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return (u64) cpu_addr;
}

/* Nothing to release; the mapping was just the virtual address. */
static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
/*
 * "Map" a page by returning its kernel virtual address plus offset.
 * A mapping that would cross the page boundary, or a page with no
 * kernel virtual address, yields BAD_DMA_ADDRESS.
 */
static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	u64 addr = BAD_DMA_ADDRESS;

	BUG_ON(!valid_dma_direction(direction));

	if (offset + size <= PAGE_SIZE) {
		addr = (u64) page_address(page);
		/* TODO: handle highmem pages */
		if (addr)
			addr += offset;
	}

	return addr;
}
/* No unmapping work to do; only the direction argument is validated. */
static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
			       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
/*
 * Validate that every scatterlist page has a kernel virtual address.
 * Returns nents on success, 0 if any page is unmapped (e.g. highmem).
 */
static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
		      int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	for_each_sg(sgl, sg, nents, i) {
		/* TODO: handle highmem pages */
		if (!page_address(sg_page(sg)))
			return 0;
	}

	return nents;
}
/* Nothing to undo for the no-op scatterlist "mapping". */
static void qib_unmap_sg(struct ib_device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

/* "Bus address" of an sg entry is its kernel virtual address + offset. */
static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
	u64 addr = (u64) page_address(sg_page(sg));

	if (addr)
		addr += sg->offset;
	return addr;
}

/* Mapped length equals the original sg length; no bounce buffering. */
static unsigned int qib_sg_dma_len(struct ib_device *dev,
				   struct scatterlist *sg)
{
	return sg->length;
}

/* CPU does the copies itself, so sync operations are no-ops. */
static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
				    size_t size, enum dma_data_direction dir)
{
}

static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
				       size_t size,
				       enum dma_data_direction dir)
{
}
/*
 * Allocate page-backed "coherent" memory; the kernel virtual address
 * serves as both CPU pointer and DMA handle.  Returns NULL on failure.
 */
static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
				    u64 *dma_handle, gfp_t flag)
{
	void *addr = NULL;
	struct page *pg = alloc_pages(flag, get_order(size));

	if (pg)
		addr = page_address(pg);
	if (dma_handle)
		*dma_handle = (u64) addr;

	return addr;
}
/* Free pages obtained from qib_dma_alloc_coherent(). */
static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
				  void *cpu_addr, u64 dma_handle)
{
	free_pages((unsigned long) cpu_addr, get_order(size));
}

/*
 * Driver-specific replacements for the ib_dma_*() functions; see the
 * comment at the top of this file.
 */
struct ib_dma_mapping_ops qib_dma_mapping_ops = {
	.mapping_error = qib_mapping_error,
	.map_single = qib_dma_map_single,
	.unmap_single = qib_dma_unmap_single,
	.map_page = qib_dma_map_page,
	.unmap_page = qib_dma_unmap_page,
	.map_sg = qib_map_sg,
	.unmap_sg = qib_unmap_sg,
	.dma_address = qib_sg_dma_address,
	.dma_len = qib_sg_dma_len,
	.sync_single_for_cpu = qib_sync_single_for_cpu,
	.sync_single_for_device = qib_sync_single_for_device,
	.alloc_coherent = qib_dma_alloc_coherent,
	.free_coherent = qib_dma_free_coherent
};

View File

@ -0,0 +1,665 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "qib.h"
/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_qib_version[] = QIB_IDSTR "\n";

/* Protects qib_dev_list; taken with irqsave everywhere. */
DEFINE_SPINLOCK(qib_devs_lock);
LIST_HEAD(qib_dev_list);
DEFINE_MUTEX(qib_mutex);	/* general driver use */

unsigned qib_ibmtu;
module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
/* fixed: description string was missing its closing parenthesis */
MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");

unsigned qib_compat_ddr_negotiate = 1;
module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(compat_ddr_negotiate,
		 "Attempt pre-IBTA 1.2 DDR speed negotiation");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("QLogic <support@qlogic.com>");
MODULE_DESCRIPTION("QLogic IB driver");

/*
 * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
 * PIO send buffers.  This is well beyond anything currently
 * defined in the InfiniBand spec.
 */
#define QIB_PIO_MAXIBHDR 128

/* Driver-wide statistics, exported through the qibfs driver_stats file. */
struct qlogic_ib_stats qib_stats;
/*
 * Return a printable name ("infinipathN") for the given unit number.
 *
 * Uses a single static buffer, so the result is overwritten by the
 * next call; callers must consume the string before calling again
 * (not reentrant).
 */
const char *qib_get_unit_name(int unit)
{
	static char name_buf[16];

	snprintf(name_buf, sizeof(name_buf), "infinipath%u", unit);
	return name_buf;
}
/*
 * Count the units (devices) that have at least one port whose link is
 * in INIT or better (ARMED/ACTIVE) state.
 */
int qib_count_active_units(void)
{
	struct qib_devdata *dd;
	unsigned long flags;
	int active = 0;

	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry(dd, &qib_dev_list, list) {
		int port_idx;

		if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
			continue;
		for (port_idx = 0; port_idx < dd->num_pports; ++port_idx) {
			struct qib_pportdata *ppd = dd->pport + port_idx;

			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
			    QIBL_LINKARMED | QIBL_LINKACTIVE))) {
				/* one live port is enough for this unit */
				active++;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&qib_devs_lock, flags);
	return active;
}
/*
 * Return the total number of units; optionally also report (through the
 * out-parameters) how many are usable (present) and how many ports are
 * currently up.
 */
int qib_count_units(int *npresentp, int *nupp)
{
	struct qib_devdata *dd;
	unsigned long flags;
	int total = 0, present = 0, ports_up = 0;

	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry(dd, &qib_dev_list, list) {
		int port_idx;

		total++;
		if ((dd->flags & QIB_PRESENT) && dd->kregbase)
			present++;
		for (port_idx = 0; port_idx < dd->num_pports; ++port_idx) {
			struct qib_pportdata *ppd = dd->pport + port_idx;

			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
			    QIBL_LINKARMED | QIBL_LINKACTIVE)))
				ports_up++;
		}
	}
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	if (npresentp)
		*npresentp = present;
	if (nupp)
		*nupp = ports_up;
	return total;
}
/**
 * qib_wait_linkstate - wait for an IB link state change to occur
 * @ppd: the qlogic_ib per-port data
 * @state: the state (lflags mask) to wait for
 * @msecs: the number of milliseconds to wait
 *
 * wait up to msecs milliseconds for IB link state change to occur for
 * now, take the easy polling route.  Currently used only by
 * qib_set_linkstate.  Returns 0 if state reached, otherwise
 * -ETIMEDOUT state can have multiple states set, for any of several
 * transitions.  Returns -EBUSY if another waiter is already pending.
 */
int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	/* only one waiter at a time */
	if (ppd->state_wanted) {
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		ret = -EBUSY;
		goto bail;
	}
	ppd->state_wanted = state;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wait_event_interruptible_timeout(ppd->state_wait,
					 (ppd->lflags & state),
					 msecs_to_jiffies(msecs));
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->state_wanted = 0;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	/* success is judged by the flags, not the wait's return value */
	if (!(ppd->lflags & state))
		ret = -ETIMEDOUT;
	else
		ret = 0;
bail:
	return ret;
}
/*
 * qib_set_linkstate - request an IB link state change
 * @ppd: the per-port data
 * @newstate: one of the QIB_IB_LINK* request codes
 *
 * The LINKDOWN requests are issued to the chip and return immediately;
 * ARM and ACTIVE requests wait (up to 10 msec, via qib_wait_linkstate())
 * for the transition to be observed.  Returns 0 on success, -EINVAL for
 * a bad request or unmet precondition, or the wait's error code.
 */
int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
{
	u32 lstate;
	int ret;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	switch (newstate) {
	case QIB_IB_LINKDOWN_ONLY:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_SLEEP:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_DISABLE:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKARM:
		if (ppd->lflags & QIBL_LINKARMED) {
			/* already there; nothing to do */
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		/*
		 * Since the port can be ACTIVE when we ask for ARMED,
		 * clear QIBL_LINKV so we can wait for a transition.
		 * If the link isn't ARMED, then something else happened
		 * and there is no point waiting for ARMED.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKV;
		break;

	case QIB_IB_LINKACTIVE:
		if (ppd->lflags & QIBL_LINKACTIVE) {
			/* already there; nothing to do */
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & QIBL_LINKARMED)) {
			/* must go through ARMED first */
			ret = -EINVAL;
			goto bail;
		}
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKACTIVE;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}
	ret = qib_wait_linkstate(ppd, lstate, 10);

bail:
	return ret;
}
/*
 * Translate an eager buffer index into its virtual address.  Eager
 * buffers are allocated in chunks, not as one contiguous region, so
 * the index is split into a chunk number and an offset within it.
 */
static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
{
	const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
	const u32 offset = etail % rcd->rcvegrbufs_perchunk;

	return rcd->rcvegrbuf[chunk] + offset * rcd->dd->rcvegrbufsize;
}
/*
 * Returns 1 if the receive-header error was a CRC error, else 0.
 * Needed for some chips' synthesized error counters.
 */
static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
			  u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
			  struct qib_message_header *hdr)
{
	return (eflags & (QLOGIC_IB_RHF_H_ICRCERR |
			  QLOGIC_IB_RHF_H_VCRCERR)) ? 1 : 0;
}
/*
 * qib_kreceive - receive a packet
 * @rcd: the qlogic_ib context
 * @llic: gets count of good packets needed to clear lli,
 *        (used with chips that need to track crcs for lli)
 * @npkts: out: number of packets consumed (including the error ones)
 *
 * called from interrupt handler for errors or receive interrupt
 * Returns number of CRC error packets, needed by some chips for
 * local link integrity tracking.   crcs are adjusted down by following
 * good packets, if any, and count of good packets is also tracked.
 */
u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct qib_message_header *hdr;
	u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
	int last;
	u64 lval;
	struct qib_qp *qp, *nqp;

	l = rcd->head;
	rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
	if (dd->flags & QIB_NODMA_RTAIL) {
		/* no DMA'ed tail register: detect new packets by seq number */
		u32 seq = qib_hdrget_seq(rhf_addr);
		if (seq != rcd->seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = qib_get_rcvhdrtail(rcd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
	}

	/* i counts packets handled; loop until 'last' packet is seen */
	for (last = 0, i = 1; !last; i += !last) {
		hdr = dd->f_get_msgheader(dd, rhf_addr);
		eflags = qib_hdrget_err_flags(rhf_addr);
		etype = qib_hdrget_rcv_type(rhf_addr);
		/* total length */
		tlen = qib_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->flags & QIB_NODMA_RTAIL) ?
		    qib_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			etail = qib_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype >= RCVHQ_RCV_TYPE_NON_KD)
				ebuf = qib_get_egrbuf(rcd, etail);
		}
		if (!eflags) {
			/* cross-check LRH length against the RHF length */
			u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;

			if (lrh_len != tlen) {
				qib_stats.sps_lenerrs++;
				goto move_along;
			}
		}
		if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
		    ebuf == NULL &&
		    tlen > (dd->rcvhdrentsize - 2 + 1 -
			    qib_hdrget_offset(rhf_addr)) << 2) {
			goto move_along;
		}

		/*
		 * Both tiderr and qibhdrerr are set for all plain IB
		 * packets; only qibhdrerr should be set.
		 */
		if (unlikely(eflags))
			crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
					       etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			qib_ib_rcv(rcd, hdr, ebuf, tlen);
			/* a good packet "forgives" one earlier crc error */
			if (crcs)
				crcs--;
			else if (llic && *llic)
				--*llic;
		}
move_along:
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
		if (dd->flags & QIB_NODMA_RTAIL) {
			u32 seq = qib_hdrget_seq(rhf_addr);

			/* sequence counter runs 1..13, then wraps */
			if (++rcd->seq_cnt > 13)
				rcd->seq_cnt = 1;
			if (seq != rcd->seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;
		/*
		 * Update head regs etc., every 16 packets, if not last pkt,
		 * to help prevent rcvhdrq overflows, when many packets
		 * are processed and queue is nearly full.
		 * Don't request an interrupt for intermediate updates.
		 */
		lval = l;
		if (!last && !(i & 0xf)) {
			dd->f_update_usrhead(rcd, lval, updegr, etail);
			updegr = 0;
		}
	}

	rcd->head = l;
	rcd->pkt_count += i;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & QIB_R_RSP_NAK) {
			qp->r_flags &= ~QIB_R_RSP_NAK;
			qib_send_rc_ack(qp);
		}
		if (qp->r_flags & QIB_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~QIB_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_qib_state_ops[qp->state] &
					QIB_PROCESS_OR_FLUSH_SEND)
				qib_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		/* drop the reference taken when the QP was queued */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}

bail:
	/* Report number of packets consumed */
	if (npkts)
		*npkts = i;

	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
	dd->f_update_usrhead(rcd, lval, updegr, etail);
	return crcs;
}
/**
 * qib_set_mtu - set the MTU
 * @ppd: the perport data
 * @arg: the new MTU
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.   For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link INIT state...
 *
 * Returns 0 on success, -EINVAL if @arg is not a legal IB MTU value
 * or exceeds the cap set by the 'ibmtu' module parameter.
 */
int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
{
	u32 piosize;
	int ret, chk;

	/* only the IB-legal MTU sizes are accepted */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ret = -EINVAL;
		goto bail;
	}
	/* enforce the 'ibmtu' module-parameter cap, if one was given */
	chk = ib_mtu_enum_to_int(qib_ibmtu);
	if (chk > 0 && arg > chk) {
		ret = -EINVAL;
		goto bail;
	}

	piosize = ppd->ibmaxlen;
	ppd->ibmtu = arg;

	if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != ppd->init_ibmaxlen) {
			if (arg > piosize && arg <= ppd->init_ibmaxlen)
				piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
			ppd->ibmaxlen = piosize;
		}
	} else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
		/* shrink ibmaxlen to just cover MTU + max header */
		piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
		ppd->ibmaxlen = piosize;
	}

	/* let the chip-specific code react to the new MTU */
	ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);

	ret = 0;

bail:
	return ret;
}
/*
 * qib_set_lid - record the LID/LMC for a port and program the chip
 * @ppd: the per-port data
 * @lid: the new local identifier
 * @lmc: the new LID mask count
 *
 * The chip config word packs the LID in the low 16 bits and the
 * complemented LMC mask in the upper 16 bits.  Always returns 0.
 */
int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
{
	struct qib_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;

	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
			 lid | (~((1U << lmc) - 1)) << 16);

	qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
		    dd->unit, ppd->port, lid);

	return 0;
}
/*
 * Following deal with the "obviously simple" task of overriding the state
 * of the LEDs, which normally indicate link physical and logical status.
 * The complications arise in dealing with different hardware mappings
 * and the board-dependent routine being called from interrupts.
 * And then there's the requirement to _flash_ them.
 */
/* blink frequency lives in bits 8..15 of the override value */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)
/*
 * Timer callback for LED override: advance to the other blink phase,
 * publish that phase's LED value, kick the board-specific LED code,
 * and re-arm the timer while an override is still requested.
 */
static void qib_run_led_override(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = ppd->dd;
	int timeoff;
	int ph_idx;

	if (!(dd->flags & QIB_INITTED))
		return;

	/* alternate between phase 0 and phase 1 each tick */
	ph_idx = ppd->led_override_phase++ & 1;
	ppd->led_override = ppd->led_override_vals[ph_idx];
	timeoff = ppd->led_override_timeoff;

	dd->f_setextled(ppd, 1);
	/*
	 * don't re-fire the timer if user asked for it to be off; we let
	 * it fire one more time after they turn it off to simplify
	 */
	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}
/*
 * qib_set_led_override - set or clear a user LED-override request
 * @ppd: the per-port data
 * @val: low nybble(s) give the LED value(s); bits 8-15 give a blink
 *       frequency (0 = steady, polled at 1 Hz)
 *
 * A @val of 0 lets the LEDs revert to normal operation (after one
 * final timer tick; see qib_run_led_override()).
 */
void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
{
	struct qib_devdata *dd = ppd->dd;
	int timeoff, freq;

	if (!(dd->flags & QIB_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = val & 0xF;
	}
	ppd->led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 * NOTE(review): the atomic_inc_return()/atomic_dec() pairing looks
	 * racy if two callers arrive concurrently -- confirm callers
	 * serialize (e.g. via sysfs) before relying on it.
	 */
	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
		/* Need to start timer */
		init_timer(&ppd->led_override_timer);
		ppd->led_override_timer.function = qib_run_led_override;
		ppd->led_override_timer.data = (unsigned long) ppd;
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
	} else {
		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
			mod_timer(&ppd->led_override_timer, jiffies + 1);
		atomic_dec(&ppd->led_override_timer_active);
	}
}
/**
 * qib_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 *
 * Returns 0 on success; -ENODEV for an unknown unit, -ENXIO if the unit
 * is not present/initialized, -EBUSY if user contexts are open, -EAGAIN
 * if the chip-level reset itself failed, or qib_init()'s error.
 */
int qib_reset_device(int unit)
{
	int ret, i;
	struct qib_devdata *dd = qib_lookup(unit);
	struct qib_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
		qib_devinfo(dd->pcidev, "Invalid unit number %u or "
			    "not initialized or not present\n", unit);
		ret = -ENXIO;
		goto bail;
	}

	/* refuse the reset while any user context is in use */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	/* quiesce LEDs and send DMA on every port before resetting */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (atomic_read(&ppd->led_override_timer_active)) {
			/* Need to stop LED timer, _then_ shut off LEDs */
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}

		/* Shut off LEDs after we are sure timer is not running */
		ppd->led_override = LED_OVER_BOTH_OFF;
		dd->f_setextled(ppd, 0);
		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);
	}

	ret = dd->f_reset(dd);
	if (ret == 1)
		/* chip reset reported success; bring the unit back up */
		ret = qib_init(dd, 1);
	else
		ret = -EAGAIN;
	if (ret)
		qib_dev_err(dd, "Reinitialize unit %u after "
			    "reset failed with %d\n", unit, ret);
	else
		qib_devinfo(dd->pcidev, "Reinitialized unit %u after "
			    "resetting\n", unit);

bail:
	return ret;
}

View File

@ -0,0 +1,451 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include "qib.h"
/*
 * Functions specific to the serial EEPROM on cards handled by ib_qib.
 * The actual serial interface code is in qib_twsi.c; this file is a client.
 */

/**
 * qib_eeprom_read - receive bytes from the eeprom via I2C
 * @dd: the qlogic_ib device
 * @eeprom_offset: address to read from
 * @buff: where to store the result
 * @len: number of bytes to receive
 *
 * Takes eep_lock; returns 0 on success or a negative/twsi error code.
 */
int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
		    void *buff, int len)
{
	int ret = mutex_lock_interruptible(&dd->eep_lock);

	if (ret)
		return ret;

	ret = qib_twsi_reset(dd);
	if (ret)
		qib_dev_err(dd, "EEPROM Reset for read failed\n");
	else
		ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
				      eeprom_offset, buff, len);
	mutex_unlock(&dd->eep_lock);

	return ret;
}
/*
 * Actually update the eeprom, first doing write enable if
 * needed, then restoring write enable state.
 * Must be called with eep_lock held.
 */
static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
				    const void *buf, int len)
{
	int ret, pwen;

	/* enable writes, remembering the prior state so we can restore it */
	pwen = dd->f_eeprom_wen(dd, 1);
	ret = qib_twsi_reset(dd);
	if (ret)
		qib_dev_err(dd, "EEPROM Reset for write failed\n");
	else
		ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
				      offset, buf, len);
	/* restore the previous write-enable state */
	dd->f_eeprom_wen(dd, pwen);
	return ret;
}
/**
 * qib_eeprom_write - writes data to the eeprom via I2C
 * @dd: the qlogic_ib device
 * @eeprom_offset: where to place data
 * @buff: data to write
 * @len: number of bytes to write
 *
 * Takes eep_lock and delegates to eeprom_write_with_enable().
 */
int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
		     const void *buff, int len)
{
	int ret = mutex_lock_interruptible(&dd->eep_lock);

	if (ret)
		return ret;

	ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
	mutex_unlock(&dd->eep_lock);

	return ret;
}
/*
 * Compute (and, when @adjust is set, store) the one-byte checksum of a
 * flash image: sum of all bytes except the stored checksum itself,
 * complemented.  The summed length is clamped to the struct size so an
 * erased eeprom cannot make us read past the buffer we were passed.
 */
static u8 flash_csum(struct qib_flash *ifp, int adjust)
{
	const u8 *bytes = (const u8 *) ifp;
	u8 sum = 0;
	u8 nbytes = ifp->if_length;
	int i;

	if (nbytes > sizeof(struct qib_flash))
		nbytes = sizeof(struct qib_flash);
	for (i = 0; i < nbytes; i++)
		sum += bytes[i];
	/* back out the stored checksum byte, then complement */
	sum -= ifp->if_csum;
	sum = ~sum;
	if (adjust)
		ifp->if_csum = sum;

	return sum;
}
/**
 * qib_get_eeprom_info- get the GUID et al. from the TSWI EEPROM device
 * @dd: the qlogic_ib device
 *
 * We have the capability to use the nguid field, and get
 * the guid from the first chip's flash, to use for all of them.
 */
void qib_get_eeprom_info(struct qib_devdata *dd)
{
	void *buf;
	struct qib_flash *ifp;
	__be64 guid;
	int len, eep_stat;
	u8 csum, *bguid;
	int t = dd->unit;
	struct qib_devdata *dd0 = qib_lookup(0);

	/*
	 * For a secondary unit, derive the GUID from unit 0's base GUID
	 * (when unit 0 advertises enough GUIDs) instead of reading this
	 * unit's flash.
	 */
	if (t && dd0->nguid > 1 && t <= dd0->nguid) {
		u8 oguid;
		dd->base_guid = dd0->base_guid;
		bguid = (u8 *) &dd->base_guid;

		oguid = bguid[7];
		bguid[7] += t;
		if (oguid > bguid[7]) {
			/* low byte wrapped; carry into the higher bytes */
			if (bguid[6] == 0xff) {
				if (bguid[5] == 0xff) {
					/* would carry into the OUI: give up */
					qib_dev_err(dd, "Can't set %s GUID"
						    " from base, wraps to"
						    " OUI!\n",
						    qib_get_unit_name(t));
					dd->base_guid = 0;
					goto bail;
				}
				bguid[5]++;
			}
			bguid[6]++;
		}
		dd->nguid = 1;
		goto bail;
	}

	/*
	 * Read full flash, not just currently used part, since it may have
	 * been written with a newer definition.
	 */
	len = sizeof(struct qib_flash);
	buf = vmalloc(len);
	if (!buf) {
		qib_dev_err(dd, "Couldn't allocate memory to read %u "
			    "bytes from eeprom for GUID\n", len);
		goto bail;
	}

	/*
	 * Use "public" eeprom read function, which does locking and
	 * figures out device. This will migrate to chip-specific.
	 */
	eep_stat = qib_eeprom_read(dd, 0, buf, len);

	if (eep_stat) {
		qib_dev_err(dd, "Failed reading GUID from eeprom\n");
		goto done;
	}
	ifp = (struct qib_flash *)buf;

	csum = flash_csum(ifp, 0);
	if (csum != ifp->if_csum) {
		qib_devinfo(dd->pcidev, "Bad I2C flash checksum: "
			    "0x%x, not 0x%x\n", csum, ifp->if_csum);
		goto done;
	}
	if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
	    *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
		qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n",
			    *(unsigned long long *) ifp->if_guid);
		/* don't allow GUID if all 0 or all 1's */
		goto done;
	}

	/* complain, but allow it */
	if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
		qib_devinfo(dd->pcidev, "Warning, GUID %llx is "
			    "default, probably not correct!\n",
			    *(unsigned long long *) ifp->if_guid);

	bguid = ifp->if_guid;
	if (!bguid[0] && !bguid[1] && !bguid[2]) {
		/*
		 * Original incorrect GUID format in flash; fix in
		 * core copy, by shifting up 2 octets; don't need to
		 * change top octet, since both it and shifted are 0.
		 */
		bguid[1] = bguid[3];
		bguid[2] = bguid[4];
		bguid[3] = 0;
		bguid[4] = 0;
		guid = *(__be64 *) ifp->if_guid;
	} else
		guid = *(__be64 *) ifp->if_guid;
	dd->base_guid = guid;
	dd->nguid = ifp->if_numguid;
	/*
	 * Things are slightly complicated by the desire to transparently
	 * support both the Pathscale 10-digit serial number and the QLogic
	 * 13-character version.
	 */
	if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] &&
	    ((u8 *) ifp->if_sprefix)[0] != 0xFF) {
		char *snp = dd->serial;

		/*
		 * This board has a Serial-prefix, which is stored
		 * elsewhere for backward-compatibility.
		 */
		memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
		snp[sizeof ifp->if_sprefix] = '\0';
		len = strlen(snp);
		snp += len;
		/* append as much of the serial number as will fit */
		len = (sizeof dd->serial) - len;
		if (len > sizeof ifp->if_serial)
			len = sizeof ifp->if_serial;
		memcpy(snp, ifp->if_serial, len);
	} else
		memcpy(dd->serial, ifp->if_serial,
		       sizeof ifp->if_serial);
	if (!strstr(ifp->if_comment, "Tested successfully"))
		qib_dev_err(dd, "Board SN %s did not pass functional "
			    "test: %s\n", dd->serial, ifp->if_comment);

	/* carry the error-counter log over from flash */
	memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
	/*
	 * Power-on (actually "active") hours are kept as little-endian value
	 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
	 * atomic_t while running.
	 */
	atomic_set(&dd->active_time, 0);
	dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);

done:
	vfree(buf);

bail:;
}
/**
 * qib_update_eeprom_log - copy active-time and error counters to eeprom
 * @dd: the qlogic_ib device
 *
 * Although the time is kept as seconds in the qib_devdata struct, it is
 * rounded to hours for re-write, as we have only 16 bits in EEPROM.
 * First-cut code reads whole (expected) struct qib_flash, modifies,
 * re-writes. Future direction: read/write only what we need, assuming
 * that the EEPROM had to have been "good enough" for driver init, and
 * if not, we aren't making it worse.
 *
 * Returns 0 when nothing needed doing or the update succeeded,
 * non-zero on any failure.
 */
int qib_update_eeprom_log(struct qib_devdata *dd)
{
	void *buf;
	struct qib_flash *ifp;
	int len, hi_water;
	uint32_t new_time, new_hrs;
	u8 csum;
	int ret, idx;
	unsigned long flags;

	/* first, check if we actually need to do anything. */
	ret = 0;
	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
		if (dd->eep_st_new_errs[idx]) {
			ret = 1;
			break;
		}
	}
	new_time = atomic_read(&dd->active_time);

	/* no new errors and less than an hour accumulated: nothing to log */
	if (ret == 0 && new_time < 3600)
		goto bail;

	/*
	 * The quick-check above determined that there is something worthy
	 * of logging, so get current contents and do a more detailed idea.
	 * read full flash, not just currently used part, since it may have
	 * been written with a newer definition
	 */
	len = sizeof(struct qib_flash);
	buf = vmalloc(len);
	ret = 1;
	if (!buf) {
		qib_dev_err(dd, "Couldn't allocate memory to read %u "
			    "bytes from eeprom for logging\n", len);
		goto bail;
	}

	/* Grab semaphore and read current EEPROM. If we get an
	 * error, let go, but if not, keep it until we finish write.
	 */
	ret = mutex_lock_interruptible(&dd->eep_lock);
	if (ret) {
		qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
		goto free_bail;
	}
	ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
	if (ret) {
		mutex_unlock(&dd->eep_lock);
		qib_dev_err(dd, "Unable read EEPROM for logging\n");
		goto free_bail;
	}
	ifp = (struct qib_flash *)buf;

	csum = flash_csum(ifp, 0);
	if (csum != ifp->if_csum) {
		mutex_unlock(&dd->eep_lock);
		qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
			    csum, ifp->if_csum);
		ret = 1;
		goto free_bail;
	}
	/* hi_water tracks the highest modified offset, so we write the
	 * minimal prefix of the image that covers all changes */
	hi_water = 0;
	spin_lock_irqsave(&dd->eep_st_lock, flags);
	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
		int new_val = dd->eep_st_new_errs[idx];
		if (new_val) {
			/*
			 * If we have seen any errors, add to EEPROM values
			 * We need to saturate at 0xFF (255) and we also
			 * would need to adjust the checksum if we were
			 * trying to minimize EEPROM traffic
			 * Note that we add to actual current count in EEPROM,
			 * in case it was altered while we were running.
			 */
			new_val += ifp->if_errcntp[idx];
			if (new_val > 0xFF)
				new_val = 0xFF;
			if (ifp->if_errcntp[idx] != new_val) {
				ifp->if_errcntp[idx] = new_val;
				hi_water = offsetof(struct qib_flash,
						    if_errcntp) + idx;
			}
			/*
			 * update our shadow (used to minimize EEPROM
			 * traffic), to match what we are about to write.
			 */
			dd->eep_st_errs[idx] = new_val;
			dd->eep_st_new_errs[idx] = 0;
		}
	}
	/*
	 * Now update active-time. We would like to round to the nearest hour
	 * but unless atomic_t are sure to be proper signed ints we cannot,
	 * because we need to account for what we "transfer" to EEPROM and
	 * if we log an hour at 31 minutes, then we would need to set
	 * active_time to -29 to accurately count the _next_ hour.
	 */
	if (new_time >= 3600) {
		new_hrs = new_time / 3600;
		atomic_sub((new_hrs * 3600), &dd->active_time);
		new_hrs += dd->eep_hrs;
		/* 16-bit field in EEPROM: saturate */
		if (new_hrs > 0xFFFF)
			new_hrs = 0xFFFF;
		dd->eep_hrs = new_hrs;
		if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
			ifp->if_powerhour[0] = new_hrs & 0xFF;
			hi_water = offsetof(struct qib_flash, if_powerhour);
		}
		if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
			ifp->if_powerhour[1] = new_hrs >> 8;
			hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
		}
	}
	/*
	 * There is a tiny possibility that we could somehow fail to write
	 * the EEPROM after updating our shadows, but problems from holding
	 * the spinlock too long are a much bigger issue.
	 */
	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
	if (hi_water) {
		/* we made some change to the data, update cksum and write */
		csum = flash_csum(ifp, 1);
		ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
	}
	mutex_unlock(&dd->eep_lock);
	if (ret)
		qib_dev_err(dd, "Failed updating EEPROM\n");

free_bail:
	vfree(buf);
bail:
	return ret;
}
/**
 * qib_inc_eeprom_err - increment one of the four error counters
 * that are logged to EEPROM.
 * @dd: the qlogic_ib device
 * @eidx: 0..3, the counter to increment
 * @incr: how much to add
 *
 * Each counter is 8-bits, and saturates at 255 (0xFF).  They
 * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
 * is called, but it can only be called in a context that allows sleep.
 * This function can be called even at interrupt level.
 */
void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
{
	unsigned long flags;
	uint sum;

	spin_lock_irqsave(&dd->eep_st_lock, flags);
	sum = dd->eep_st_new_errs[eidx] + incr;
	if (sum > 255)
		sum = 255;	/* saturate at the 8-bit maximum */
	dd->eep_st_new_errs[eidx] = sum;
	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,613 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/namei.h>
#include "qib.h"
/* Magic number identifying the qibfs superblock. */
#define QIBFS_MAGIC 0x726a77

/* The qibfs superblock, once mounted; used by the per-file helpers. */
static struct super_block *qib_super;

/* Recover the qib_devdata pointer stashed in a file's inode i_private. */
#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
/*
 * Create an inode under @dir for @dentry with the given @mode and
 * @fops; @data is stashed in i_private for later retrieval (see
 * private2dd()).  Returns 0 on success or -EPERM if inode allocation
 * fails.
 */
static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
		       int mode, const struct file_operations *fops,
		       void *data)
{
	int error;
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode) {
		error = -EPERM;
		goto bail;
	}

	inode->i_mode = mode;
	inode->i_uid = 0;
	inode->i_gid = 0;
	inode->i_blocks = 0;
	inode->i_atime = CURRENT_TIME;
	inode->i_mtime = inode->i_atime;
	inode->i_ctime = inode->i_atime;
	inode->i_private = data;
	if ((mode & S_IFMT) == S_IFDIR) {
		inode->i_op = &simple_dir_inode_operations;
		/* bump link counts for the new dir and its parent */
		inc_nlink(inode);
		inc_nlink(dir);
	}

	inode->i_fop = fops;

	d_instantiate(dentry, inode);
	error = 0;

bail:
	return error;
}
/*
 * Look up @name under @parent (holding the parent's i_mutex) and
 * create a qibfs node for it.  On success, *dentry refers to the new
 * entry; on failure the lookup's PTR_ERR or qibfs_mknod()'s error is
 * returned.
 */
static int create_file(const char *name, mode_t mode,
		       struct dentry *parent, struct dentry **dentry,
		       const struct file_operations *fops, void *data)
{
	int error;

	*dentry = NULL;
	mutex_lock(&parent->d_inode->i_mutex);
	*dentry = lookup_one_len(name, parent, strlen(name));
	if (!IS_ERR(*dentry))
		error = qibfs_mknod(parent->d_inode, *dentry,
				    mode, fops, data);
	else
		error = PTR_ERR(*dentry);
	mutex_unlock(&parent->d_inode->i_mutex);

	return error;
}
/* Dump the raw struct qlogic_ib_stats (qib_stats) to userspace. */
static ssize_t driver_stats_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(buf, count, ppos, &qib_stats,
				       sizeof qib_stats);
}
/*
 * driver stats field names, one line per stat, single string.  Used by
 * programs like ipathstats to print the stats in a way which works for
 * different versions of drivers, without changing program source.
 * if qlogic_ib_stats changes, this needs to change.  Names need to be
 * 12 chars or less (w/o newline), for proper display by ipathstats
 * utility.  Order must match the field order of struct qlogic_ib_stats.
 */
static const char qib_statnames[] =
	"KernIntr\n"
	"ErrorIntr\n"
	"Tx_Errs\n"
	"Rcv_Errs\n"
	"H/W_Errs\n"
	"NoPIOBufs\n"
	"CtxtsOpen\n"
	"RcvLen_Errs\n"
	"EgrBufFull\n"
	"EgrHdrFull\n"
	;
/* Return the newline-separated stat-name list (without trailing NUL). */
static ssize_t driver_names_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(buf, count, ppos, qib_statnames,
		sizeof qib_statnames - 1); /* no null */
}
/* [0] reads the driver stats, [1] reads the matching stat names */
static const struct file_operations driver_ops[] = {
	{ .read = driver_stats_read, },
	{ .read = driver_names_read, },
};
/* read the per-device counters */
static ssize_t dev_counters_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	u64 *counters;
	size_t avail;
	struct qib_devdata *dd = private2dd(file);

	/*
	 * Fetch the counter buffer before building the call: the old code
	 * passed 'counters' and the f_read_cntrs() call (which sets it)
	 * in the same argument list, and C gives no ordering guarantee
	 * between function arguments, so 'counters' could be read while
	 * still uninitialized.
	 */
	avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
	return simple_read_from_buffer(buf, count, ppos, counters, avail);
}
/* read the per-device counter names */
static ssize_t dev_names_read(struct file *file, char __user *buf,
			      size_t count, loff_t *ppos)
{
	char *names;
	size_t avail;
	struct qib_devdata *dd = private2dd(file);

	/*
	 * Fill "names" before passing it to simple_read_from_buffer();
	 * argument evaluation order is unspecified in C, so it must not
	 * be initialized by a sibling argument of the same call.
	 */
	avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
	return simple_read_from_buffer(buf, count, ppos, names, avail);
}
/* [0] = "counters", [1] = "counter_names" (indices used by add_cntr_files) */
static const struct file_operations cntr_ops[] = {
	{ .read = dev_counters_read, },
	{ .read = dev_names_read, },
};
/*
 * Could use file->f_dentry->d_inode->i_ino to figure out which file,
 * instead of separate routine for each, but for now, this works...
 */

/* read the per-port names (same for each port) */
static ssize_t portnames_read(struct file *file, char __user *buf,
			      size_t count, loff_t *ppos)
{
	char *names;
	size_t avail;
	struct qib_devdata *dd = private2dd(file);

	/*
	 * Fill "names" before passing it to simple_read_from_buffer();
	 * C argument evaluation order is unspecified, so it must not be
	 * initialized by a sibling argument of the same call.
	 */
	avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
	return simple_read_from_buffer(buf, count, ppos, names, avail);
}
/* read the per-port counters for port 1 (pidx 0) */
static ssize_t portcntrs_1_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	u64 *counters;
	size_t avail;
	struct qib_devdata *dd = private2dd(file);

	/*
	 * Fill "counters" before passing it to simple_read_from_buffer();
	 * C argument evaluation order is unspecified, so it must not be
	 * initialized by a sibling argument of the same call.
	 */
	avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
	return simple_read_from_buffer(buf, count, ppos, counters, avail);
}
/* read the per-port counters for port 2 (pidx 1) */
static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	u64 *counters;
	size_t avail;
	struct qib_devdata *dd = private2dd(file);

	/*
	 * Fill "counters" before passing it to simple_read_from_buffer();
	 * C argument evaluation order is unspecified, so it must not be
	 * initialized by a sibling argument of the same call.
	 */
	avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
	return simple_read_from_buffer(buf, count, ppos, counters, avail);
}
/* [0] = "portcounter_names", [1]/[2] = per-port counters for ports 1/2 */
static const struct file_operations portcntr_ops[] = {
	{ .read = portnames_read, },
	{ .read = portcntrs_1_read, },
	{ .read = portcntrs_2_read, },
};
/*
 * read the per-port QSFP data for port 1 (pidx 0)
 */
static ssize_t qsfp_1_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct qib_devdata *dd = private2dd(file);
	char *page;
	int ret;

	page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* dump at most one page of QSFP info, then copy what fits */
	ret = qib_qsfp_dump(dd->pport, page, PAGE_SIZE);
	if (ret > 0)
		ret = simple_read_from_buffer(buf, count, ppos, page, ret);

	kfree(page);
	return ret;
}
/*
 * read the per-port QSFP data for port 2 (pidx 1)
 */
static ssize_t qsfp_2_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct qib_devdata *dd = private2dd(file);
	char *page;
	int ret;

	/* single-port boards have no second QSFP cage */
	if (dd->num_pports < 2)
		return -ENODEV;

	page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	ret = qib_qsfp_dump(dd->pport + 1, page, PAGE_SIZE);
	if (ret > 0)
		ret = simple_read_from_buffer(buf, count, ppos, page, ret);

	kfree(page);
	return ret;
}
/* [0]/[1] = QSFP dump files for ports 1/2 (indexed with i - 1 in add_cntr_files) */
static const struct file_operations qsfp_ops[] = {
	{ .read = qsfp_1_read, },
	{ .read = qsfp_2_read, },
};
/*
 * Read from the board's flash (EEPROM) image.  Reads are bounded by
 * sizeof(struct qib_flash); a read starting at or past the end returns
 * 0 (EOF), and reads crossing the end are truncated.
 */
static ssize_t flash_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct qib_devdata *dd;
	ssize_t ret;
	loff_t pos;
	char *tmp;

	pos = *ppos;

	if (pos < 0) {
		ret = -EINVAL;
		goto bail;
	}

	if (pos >= sizeof(struct qib_flash)) {
		ret = 0; /* EOF */
		goto bail;
	}

	/* clamp the request to the remainder of the flash image */
	if (count > sizeof(struct qib_flash) - pos)
		count = sizeof(struct qib_flash) - pos;

	tmp = kmalloc(count, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto bail;
	}

	dd = private2dd(file);
	if (qib_eeprom_read(dd, pos, tmp, count)) {
		qib_dev_err(dd, "failed to read from flash\n");
		ret = -ENXIO;
		goto bail_tmp;
	}

	if (copy_to_user(buf, tmp, count)) {
		ret = -EFAULT;
		goto bail_tmp;
	}

	*ppos = pos + count;
	ret = count;

bail_tmp:
	kfree(tmp);

bail:
	return ret;
}
/*
 * Write the board's flash (EEPROM) image.  Only whole-image writes at
 * offset 0 are accepted (count must equal sizeof(struct qib_flash));
 * partial updates are rejected with -EINVAL.
 */
static ssize_t flash_write(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct qib_devdata *dd;
	ssize_t ret;
	loff_t pos;
	char *tmp;

	pos = *ppos;

	if (pos != 0) {
		ret = -EINVAL;
		goto bail;
	}

	if (count != sizeof(struct qib_flash)) {
		ret = -EINVAL;
		goto bail;
	}

	tmp = kmalloc(count, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmp, buf, count)) {
		ret = -EFAULT;
		goto bail_tmp;
	}

	dd = private2dd(file);
	if (qib_eeprom_write(dd, pos, tmp, count)) {
		ret = -ENXIO;
		qib_dev_err(dd, "failed to write to flash\n");
		goto bail_tmp;
	}

	*ppos = pos + count;
	ret = count;

bail_tmp:
	kfree(tmp);

bail:
	return ret;
}
/* fops for the read/write "flash" file in each per-unit directory */
static const struct file_operations flash_ops = {
	.read = flash_read,
	.write = flash_write,
};
/*
 * Create the per-unit directory and its counter/QSFP/flash files for
 * device @dd under the qibfs root of @sb.  Returns 0 on success or the
 * first create_file() error; on error, files created so far are left in
 * place (cleanup happens via remove_device_files()/umount).
 */
static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
{
	struct dentry *dir, *tmp;
	char unit[10];
	int ret, i;

	/* create the per-unit directory */
	snprintf(unit, sizeof unit, "%u", dd->unit);
	ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
			  &simple_dir_operations, dd);
	if (ret) {
		printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
		goto bail;
	}

	/* create the files in the new directory */
	ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
			  &cntr_ops[0], dd);
	if (ret) {
		printk(KERN_ERR "create_file(%s/counters) failed: %d\n",
		       unit, ret);
		goto bail;
	}
	ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
			  &cntr_ops[1], dd);
	if (ret) {
		printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n",
		       unit, ret);
		goto bail;
	}
	ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
			  &portcntr_ops[0], dd);
	if (ret) {
		printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
		       unit, "portcounter_names", ret);
		goto bail;
	}

	/* per-port files; i is 1-based to match the port numbering */
	for (i = 1; i <= dd->num_pports; i++) {
		char fname[24];

		sprintf(fname, "port%dcounters", i);
		/* create the files in the new directory */
		ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
				  &portcntr_ops[i], dd);
		if (ret) {
			printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
			       unit, fname, ret);
			goto bail;
		}
		/* QSFP files only exist on boards with QSFP cages */
		if (!(dd->flags & QIB_HAS_QSFP))
			continue;
		sprintf(fname, "qsfp%d", i);
		ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
				  &qsfp_ops[i - 1], dd);
		if (ret) {
			printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
			       unit, fname, ret);
			goto bail;
		}
	}

	ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
			  &flash_ops, dd);
	if (ret)
		printk(KERN_ERR "create_file(%s/flash) failed: %d\n",
		       unit, ret);
bail:
	return ret;
}
/*
 * Unlink the child @name of @parent if it is hashed and has an inode.
 * Caller must hold parent->d_inode->i_mutex.  Uses the (pre-2.6.38)
 * global dcache_lock alongside the per-dentry d_lock.
 */
static int remove_file(struct dentry *parent, char *name)
{
	struct dentry *tmp;
	int ret;

	tmp = lookup_one_len(name, parent, strlen(name));
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		goto bail;
	}

	spin_lock(&dcache_lock);
	spin_lock(&tmp->d_lock);
	/*
	 * Only unlink live, hashed dentries; grab a reference and unhash
	 * under the locks, then unlink outside them.
	 */
	if (!(d_unhashed(tmp) && tmp->d_inode)) {
		dget_locked(tmp);
		__d_drop(tmp);
		spin_unlock(&tmp->d_lock);
		spin_unlock(&dcache_lock);
		simple_unlink(parent->d_inode, tmp);
	} else {
		spin_unlock(&tmp->d_lock);
		spin_unlock(&dcache_lock);
	}

	ret = 0;
bail:
	/*
	 * We don't expect clients to care about the return value, but
	 * it's there if they need it.
	 */
	return ret;
}
/*
 * Remove the per-unit directory for @dd (and everything in it) from the
 * qibfs root of @sb.  Counterpart of add_cntr_files().
 */
static int remove_device_files(struct super_block *sb,
			       struct qib_devdata *dd)
{
	struct dentry *dir, *root;
	char unit[10];
	int ret, i;

	root = dget(sb->s_root);
	mutex_lock(&root->d_inode->i_mutex);
	snprintf(unit, sizeof unit, "%u", dd->unit);
	dir = lookup_one_len(unit, root, strlen(unit));

	if (IS_ERR(dir)) {
		ret = PTR_ERR(dir);
		printk(KERN_ERR "Lookup of %s failed\n", unit);
		goto bail;
	}

	remove_file(dir, "counters");
	remove_file(dir, "counter_names");
	remove_file(dir, "portcounter_names");
	for (i = 0; i < dd->num_pports; i++) {
		char fname[24];

		/* port numbers in file names are 1-based */
		sprintf(fname, "port%dcounters", i + 1);
		remove_file(dir, fname);
		if (dd->flags & QIB_HAS_QSFP) {
			sprintf(fname, "qsfp%d", i + 1);
			remove_file(dir, fname);
		}
	}
	remove_file(dir, "flash");
	d_delete(dir);
	ret = simple_rmdir(root->d_inode, dir);
	/*
	 * NOTE(review): lookup_one_len() returns a referenced dentry but
	 * there is no matching dput(dir) on either path here — confirm
	 * whether this leaks a dentry reference on removal.
	 */
bail:
	mutex_unlock(&root->d_inode->i_mutex);
	dput(root);
	return ret;
}
/*
 * This fills everything in when the fs is mounted, to handle umount/mount
 * after device init. The direct add_cntr_files() call handles adding
 * them from the init code, when the fs is already mounted.
 */
static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct qib_devdata *dd, *tmp;
	unsigned long flags;
	int ret;

	/* indices [0] and [1] are reserved by simple_fill_super() */
	static struct tree_descr files[] = {
		[2] = {"driver_stats", &driver_ops[0], S_IRUGO},
		[3] = {"driver_stats_names", &driver_ops[1], S_IRUGO},
		{""},
	};

	ret = simple_fill_super(sb, QIBFS_MAGIC, files);
	if (ret) {
		printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
		goto bail;
	}

	/*
	 * Walk the device list, dropping the lock around add_cntr_files()
	 * since it sleeps.  NOTE(review): the list could change while the
	 * lock is dropped; list_for_each_entry_safe only protects against
	 * removal of the current entry — confirm this is acceptable here.
	 * NOTE(review): calling deactivate_super() from inside fill_super
	 * looks suspicious, since the mount path also drops the superblock
	 * reference on failure — verify against get_sb_single()'s contract.
	 */
	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) {
		spin_unlock_irqrestore(&qib_devs_lock, flags);
		ret = add_cntr_files(sb, dd);
		if (ret) {
			deactivate_super(sb);
			goto bail;
		}
		spin_lock_irqsave(&qib_devs_lock, flags);
	}

	spin_unlock_irqrestore(&qib_devs_lock, flags);

bail:
	return ret;
}
/*
 * Mount entry point: there is a single shared superblock; remember it in
 * qib_super so hot-plugged devices can add/remove their files later.
 */
static int qibfs_get_sb(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data, struct vfsmount *mnt)
{
	int ret;

	ret = get_sb_single(fs_type, flags, data, qibfs_fill_super, mnt);
	if (ret >= 0)
		qib_super = mnt->mnt_sb;
	return ret;
}
/*
 * Unmount: tear down the superblock and clear the cached qib_super
 * pointer so qibfs_add()/qibfs_remove() become no-ops until remount.
 */
static void qibfs_kill_super(struct super_block *s)
{
	kill_litter_super(s);
	qib_super = NULL;
}
/*
 * Add the per-unit files for a newly initialized device.
 *
 * On first unit initialized, qib_super will not yet exist because
 * nobody has yet tried to mount the filesystem, so we can't consider
 * that to be an error; if an error occurs during the mount, that will
 * get a complaint, so this is OK.  add_cntr_files() for all units is
 * done at mount from qibfs_fill_super(), so one way or another,
 * everything works.
 */
int qibfs_add(struct qib_devdata *dd)
{
	if (!qib_super)
		return 0;
	return add_cntr_files(qib_super, dd);
}
/*
 * Remove a device's files; a no-op (returning 0) when the filesystem
 * is not mounted.
 */
int qibfs_remove(struct qib_devdata *dd)
{
	if (!qib_super)
		return 0;
	return remove_device_files(qib_super, dd);
}
/*
 * NOTE(review): the filesystem name is "ipathfs", not "qibfs" —
 * presumably kept so existing tooling that mounts the older ipath
 * driver's filesystem keeps working; confirm before changing.
 */
static struct file_system_type qibfs_fs_type = {
	.owner =	THIS_MODULE,
	.name =		"ipathfs",
	.get_sb =	qibfs_get_sb,
	.kill_sb =	qibfs_kill_super,
};
/* Register the qibfs filesystem type at module load. */
int __init qib_init_qibfs(void)
{
	return register_filesystem(&qibfs_fs_type);
}
/* Unregister the qibfs filesystem type at module unload. */
int __exit qib_exit_qibfs(void)
{
	return unregister_filesystem(&qibfs_fs_type);
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,236 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/delay.h>
#include "qib.h"
#include "qib_common.h"
/**
 * qib_format_hwmsg - format a single hwerror message
 * @msg: message buffer
 * @msgl: length of message buffer
 * @hwmsg: message to add to message buffer
 *
 * Appends "[hwmsg]" to @msg; strlcat() silently truncates at @msgl.
 */
static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
{
	strlcat(msg, "[", msgl);
	strlcat(msg, hwmsg, msgl);
	strlcat(msg, "]", msgl);
}
/**
 * qib_format_hwerrors - format hardware error messages for display
 * @hwerrs: hardware errors bit vector
 * @hwerrmsgs: hardware error descriptions
 * @nhwerrmsgs: number of hwerrmsgs
 * @msg: message buffer
 * @msgl: message buffer length
 *
 * Appends a bracketed description to @msg for every bit in @hwerrs that
 * matches an entry's mask.
 */
void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
			 size_t nhwerrmsgs, char *msg, size_t msgl)
{
	size_t i;

	for (i = 0; i < nhwerrmsgs; i++) {
		if (!(hwerrs & hwerrmsgs[i].mask))
			continue;
		qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
	}
}
static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
{
struct ib_event event;
struct qib_devdata *dd = ppd->dd;
event.device = &dd->verbs_dev.ibdev;
event.element.port_num = ppd->port;
event.event = ev;
ib_dispatch_event(&event);
}
/*
 * Handle an IB link-status-change interrupt for one port: decode the
 * logical (lstate) and physical (ltstate) link states from the IBC
 * status register value @ibcs, update ppd->lflags and the user-visible
 * status word, and dispatch IB async events for up/down transitions.
 */
void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	u32 lstate;
	u8 ltstate;
	enum ib_event_type ev = 0;	/* 0 == no event to dispatch */

	lstate = dd->f_iblink_state(ibcs); /* linkstate */
	ltstate = dd->f_ibphys_portstate(ibcs);

	/*
	 * If linkstate transitions into INIT from any of the various down
	 * states, or if it transitions from any of the up (INIT or better)
	 * states into any of the down states (except link recovery), then
	 * call the chip-specific code to take appropriate actions.
	 */
	if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
	    ltstate == IB_PHYSPORTSTATE_LINKUP) {
		/* transitioned to UP */
		if (dd->f_ib_updown(ppd, 1, ibcs))
			goto skip_ibchange; /* chip-code handled */
	} else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
		   QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
		if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
		    ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
		    dd->f_ib_updown(ppd, 0, ibcs))
			goto skip_ibchange; /* chip-code handled */
		qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
	}

	if (lstate != IB_PORT_DOWN) {
		/* lstate is INIT, ARMED, or ACTIVE */
		if (lstate != IB_PORT_ACTIVE) {
			*ppd->statusp &= ~QIB_STATUS_IB_READY;
			if (ppd->lflags & QIBL_LINKACTIVE)
				ev = IB_EVENT_PORT_ERR;
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			if (lstate == IB_PORT_ARMED) {
				ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
				ppd->lflags &= ~(QIBL_LINKINIT |
					QIBL_LINKDOWN | QIBL_LINKACTIVE);
			} else {
				ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
				ppd->lflags &= ~(QIBL_LINKARMED |
					QIBL_LINKDOWN | QIBL_LINKACTIVE);
			}
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			/*
			 * start a 75msec timer to clear symbol errors.
			 * NOTE(review): mod_timer() takes an absolute
			 * jiffies expiry, but this passes a relative
			 * msecs_to_jiffies(75) — looks like it should be
			 * jiffies + msecs_to_jiffies(75); confirm.
			 */
			mod_timer(&ppd->symerr_clear_timer,
				  msecs_to_jiffies(75));
		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
			/* active, but not active deferred */
			qib_hol_up(ppd); /* useful only for 6120 now */
			*ppd->statusp |=
				QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
			qib_clear_symerror_on_linkup((unsigned long)ppd);
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
			ppd->lflags &= ~(QIBL_LINKINIT |
				QIBL_LINKDOWN | QIBL_LINKARMED);
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			/* restart send DMA now that the link is up */
			if (dd->flags & QIB_HAS_SEND_DMA)
				qib_sdma_process_event(ppd,
					qib_sdma_event_e30_go_running);
			ev = IB_EVENT_PORT_ACTIVE;
			dd->f_setextled(ppd, 1);
		}
	} else { /* down */
		if (ppd->lflags & QIBL_LINKACTIVE)
			ev = IB_EVENT_PORT_ERR;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
		ppd->lflags &= ~(QIBL_LINKINIT |
				 QIBL_LINKACTIVE | QIBL_LINKARMED);
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		*ppd->statusp &= ~QIB_STATUS_IB_READY;
	}

skip_ibchange:
	ppd->lastibcstat = ibcs;
	if (ev)
		signal_ib_event(ppd, ev);
	return;
}
/*
 * Timer callback (also called directly on link-up): rebaseline the
 * symbol-error counter so errors accumulated during link training are
 * not reported.  Skipped once the link is ACTIVE.  @opaque is the
 * struct qib_pportdata pointer cast for the timer API.
 */
void qib_clear_symerror_on_linkup(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	if (ppd->lflags & QIBL_LINKACTIVE)
		return;

	ppd->ibport_data.z_symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
}
/*
 * Handle receive interrupts for user ctxts; this means a user
 * process was waiting for a packet to arrive, and didn't want
 * to poll.
 *
 * @ctxtr is a bitmask of contexts with pending receive work.
 */
void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
{
	struct qib_ctxtdata *rcd;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
		if (!(ctxtr & (1ULL << i)))
			continue;
		rcd = dd->rcd[i];
		if (!rcd || !rcd->cnt)
			continue;

		if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
			/*
			 * Waiter found: wake it and disable further
			 * "data available" interrupts for this context
			 * until it asks again.
			 */
			wake_up_interruptible(&rcd->wait);
			dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
				      rcd->ctxt);
		} else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
					      &rcd->flag)) {
			/* urgent-packet waiter */
			rcd->urgent++;
			wake_up_interruptible(&rcd->wait);
		}
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
}
/*
 * Called when reading the chip interrupt status register fails.
 * Escalates across repeated calls: first call just complains, the
 * second disables interrupt delivery, the third unregisters the IRQ
 * and marks the device bad.  "allbits" is shared across all devices
 * (file-scope static), which is the intent for this catastrophic path.
 */
void qib_bad_intrstatus(struct qib_devdata *dd)
{
	static int allbits;

	/* separate routine, for better optimization of qib_intr() */

	/*
	 * We print the message and disable interrupts, in hope of
	 * having a better chance of debugging the problem.
	 */
	qib_dev_err(dd, "Read of chip interrupt status failed"
		    " disabling interrupts\n");
	if (allbits++) {
		/* disable interrupt delivery, something is very wrong */
		if (allbits == 2)
			dd->f_set_intr_state(dd, 0);
		if (allbits == 3) {
			qib_dev_err(dd, "2nd bad interrupt status, "
				    "unregistering interrupts\n");
			dd->flags |= QIB_BADINTR;
			dd->flags &= ~QIB_INITTED;
			dd->f_free_irq(dd);
		}
	}
}

View File

@ -0,0 +1,328 @@
/*
* Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "qib.h"
/**
 * qib_alloc_lkey - allocate an lkey
 * @rkt: lkey table in which to allocate the lkey
 * @mr: memory region that this lkey protects
 *
 * Returns 1 if successful, otherwise returns 0 (table full).
 *
 * The lkey encodes the table slot in the high bits and a generation
 * count in bits 8+, so stale keys referencing a reused slot fail the
 * mr->lkey comparison in qib_lkey_ok()/qib_rkey_ok().
 */
int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret;

	spin_lock_irqsave(&rkt->lock, flags);

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (rkt->table[r] == NULL)
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n) {
			/* wrapped all the way around: table is full */
			spin_unlock_irqrestore(&rkt->lock, flags);
			ret = 0;
			goto bail;
		}
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		/* generation produced 0; force a non-zero key */
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
	rkt->table[r] = mr;
	spin_unlock_irqrestore(&rkt->lock, flags);

	ret = 1;

bail:
	return ret;
}
/**
 * qib_free_lkey - free an lkey
 * @dev: device whose lkey table the region was allocated from
 * @mr: memory region whose lkey (or DMA region) to release
 *
 * Returns 0 on success, or -EBUSY if the region still has references.
 * lkey == 0 identifies the special unrestricted DMA region.
 */
int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	int ret;

	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (lkey == 0) {
		if (dev->dma_mr && dev->dma_mr == mr) {
			ret = atomic_read(&dev->dma_mr->refcount);
			if (!ret)
				dev->dma_mr = NULL;
		} else
			ret = 0;
	} else {
		/* slot index lives in the high bits of the lkey */
		r = lkey >> (32 - ib_qib_lkey_table_size);
		ret = atomic_read(&dev->lk_table.table[r]->refcount);
		if (!ret)
			dev->lk_table.table[r] = NULL;
	}
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	if (ret)
		ret = -EBUSY;
	return ret;
}
/**
 * qib_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain the SGE must belong to
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Return 1 if valid and successful, otherwise returns 0.
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.  On success the referenced mregion's refcount is incremented;
 * the caller is responsible for dropping it.
 */
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc)
{
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	int ret = 0;
	unsigned long flags;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	spin_lock_irqsave(&rkt->lock, flags);
	if (sge->lkey == 0) {
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		/* unrestricted access is never allowed for user PDs */
		if (pd->user)
			goto bail;
		if (!dev->dma_mr)
			goto bail;
		atomic_inc(&dev->dma_mr->refcount);
		isge->mr = dev->dma_mr;
		isge->vaddr = (void *) sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
	/* slot must be live, generation must match, PD must match */
	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
		     mr->pd != &pd->ibpd))
		goto bail;

	/* bounds and access-rights check against the registered region */
	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;

	/* walk the segment map to the (m, n) segment containing "off" */
	off += mr->offset;
	m = 0;
	n = 0;
	while (off >= mr->map[m]->segs[n].length) {
		off -= mr->map[m]->segs[n].length;
		n++;
		if (n >= QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	atomic_inc(&mr->refcount);
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	ret = 1;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}
/**
 * qib_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: queue pair whose PD the rkey must belong to
 * @sge: outgoing internal SGE
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return 1 if successful, otherwise 0.
 *
 * On success the referenced mregion's refcount is incremented; the
 * caller is responsible for dropping it.
 */
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	int ret = 0;
	unsigned long flags;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	spin_lock_irqsave(&rkt->lock, flags);
	if (rkey == 0) {
		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		/* unrestricted access is never allowed for user PDs */
		if (pd->user)
			goto bail;
		if (!dev->dma_mr)
			goto bail;
		atomic_inc(&dev->dma_mr->refcount);
		sge->mr = dev->dma_mr;
		sge->vaddr = (void *) vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
	/* slot must be live, generation must match, PD must match */
	if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	/* bounds and access-rights check against the registered region */
	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;

	/* walk the segment map to the (m, n) segment containing "off" */
	off += mr->offset;
	m = 0;
	n = 0;
	while (off >= mr->map[m]->segs[n].length) {
		off -= mr->map[m]->segs[n].length;
		n++;
		if (n >= QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	atomic_inc(&mr->refcount);
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	ret = 1;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}
/*
 * Initialize the memory region specified by the work request.
 * Fast-register path: rebinds an existing MR slot to the page list in
 * @wr.  Returns 0 on success or -EINVAL on any validation failure.
 */
int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
	struct qib_mregion *mr;
	u32 rkey = wr->wr.fast_reg.rkey;
	unsigned i, n, m;
	int ret = -EINVAL;
	unsigned long flags;
	u64 *page_list;
	size_t ps;

	spin_lock_irqsave(&rkt->lock, flags);
	/* fast-register is a kernel-only operation; rkey 0 is reserved */
	if (pd->user || rkey == 0)
		goto bail;

	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
		goto bail;

	/* the page list must fit the MR's preallocated segment map */
	if (wr->wr.fast_reg.page_list_len > mr->max_segs)
		goto bail;

	/* requested length must be covered by the page list */
	ps = 1UL << wr->wr.fast_reg.page_shift;
	if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
		goto bail;

	mr->user_base = wr->wr.fast_reg.iova_start;
	mr->iova = wr->wr.fast_reg.iova_start;
	mr->lkey = rkey;
	mr->length = wr->wr.fast_reg.length;
	mr->access_flags = wr->wr.fast_reg.access_flags;
	page_list = wr->wr.fast_reg.page_list->page_list;
	m = 0;
	n = 0;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		mr->map[m]->segs[n].vaddr = (void *) page_list[i];
		mr->map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = 0;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,373 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* SMP MAD status codes (big-endian, as carried on the wire) */
#define IB_SMP_UNSUP_VERSION    cpu_to_be16(0x0004)
#define IB_SMP_UNSUP_METHOD     cpu_to_be16(0x0008)
#define IB_SMP_UNSUP_METH_ATTR  cpu_to_be16(0x000C)
#define IB_SMP_INVALID_FIELD    cpu_to_be16(0x001C)
/* NodeInfo SMP attribute payload — wire format, must stay packed */
struct ib_node_info {
	u8 base_version;
	u8 class_version;
	u8 node_type;
	u8 num_ports;
	__be64 sys_guid;
	__be64 node_guid;
	__be64 port_guid;
	__be16 partition_cap;
	__be16 device_id;
	__be32 revision;
	u8 local_port_num;
	u8 vendor_id[3];
} __attribute__ ((packed));
/*
 * Notice attribute for trap MADs; the union selects the detail layout
 * by trap number (ntc_NNN) — wire format, must stay packed.
 */
struct ib_mad_notice_attr {
	u8 generic_type;
	u8 prod_type_msb;
	__be16 prod_type_lsb;
	__be16 trap_num;
	__be16 issuer_lid;
	__be16 toggle_count;

	union {
		struct {
			u8	details[54];
		} raw_data;

		struct {
			__be16	reserved;
			__be16	lid;		/* where violation happened */
			u8	port_num;	/* where violation happened */
		} __attribute__ ((packed)) ntc_129_131;

		struct {
			__be16	reserved;
			__be16	lid;		/* LID where change occurred */
			u8	reserved2;
			u8	local_changes;	/* low bit - local changes */
			__be32	new_cap_mask;	/* new capability mask */
			u8	reserved3;
			u8	change_flags;	/* low 3 bits only */
		} __attribute__ ((packed)) ntc_144;

		struct {
			__be16	reserved;
			__be16	lid;		/* lid where sys guid changed */
			__be16	reserved2;
			__be64	new_sys_guid;
		} __attribute__ ((packed)) ntc_145;

		struct {
			__be16	reserved;
			__be16	lid;
			__be16	dr_slid;
			u8	method;
			u8	reserved2;
			__be16	attr_id;
			__be32	attr_mod;
			__be64	mkey;
			u8	reserved3;
			u8	dr_trunc_hop;
			u8	dr_rtn_path[30];
		} __attribute__ ((packed)) ntc_256;

		struct {
			__be16		reserved;
			__be16		lid1;
			__be16		lid2;
			__be32		key;
			__be32		sl_qp1;	/* SL: high 4 bits */
			__be32		qp2;	/* high 8 bits reserved */
			union ib_gid	gid1;
			union ib_gid	gid2;
		} __attribute__ ((packed)) ntc_257_258;

	} details;
};
/*
 * Generic trap/notice types
 */
#define IB_NOTICE_TYPE_FATAL	0x80
#define IB_NOTICE_TYPE_URGENT	0x81
#define IB_NOTICE_TYPE_SECURITY	0x82
#define IB_NOTICE_TYPE_SM	0x83
#define IB_NOTICE_TYPE_INFO	0x84

/*
 * Generic trap/notice producers
 */
#define IB_NOTICE_PROD_CA		cpu_to_be16(1)
#define IB_NOTICE_PROD_SWITCH		cpu_to_be16(2)
#define IB_NOTICE_PROD_ROUTER		cpu_to_be16(3)
#define IB_NOTICE_PROD_CLASS_MGR	cpu_to_be16(4)

/*
 * Generic trap/notice numbers
 */
#define IB_NOTICE_TRAP_LLI_THRESH	cpu_to_be16(129)
#define IB_NOTICE_TRAP_EBO_THRESH	cpu_to_be16(130)
#define IB_NOTICE_TRAP_FLOW_UPDATE	cpu_to_be16(131)
#define IB_NOTICE_TRAP_CAP_MASK_CHG	cpu_to_be16(144)
#define IB_NOTICE_TRAP_SYS_GUID_CHG	cpu_to_be16(145)
#define IB_NOTICE_TRAP_BAD_MKEY		cpu_to_be16(256)
#define IB_NOTICE_TRAP_BAD_PKEY		cpu_to_be16(257)
#define IB_NOTICE_TRAP_BAD_QKEY		cpu_to_be16(258)

/*
 * Repress trap/notice flags
 */
#define IB_NOTICE_REPRESS_LLI_THRESH	(1 << 0)
#define IB_NOTICE_REPRESS_EBO_THRESH	(1 << 1)
#define IB_NOTICE_REPRESS_FLOW_UPDATE	(1 << 2)
#define IB_NOTICE_REPRESS_CAP_MASK_CHG	(1 << 3)
#define IB_NOTICE_REPRESS_SYS_GUID_CHG	(1 << 4)
#define IB_NOTICE_REPRESS_BAD_MKEY	(1 << 5)
#define IB_NOTICE_REPRESS_BAD_PKEY	(1 << 6)
#define IB_NOTICE_REPRESS_BAD_QKEY	(1 << 7)

/*
 * Generic trap/notice other local changes flags (trap 144).
 */
#define IB_NOTICE_TRAP_LSE_CHG		0x04	/* Link Speed Enable changed */
#define IB_NOTICE_TRAP_LWE_CHG		0x02	/* Link Width Enable changed */
#define IB_NOTICE_TRAP_NODE_DESC_CHG	0x01

/*
 * Generic trap/notice M_Key violation flags in dr_trunc_hop (trap 256).
 */
#define IB_NOTICE_TRAP_DR_NOTICE	0x80
#define IB_NOTICE_TRAP_DR_TRUNC		0x40
/* One VL arbitration table entry (VLArbitrationTable attribute) */
struct ib_vl_weight_elem {
	u8      vl;     /* Only low 4 bits, upper 4 bits reserved */
	u8      weight;
};
/* attribute-modifier values selecting a VL arbitration table block */
#define IB_VLARB_LOWPRI_0_31    1
#define IB_VLARB_LOWPRI_32_63   2
#define IB_VLARB_HIGHPRI_0_31   3
#define IB_VLARB_HIGHPRI_32_63  4

/*
 * PMA class portinfo capability mask bits
 */
#define IB_PMA_CLASS_CAP_ALLPORTSELECT  cpu_to_be16(1 << 8)
#define IB_PMA_CLASS_CAP_EXT_WIDTH      cpu_to_be16(1 << 9)
#define IB_PMA_CLASS_CAP_XMIT_WAIT      cpu_to_be16(1 << 12)

/* PMA attribute IDs */
#define IB_PMA_CLASS_PORT_INFO          cpu_to_be16(0x0001)
#define IB_PMA_PORT_SAMPLES_CONTROL     cpu_to_be16(0x0010)
#define IB_PMA_PORT_SAMPLES_RESULT      cpu_to_be16(0x0011)
#define IB_PMA_PORT_COUNTERS            cpu_to_be16(0x0012)
#define IB_PMA_PORT_COUNTERS_EXT        cpu_to_be16(0x001D)
#define IB_PMA_PORT_SAMPLES_RESULT_EXT  cpu_to_be16(0x001E)
#define IB_PMA_PORT_COUNTERS_CONG       cpu_to_be16(0xFF00)
/* Performance-management MAD header + payload — wire format, packed */
struct ib_perf {
	u8 base_version;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
	__be16 status;
	__be16 unused;
	__be64 tid;
	__be16 attr_id;
	__be16 resv;
	__be32 attr_mod;
	u8 reserved[40];
	u8 data[192];
} __attribute__ ((packed));
/* PMA ClassPortInfo attribute — wire format, packed */
struct ib_pma_classportinfo {
	u8 base_version;
	u8 class_version;
	__be16 cap_mask;
	u8 reserved[3];
	u8 resp_time_value;	/* only lower 5 bits */
	union ib_gid redirect_gid;
	__be32 redirect_tc_sl_fl;	/* 8, 4, 20 bits respectively */
	__be16 redirect_lid;
	__be16 redirect_pkey;
	__be32 redirect_qp;	/* only lower 24 bits */
	__be32 redirect_qkey;
	union ib_gid trap_gid;
	__be32 trap_tc_sl_fl;	/* 8, 4, 20 bits respectively */
	__be16 trap_lid;
	__be16 trap_pkey;
	__be32 trap_hl_qp;	/* 8, 24 bits respectively */
	__be32 trap_qkey;
} __attribute__ ((packed));
/* PMA PortSamplesControl attribute — wire format, packed */
struct ib_pma_portsamplescontrol {
	u8 opcode;
	u8 port_select;
	u8 tick;
	u8 counter_width;	/* only lower 3 bits */
	__be32 counter_mask0_9;	/* 2, 10 * 3, bits */
	__be16 counter_mask10_14;	/* 1, 5 * 3, bits */
	u8 sample_mechanisms;
	u8 sample_status;	/* only lower 2 bits */
	__be64 option_mask;
	__be64 vendor_mask;
	__be32 sample_start;
	__be32 sample_interval;
	__be16 tag;
	__be16 counter_select[15];
} __attribute__ ((packed));
/* PMA PortSamplesResult attribute (32-bit counters) — wire format, packed */
struct ib_pma_portsamplesresult {
	__be16 tag;
	__be16 sample_status;	/* only lower 2 bits */
	__be32 counter[15];
} __attribute__ ((packed));

/* PMA PortSamplesResultExtended attribute (64-bit counters) — packed */
struct ib_pma_portsamplesresult_ext {
	__be16 tag;
	__be16 sample_status;	/* only lower 2 bits */
	__be32 extended_width;	/* only upper 2 bits */
	__be64 counter[15];
} __attribute__ ((packed));
/* Wire layout of the mandatory PMA PortCounters attribute (32-bit data). */
struct ib_pma_portcounters {
	u8 reserved;
	u8 port_select;
	__be16 counter_select;
	__be16 symbol_error_counter;
	u8 link_error_recovery_counter;
	u8 link_downed_counter;
	__be16 port_rcv_errors;
	__be16 port_rcv_remphys_errors;
	__be16 port_rcv_switch_relay_errors;
	__be16 port_xmit_discards;
	u8 port_xmit_constraint_errors;
	u8 port_rcv_constraint_errors;
	u8 reserved1;
	u8 lli_ebor_errors;	/* 4, 4, bits: local link integrity / excessive buffer overrun */
	__be16 reserved2;
	__be16 vl15_dropped;
	__be32 port_xmit_data;
	__be32 port_rcv_data;
	__be32 port_xmit_packets;
	__be32 port_rcv_packets;
} __attribute__ ((packed));

/* Vendor congestion-counter variant (attr 0xFF00): 64-bit data counters. */
struct ib_pma_portcounters_cong {
	u8 reserved;
	u8 reserved1;
	__be16 port_check_rate;
	__be16 symbol_error_counter;
	u8 link_error_recovery_counter;
	u8 link_downed_counter;
	__be16 port_rcv_errors;
	__be16 port_rcv_remphys_errors;
	__be16 port_rcv_switch_relay_errors;
	__be16 port_xmit_discards;
	u8 port_xmit_constraint_errors;
	u8 port_rcv_constraint_errors;
	u8 reserved2;
	u8 lli_ebor_errors;	/* 4, 4, bits */
	__be16 reserved3;
	__be16 vl15_dropped;
	__be64 port_xmit_data;
	__be64 port_rcv_data;
	__be64 port_xmit_packets;
	__be64 port_rcv_packets;
	__be64 port_xmit_wait;
	__be64 port_adr_events;
} __attribute__ ((packed));
/* Control values for the congestion PMA attribute modifier. */
#define IB_PMA_CONG_HW_CONTROL_TIMER            0x00
#define IB_PMA_CONG_HW_CONTROL_SAMPLE           0x01

#define QIB_XMIT_RATE_UNSUPPORTED               0x0
#define QIB_XMIT_RATE_PICO                      0x7
/* number of 4nsec cycles equaling 2secs */
#define QIB_CONG_TIMER_PSINTERVAL               0x1DCD64EC

/* CounterSelect bits for struct ib_pma_portcounters. */
#define IB_PMA_SEL_SYMBOL_ERROR                 cpu_to_be16(0x0001)
#define IB_PMA_SEL_LINK_ERROR_RECOVERY          cpu_to_be16(0x0002)
#define IB_PMA_SEL_LINK_DOWNED                  cpu_to_be16(0x0004)
#define IB_PMA_SEL_PORT_RCV_ERRORS              cpu_to_be16(0x0008)
#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS      cpu_to_be16(0x0010)
#define IB_PMA_SEL_PORT_XMIT_DISCARDS           cpu_to_be16(0x0040)
#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS  cpu_to_be16(0x0200)
#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS    cpu_to_be16(0x0400)
#define IB_PMA_SEL_PORT_VL15_DROPPED            cpu_to_be16(0x0800)
#define IB_PMA_SEL_PORT_XMIT_DATA               cpu_to_be16(0x1000)
#define IB_PMA_SEL_PORT_RCV_DATA                cpu_to_be16(0x2000)
#define IB_PMA_SEL_PORT_XMIT_PACKETS            cpu_to_be16(0x4000)
#define IB_PMA_SEL_PORT_RCV_PACKETS             cpu_to_be16(0x8000)

/* Congestion-counter select bits (host byte order). */
#define IB_PMA_SEL_CONG_ALL                     0x01
#define IB_PMA_SEL_CONG_PORT_DATA               0x02
#define IB_PMA_SEL_CONG_XMIT                    0x04
#define IB_PMA_SEL_CONG_ROUTING                 0x08
/* Wire layout of the PMA PortCountersExtended attribute (64-bit counters). */
struct ib_pma_portcounters_ext {
	u8 reserved;
	u8 port_select;
	__be16 counter_select;
	__be32 reserved1;
	__be64 port_xmit_data;
	__be64 port_rcv_data;
	__be64 port_xmit_packets;
	__be64 port_rcv_packets;
	__be64 port_unicast_xmit_packets;
	__be64 port_unicast_rcv_packets;
	__be64 port_multicast_xmit_packets;
	__be64 port_multicast_rcv_packets;
} __attribute__ ((packed));

/* CounterSelect bits for struct ib_pma_portcounters_ext. */
#define IB_PMA_SELX_PORT_XMIT_DATA              cpu_to_be16(0x0001)
#define IB_PMA_SELX_PORT_RCV_DATA               cpu_to_be16(0x0002)
#define IB_PMA_SELX_PORT_XMIT_PACKETS           cpu_to_be16(0x0004)
#define IB_PMA_SELX_PORT_RCV_PACKETS            cpu_to_be16(0x0008)
#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS       cpu_to_be16(0x0010)
#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS        cpu_to_be16(0x0020)
#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS     cpu_to_be16(0x0040)
#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS      cpu_to_be16(0x0080)
/*
 * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
 * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
 * We support 5 counters which only count the mandatory quantities.
 *
 * Arguments are fully parenthesized so the macro expands correctly even
 * when callers pass expressions (the original expanded n unparenthesized,
 * so COUNTER_MASK(1, a + b) would mis-associate).
 */
#define COUNTER_MASK(q, n) ((q) << ((9 - (n)) * 3))
#define COUNTER_MASK0_9 \
	cpu_to_be32(COUNTER_MASK(1, 0) | \
		    COUNTER_MASK(1, 1) | \
		    COUNTER_MASK(1, 2) | \
		    COUNTER_MASK(1, 3) | \
		    COUNTER_MASK(1, 4))

View File

@ -0,0 +1,174 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/pgtable.h>
#include "qib_verbs.h"
/**
 * qib_release_mmap_info - kref release callback that frees an mmap info
 * @ref: the kref embedded in the struct qib_mmap_info being destroyed
 *
 * Unlinks the entry from the device's pending-mmap list, then frees the
 * mapped object and the tracking structure itself.
 */
void qib_release_mmap_info(struct kref *ref)
{
	struct qib_mmap_info *info;
	struct qib_ibdev *ibdev;

	info = container_of(ref, struct qib_mmap_info, ref);
	ibdev = to_idev(info->context->device);

	/* Remove from the list of objects still waiting to be mmap()ed. */
	spin_lock_irq(&ibdev->pending_lock);
	list_del(&info->pending_mmaps);
	spin_unlock_irq(&ibdev->pending_lock);

	vfree(info->obj);
	kfree(info);
}
/*
 * The VMA open/close hooks reference-count the mapped object (via its
 * kref) so it is not released while a mapping is still in use.
 */
static void qib_vma_open(struct vm_area_struct *vma)
{
	struct qib_mmap_info *info = vma->vm_private_data;

	kref_get(&info->ref);
}

static void qib_vma_close(struct vm_area_struct *vma)
{
	struct qib_mmap_info *info = vma->vm_private_data;

	kref_put(&info->ref, qib_release_mmap_info);
}

static struct vm_operations_struct qib_vm_ops = {
	.open  = qib_vma_open,
	.close = qib_vma_close,
};
/**
 * qib_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 * Return zero if the mmap is OK. Otherwise, return an errno.
 */
int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct qib_ibdev *dev = to_idev(context->device);
	/* mmap offset doubles as the lookup key assigned at create time */
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct qib_mmap_info *ip, *pp;
	int ret = -EINVAL;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_irq(&dev->pending_lock);
	list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
				 pending_mmaps) {
		/* Only the creator is allowed to mmap the object */
		if (context != ip->context || (__u64) offset != ip->offset)
			continue;
		/* Don't allow a mmap larger than the object. */
		if (size > ip->size)
			break;

		/*
		 * Entry is consumed (one-shot) before the lock is dropped;
		 * remap_vmalloc_range() cannot be called under a spinlock.
		 * NOTE(review): if remap fails the entry stays unlinked, so
		 * the same offset cannot be retried -- confirm intended.
		 */
		list_del_init(&ip->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);

		ret = remap_vmalloc_range(vma, ip->obj, 0);
		if (ret)
			goto done;
		vma->vm_ops = &qib_vm_ops;
		vma->vm_private_data = ip;
		qib_vma_open(vma);	/* take a reference for this mapping */
		goto done;
	}
	spin_unlock_irq(&dev->pending_lock);

done:
	return ret;
}
/*
 * Allocate and initialize a qib_mmap_info tracking structure for @obj,
 * assigning it a unique device-relative mmap offset.
 * Returns NULL if memory cannot be allocated.
 */
struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
					   u32 size,
					   struct ib_ucontext *context,
					   void *obj)
{
	struct qib_mmap_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;

	size = PAGE_ALIGN(size);

	/* Hand out a unique, nonzero, page-aligned offset. */
	spin_lock_irq(&dev->mmap_offset_lock);
	if (dev->mmap_offset == 0)
		dev->mmap_offset = PAGE_SIZE;
	info->offset = dev->mmap_offset;
	dev->mmap_offset += size;
	spin_unlock_irq(&dev->mmap_offset_lock);

	INIT_LIST_HEAD(&info->pending_mmaps);
	info->size = size;
	info->context = context;
	info->obj = obj;
	kref_init(&info->ref);

	return info;
}
/*
 * Re-point an existing qib_mmap_info at a new object of a new size and
 * give it a fresh mmap offset.
 */
void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
			  u32 size, void *obj)
{
	u32 aligned = PAGE_ALIGN(size);

	spin_lock_irq(&dev->mmap_offset_lock);
	if (!dev->mmap_offset)
		dev->mmap_offset = PAGE_SIZE;
	ip->offset = dev->mmap_offset;
	dev->mmap_offset += aligned;
	spin_unlock_irq(&dev->mmap_offset_lock);

	ip->size = aligned;
	ip->obj = obj;
}

View File

@ -0,0 +1,503 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include "qib.h"
/* Fast memory region */
struct qib_fmr {
	struct ib_fmr ibfmr;
	u8 page_shift;		/* log2 of the FMR page size */
	struct qib_mregion mr;	/* must be last: mr.map[] is allocated as a
				 * variable-length tail (see qib_alloc_fmr) */
};

/* Convert a core ib_fmr pointer to the driver's containing qib_fmr. */
static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}
/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_ibdev *dev = to_idev(pd->device);
	struct qib_mr *mr;
	struct ib_mr *ret;
	unsigned long flags;

	/* DMA MRs are only allowed on kernel (non-userspace) PDs. */
	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.access_flags = acc;
	atomic_set(&mr->mr.refcount, 0);

	/*
	 * Record this MR as the device's DMA MR, but only if one isn't
	 * already registered.
	 * NOTE(review): if dev->dma_mr is already set, this new mr is still
	 * returned to the caller but never recorded anywhere -- confirm
	 * that this second-registration path is intended.
	 */
	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (!dev->dma_mr)
		dev->dma_mr = &mr->mr;
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	ret = &mr->ibmr;

bail:
	return ret;
}
/*
 * Allocate a qib_mr big enough for @count segments along with its
 * first-level segment tables, and assign it an lkey/rkey.
 * Returns NULL on any allocation or lkey failure.
 */
static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
{
	struct qib_mr *mr;
	int nmaps, i;

	/* One first-level table pointer per QIB_SEGSZ segments, rounded up. */
	nmaps = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kmalloc(sizeof(*mr) + nmaps * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		return NULL;

	/* Allocate the first-level segment tables themselves. */
	for (i = 0; i < nmaps; i++) {
		mr->mr.map[i] = kmalloc(sizeof(*mr->mr.map[0]), GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;
	}
	mr->mr.mapsz = nmaps;
	mr->mr.max_segs = count;

	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!qib_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
	atomic_set(&mr->mr.refcount, 0);
	return mr;

bail:
	/* i == nmaps when the lkey allocation was what failed. */
	while (i)
		kfree(mr->mr.map[--i]);
	kfree(mr);
	return NULL;
}
/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for this memory region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
	if (mr == NULL) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = acc;
	mr->umem = NULL;

	/* Copy the buffer list into the two-level segment map (m = table,
	 * n = slot within table), accumulating the total length. */
	m = 0;
	n = 0;
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}
/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct ib_umem_chunk *chunk;
	int n, m, i;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Pin the user pages; umem is owned by this MR on success. */
	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	/* Count the pinned pages so alloc_mr() sizes the segment map. */
	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = umem->offset;
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	/* Fill the two-level segment map from the pinned page list. */
	m = 0;
	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			void *vaddr;

			vaddr = page_address(sg_page(&chunk->page_list[i]));
			if (!vaddr) {
				ret = ERR_PTR(-EINVAL);
				goto bail_free;
			}
			mr->mr.map[m]->segs[n].vaddr = vaddr;
			mr->mr.map[m]->segs[n].length = umem->page_size;
			n++;
			if (n == QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	ret = &mr->ibmr;
	goto bail;

bail_free:
	/*
	 * Bug fix: undo alloc_mr() and release the pinned pages; the
	 * original code leaked mr, its lkey, the map tables and the umem
	 * on this error path.
	 */
	qib_free_lkey(to_idev(pd->device), &mr->mr);
	while (mr->mr.mapsz)
		kfree(mr->mr.map[--mr->mr.mapsz]);
	kfree(mr);
	ib_umem_release(umem);
bail:
	return ret;
}
/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by qib_get_dma_mr()
 * or qib_reg_user_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	struct qib_ibdev *dev = to_idev(ibmr->device);
	int rval;
	int n;

	/* Release the lkey first; a busy MR refuses deregistration. */
	rval = qib_free_lkey(dev, &mr->mr);
	if (rval)
		return rval;

	for (n = mr->mr.mapsz; n > 0; n--)
		kfree(mr->mr.map[n - 1]);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return 0;
}
/*
 * Allocate a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 *
 * All MR fields start zeroed; they are filled in when the fast-register
 * work request executes.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct qib_mr *mr;

	mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->mr.pd = pd;
	mr->mr.user_base = 0;
	mr->mr.iova = 0;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = 0;
	mr->umem = NULL;

	return &mr->ibmr;
}
/*
 * Allocate the page-list container used by fast-register work requests.
 * The list itself is limited to one page worth of u64 addresses.
 */
struct ib_fast_reg_page_list *
qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
	struct ib_fast_reg_page_list *pl;
	unsigned bytes = page_list_len * sizeof(u64);

	if (bytes > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return ERR_PTR(-ENOMEM);

	pl->page_list = kmalloc(bytes, GFP_KERNEL);
	if (!pl->page_list) {
		kfree(pl);
		return ERR_PTR(-ENOMEM);
	}

	return pl;
}

/* Free a page list allocated by qib_alloc_fast_reg_page_list(). */
void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
	kfree(pl->page_list);
	kfree(pl);
}
/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;
	}
	fmr->mr.mapsz = m;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.pd = pd;
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->page_shift = fmr_attr->page_shift;

	atomic_set(&fmr->mr.refcount, 0);
	ret = &fmr->ibfmr;
	goto done;

bail:
	/* i counts the tables allocated before failure; kfree(NULL) is a
	 * no-op, so reaching here with fmr == NULL (and i == 0) is safe. */
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);

done:
	return ret;
}
/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	u32 pagesize;
	int m = 0, n = 0, i;

	/* The region must not be in active use while it is remapped. */
	if (atomic_read(&fmr->mr.refcount))
		return -EBUSY;

	if (list_len > fmr->mr.max_segs)
		return -EINVAL;

	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	pagesize = 1 << fmr->page_shift;
	fmr->mr.length = list_len * pagesize;
	/* Walk the two-level segment map: m = table index, n = slot. */
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = pagesize;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}
/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Invalidates each region's mapping (base, iova and length are zeroed
 * under the lkey table lock).
 *
 * Returns 0 on success.
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		struct qib_lkey_table *rkt =
			&to_idev(fmr->ibfmr.device)->lk_table;
		unsigned long flags;

		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}
/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success; a nonzero errno if the lkey cannot be freed.
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int rval;
	int n;

	rval = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
	if (rval)
		return rval;

	for (n = fmr->mr.mapsz; n > 0; n--)
		kfree(fmr->mr.map[n - 1]);
	kfree(fmr);
	return 0;
}

View File

@ -0,0 +1,738 @@
/*
* Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>
#include "qib.h"
/*
* This file contains PCIe utility routines that are common to the
* various QLogic InfiniPath adapters
*/
/*
* Code to adjust PCIe capabilities.
* To minimize the change footprint, we call it
* from qib_pcie_params, which every chip-specific
* file calls, even though this violates some
* expectations of harmlessness.
*/
static int qib_tune_pcie_caps(struct qib_devdata *);
static int qib_tune_pcie_coalesce(struct qib_devdata *);
/*
 * Do all the common PCIe setup and initialization.
 * devdata is not yet allocated, and is not allocated until after this
 * routine returns success.  Therefore qib_dev_err() can't be used for error
 * printing.
 */
int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		/*
		 * This can happen (in theory) iff:
		 * We did a chip reset, and then failed to reprogram the
		 * BAR, or the chip reset due to an internal error.  We then
		 * unloaded the driver and reloaded it.
		 *
		 * Both reset cases set the BAR back to initial state.  For
		 * the latter case, the AER sticky error bit at offset 0x718
		 * should be set, but the Linux kernel doesn't yet know
		 * about that, it appears.  If the original BAR was retained
		 * in the kernel data structures, this may be OK.
		 */
		qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
			      -ret);
		goto done;
	}

	ret = pci_request_regions(pdev, QIB_DRV_NAME);
	if (ret) {
		qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
		/*
		 * Bug fix: do not call pci_release_regions() for regions
		 * that were never successfully requested; only undo the
		 * device enable.
		 */
		goto bail_disable;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		/*
		 * If the 64 bit setup fails, try 32 bit.  Some systems
		 * do not setup 64 bit maps on systems with 2GB or less
		 * memory installed.
		 */
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
			goto bail;
		}
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	} else
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		/* Warn only; ret is overwritten below, so this is not fatal. */
		qib_early_err(&pdev->dev,
			      "Unable to set DMA consistent mask: %d\n", ret);

	pci_set_master(pdev);
	ret = pci_enable_pcie_error_reporting(pdev);
	if (ret)
		qib_early_err(&pdev->dev,
			      "Unable to enable pcie error reporting: %d\n",
			      ret);
	goto done;

bail:
	pci_release_regions(pdev);
bail_disable:
	pci_disable_device(pdev);
done:
	return ret;
}
/*
 * Do remaining PCIe setup, once dd is allocated, and save away
 * fields required to re-initialize after a chip reset, or for
 * various other purposes
 */
int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
		    const struct pci_device_id *ent)
{
	unsigned long len;
	resource_size_t addr;

	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	/* Map all of BAR 0 (the chip's register space). */
	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
#else
	dd->kregbase = ioremap_nocache(addr, len);
#endif

	if (!dd->kregbase)
		return -ENOMEM;

	/* kregend is one past the last mapped register address. */
	dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
	dd->physaddr = addr;        /* used for io_remap, etc. */

	/*
	 * Save BARs to rewrite after device reset.  Save all 64 bits of
	 * BAR, just in case.
	 */
	dd->pcibar0 = addr;
	dd->pcibar1 = addr >> 32;
	dd->deviceid = ent->device; /* save for later use */
	dd->vendorid = ent->vendor;

	return 0;
}
/*
* Do PCIe cleanup, after chip-specific cleanup, etc. Just prior
* to releasing the dd memory.
* void because none of the core pcie cleanup returns are void
*/
void qib_pcie_ddcleanup(struct qib_devdata *dd)
{
u64 __iomem *base = (void __iomem *) dd->kregbase;
dd->kregbase = NULL;
iounmap(base);
if (dd->piobase)
iounmap(dd->piobase);
if (dd->userbase)
iounmap(dd->userbase);
pci_disable_device(dd->pcidev);
pci_release_regions(dd->pcidev);
pci_set_drvdata(dd->pcidev, NULL);
}
/*
 * Try to enable up to *msixcnt MSI-X vectors (capped by the device's
 * MSI-X table size).  On return, *msixcnt holds the number of vectors
 * actually enabled (0 if MSI-X failed and INTx was selected instead).
 */
static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
			   struct msix_entry *msix_entry)
{
	int ret;
	u32 tabsize = 0;
	u16 msix_flags;

	pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
	/* MSI-X table size field encodes N-1 entries. */
	tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
	if (tabsize > *msixcnt)
		tabsize = *msixcnt;
	ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
	if (ret > 0) {
		/* A positive return is the number of vectors available:
		 * retry once with that smaller count. */
		tabsize = ret;
		ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
	}
	if (ret) {
		qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
			    "falling back to INTx\n", tabsize, ret);
		tabsize = 0;
	}
	*msixcnt = tabsize;

	if (ret)
		qib_enable_intx(dd->pcidev);
}
/**
 * We save the msi lo and hi values, so we can restore them after
 * chip reset (the kernel PCI infrastructure doesn't yet handle that
 * correctly).
 *
 * Returns the pci_enable_msi() result (0 on success); the config-space
 * snapshot is taken regardless, since the caller continues either way.
 */
static int qib_msi_setup(struct qib_devdata *dd, int pos)
{
	struct pci_dev *pdev = dd->pcidev;
	u16 control;
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		qib_dev_err(dd, "pci_enable_msi failed: %d, "
			    "interrupts may not work\n", ret);
	/* continue even if it fails, we may still be OK... */

	/* Snapshot the MSI address registers for qib_reinit_intr(). */
	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
			      &dd->msi_lo);
	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
			      &dd->msi_hi);
	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
	/* now save the data (vector) info; its offset depends on whether
	 * the device implements 64-bit MSI addressing */
	pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
				      ? 12 : 8),
			     &dd->msi_data);
	return ret;
}
/*
 * Set up the interrupt mechanism (MSI-X, MSI or INTx) and record the
 * negotiated PCIe link speed/width in dd->lbus_*.  Returns 0 when
 * qib_msix_setup() handled interrupts, otherwise the qib_msi_setup()
 * result (or 1 if no MSI capability was found).
 */
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
		    struct msix_entry *entry)
{
	u16 linkstat, speed;
	int pos = 0, pose, ret = 1;

	pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
	if (!pose) {
		qib_dev_err(dd, "Can't find PCI Express capability!\n");
		/* set up something... */
		dd->lbus_width = 1;
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		goto bail;
	}

	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
	if (nent && *nent && pos) {
		qib_msix_setup(dd, pos, nent, entry);
		ret = 0; /* did it, either MSIx or INTx */
	} else {
		pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
		if (pos)
			ret = qib_msi_setup(dd, pos);
		else
			qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
	}
	if (!pos)
		qib_enable_intx(dd->pcidev);

	pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
	/*
	 * speed is bits 0-3, linkwidth is bits 4-8
	 * no defines for them in headers
	 */
	speed = linkstat & 0xf;
	linkstat >>= 4;
	linkstat &= 0x1f;
	dd->lbus_width = linkstat;

	switch (speed) {
	case 1:
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		break;
	case 2:
		dd->lbus_speed = 5000; /* Gen2, 5GHz (comment was wrong: not Gen1) */
		break;
	default: /* not defined, assume gen1 */
		dd->lbus_speed = 2500;
		break;
	}

	/*
	 * Check against expected pcie width and complain if "wrong"
	 * on first initialization, not afterwards (i.e., reset).
	 */
	if (minw && linkstat < minw)
		qib_dev_err(dd,
			    "PCIe width %u (x%u HCA), performance reduced\n",
			    linkstat, minw);

	qib_tune_pcie_caps(dd);
	qib_tune_pcie_coalesce(dd);

bail:
	/* fill in string, even on errors */
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
	return ret;
}
/*
 * Setup pcie interrupt stuff again after a reset.  I'd like to just call
 * pci_enable_msi() again for msi, but when I do that,
 * the MSI enable bit doesn't get set in the command word, and
 * we switch to a different interrupt vector, which is confusing,
 * so I instead just do it all inline.  Perhaps somehow can tie this
 * into the PCIe hotplug support at some point
 *
 * Returns 1 if interrupts are usable again (MSI restored or INTx
 * enabled), 0 otherwise.
 */
int qib_reinit_intr(struct qib_devdata *dd)
{
	int pos;
	u16 control;
	int ret = 0;

	/* If we aren't using MSI, don't restore it */
	if (!dd->msi_lo)
		goto bail;

	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
	if (!pos) {
		qib_dev_err(dd, "Can't find MSI capability, "
			    "can't restore MSI settings\n");
		ret = 0;
		/* nothing special for MSIx, just MSI */
		goto bail;
	}
	/* Restore the MSI address registers saved by qib_msi_setup(). */
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
			       dd->msi_lo);
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
			       dd->msi_hi);
	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
		control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
				      control);
	}
	/* now rewrite the data (vector) info */
	pci_write_config_word(dd->pcidev, pos +
			      ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
			      dd->msi_data);
	ret = 1;
bail:
	/* Fall back to INTx if MSI couldn't be restored but INTx works. */
	if (!ret && (dd->flags & QIB_HAS_INTX)) {
		qib_enable_intx(dd->pcidev);
		ret = 1;
	}

	/* and now set the pci master bit again */
	pci_set_master(dd->pcidev);

	return ret;
}
/*
 * Disable msi interrupt if enabled, and clear msi_lo.
 * This is used primarily for the fallback to INTx, but
 * is also used in reinit after reset, and during cleanup.
 */
void qib_nomsi(struct qib_devdata *dd)
{
	/* Clearing msi_lo stops qib_reinit_intr() from restoring MSI. */
	dd->msi_lo = 0;
	pci_disable_msi(dd->pcidev);
}

/*
 * Same as qib_nomsi, but for MSIx.  (Fixed typo: was "qib_nosmi".)
 */
void qib_nomsix(struct qib_devdata *dd)
{
	pci_disable_msix(dd->pcidev);
}
/*
 * Force the device to legacy INTx interrupts: clear the INTx-disable
 * bit in the command register and make sure both MSI and MSI-X are off.
 * (Similar to pci_intx(pdev, 1), except that we make sure msi(x) is off.)
 */
void qib_enable_intx(struct pci_dev *pdev)
{
	u16 word, fixed;
	int cap;

	/* first, turn on INTx */
	pci_read_config_word(pdev, PCI_COMMAND, &word);
	fixed = word & ~PCI_COMMAND_INTX_DISABLE;
	if (fixed != word)
		pci_write_config_word(pdev, PCI_COMMAND, fixed);

	cap = pci_find_capability(pdev, PCI_CAP_ID_MSI);
	if (cap) {
		/* then turn off MSI */
		pci_read_config_word(pdev, cap + PCI_MSI_FLAGS, &word);
		fixed = word & ~PCI_MSI_FLAGS_ENABLE;
		if (fixed != word)
			pci_write_config_word(pdev, cap + PCI_MSI_FLAGS,
					      fixed);
	}

	cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (cap) {
		/* then turn off MSIx */
		pci_read_config_word(pdev, cap + PCI_MSIX_FLAGS, &word);
		fixed = word & ~PCI_MSIX_FLAGS_ENABLE;
		if (fixed != word)
			pci_write_config_word(pdev, cap + PCI_MSIX_FLAGS,
					      fixed);
	}
}
/*
 * These two routines are helper routines for the device reset code
 * to move all the pcie code out of the chip-specific driver code.
 */
/*
 * Snapshot the command, interrupt-line and cache-line-size registers so
 * qib_pcie_reenable() can restore them after a chip reset.
 */
void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
{
	pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
	pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
	pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
}
/*
 * Counterpart to qib_pcie_getcmd(): after a chip reset, rewrite the saved
 * BARs, restore the command/interrupt-line/cache-line-size registers, and
 * re-enable the device.  Failures are logged but not propagated.
 */
void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
{
	int r;
	/* Restore both halves of the 64-bit BAR saved in qib_pcie_ddinit(). */
	r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
				   dd->pcibar0);
	if (r)
		qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
	r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
				   dd->pcibar1);
	if (r)
		qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
	/* now re-enable memory access, and restore cosmetic settings */
	pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
	pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
	pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
	r = pci_enable_device(dd->pcidev);
	if (r)
		qib_dev_err(dd, "pci_enable_device failed after "
			    "reset: %d\n", r);
}
/* code to adjust PCIe capabilities. */
/*
 * Extract the value of the field selected by @mask from register word
 * @wd: mask off the field and shift it down to bit 0.  A zero mask
 * yields 0.
 */
static int fld2val(int wd, int mask)
{
	int lowbit;

	if (!mask)
		return 0;
	lowbit = mask & -mask;	/* isolate the least-significant mask bit */
	return (wd & mask) / lowbit;
}
/*
 * Inverse of fld2val(): shift @wd up so it occupies the field selected
 * by @mask.  The result is not clipped to the mask; a zero mask yields 0.
 */
static int val2fld(int wd, int mask)
{
	if (!mask)
		return 0;
	return wd * (mask & -mask);	/* multiply by the field's LSB weight */
}
/* Opt-in module parameter; coalescing stays off unless explicitly enabled. */
static int qib_pcie_coalesce;
module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
MODULE_PARM_DESC(pcie_coalesce, "tune PCIe colescing on some Intel chipsets");

/*
 * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
 * chipsets.  This is known to be unsafe for some revisions of some
 * of these chipsets, with some BIOS settings, and enabling it on those
 * systems may result in the system crashing, and/or data corruption.
 *
 * Returns 0 when coalescing was applied (or the feature is disabled),
 * 1 when the platform is not a recognized/supported root complex.
 */
static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
{
	int r;
	struct pci_dev *parent;
	int ppos;
	u16 devid;
	u32 mask, bits, val;

	if (!qib_pcie_coalesce)
		return 0;

	/* Find out supported and configured values for parent (root) */
	parent = dd->pcidev->bus->self;
	/* NOTE(review): parent is dereferenced without a NULL check; a
	 * device on a root bus with no bridge would oops here -- confirm. */
	if (parent->bus->parent) {
		qib_devinfo(dd->pcidev, "Parent not root\n");
		return 1;
	}
	ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
	if (!ppos)
		return 1;
	if (parent->vendor != 0x8086)	/* Intel only */
		return 1;

	/*
	 *  - bit 12: Max_rdcmp_Imt_EN: need to set to 1
	 *  - bit 11: COALESCE_FORCE: need to set to 0
	 *  - bit 10: COALESCE_EN: need to set to 1
	 *  (but limitations on some on some chipsets)
	 *
	 *  On the Intel 5000, 5100, and 7300 chipsets, there is
	 *  also: - bit 25:24: COALESCE_MODE, need to set to 0
	 */
	devid = parent->device;
	if (devid >= 0x25e2 && devid <= 0x25fa) {
		u8 rev;

		/* 5000 P/V/X/Z */
		pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
		/* early revisions only tolerate COALESCE_EN by itself */
		if (rev <= 0xb2)
			bits = 1U << 10;
		else
			bits = 7U << 10;
		mask = (3U << 24) | (7U << 10);
	} else if (devid >= 0x65e2 && devid <= 0x65fa) {
		/* 5100 */
		bits = 1U << 10;
		mask = (3U << 24) | (7U << 10);
	} else if (devid >= 0x4021 && devid <= 0x402e) {
		/* 5400 */
		bits = 7U << 10;
		mask = 7U << 10;
	} else if (devid >= 0x3604 && devid <= 0x360a) {
		/* 7300 */
		bits = 7U << 10;
		mask = (3U << 24) | (7U << 10);
	} else {
		/* not one of the chipsets that we know about */
		return 1;
	}
	/* Read-modify-write the chipset coalescing control at offset 0x48. */
	pci_read_config_dword(parent, 0x48, &val);
	val &= ~mask;
	val |= bits;
	/* NOTE(review): the write result r is ignored -- confirm intended. */
	r = pci_write_config_dword(parent, 0x48, val);
	return 0;
}
/*
 * BIOS may not set PCIe bus-utilization parameters for best performance.
 * Check and optionally adjust them to maximize our throughput.
 *
 * qib_pcie_caps encodes the requested maximums: MaxPayload code in
 * bits 0..3, MaxReadReq code in bits 4..7 (each code is log2(size) - 7,
 * per the PCIe spec).  The default of 0 leaves the BIOS settings alone,
 * since qib_tune_pcie_caps() only ever raises values up to this limit.
 */
static int qib_pcie_caps;
module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)");
/*
 * Raise PCIe MaxPayload and MaxReadReq on both the root port and this
 * endpoint, limited by what both support and by the pcie_caps module
 * parameter.  Settings are only ever increased, never lowered.
 * Returns 0 on success, 1 if either device lacks a PCIe capability or
 * the parent is not a root port.
 */
static int qib_tune_pcie_caps(struct qib_devdata *dd)
{
	int ret = 1; /* Assume the worst */
	struct pci_dev *parent;
	int ppos, epos;
	u16 pcaps, pctl, ecaps, ectl;
	int rc_sup, ep_sup;
	int rc_cur, ep_cur;

	/* Find out supported and configured values for parent (root) */
	parent = dd->pcidev->bus->self;
	if (parent->bus->parent) {
		qib_devinfo(dd->pcidev, "Parent not root\n");
		goto bail;
	}
	ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
	if (ppos) {
		pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
		pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
	} else
		goto bail;
	/* Find out supported and configured values for endpoint (us) */
	epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
	if (epos) {
		pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
		pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
	} else
		goto bail;
	ret = 0;
	/* Find max payload supported by root, endpoint */
	rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
	ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD);
	if (rc_sup > ep_sup)
		rc_sup = ep_sup;

	rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD);
	ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD);

	/* If Supported greater than limit in module param, limit it */
	if (rc_sup > (qib_pcie_caps & 7))
		rc_sup = qib_pcie_caps & 7;
	/* If less than (allowed, supported), bump root payload */
	if (rc_sup > rc_cur) {
		rc_cur = rc_sup;
		pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
			val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
		pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
	}
	/* If less than (allowed, supported), bump endpoint payload */
	if (rc_sup > ep_cur) {
		ep_cur = rc_sup;
		ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
			val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
		pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
	}

	/*
	 * Now the Read Request size.
	 * No field for max supported, but PCIe spec limits it to 4096,
	 * which is code '5' (log2(4096) - 7)
	 */
	rc_sup = 5;
	if (rc_sup > ((qib_pcie_caps >> 4) & 7))
		rc_sup = (qib_pcie_caps >> 4) & 7;
	rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
	ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);

	if (rc_sup > rc_cur) {
		rc_cur = rc_sup;
		pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
			val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
		pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
	}
	if (rc_sup > ep_cur) {
		ep_cur = rc_sup;
		ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
			val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
		pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
	}
bail:
	return ret;
}
/* End of PCIe capability tuning */
/*
* From here through qib_pci_err_handler definition is invoked via
* PCI error infrastructure, registered via pci
*/
/*
 * PCI error-recovery callback: called when the PCI layer detects a
 * channel error.  Decides whether the slot needs a reset, should be
 * disconnected, or can be left alone.
 */
static pci_ers_result_t
qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;

	switch (state) {
	case pci_channel_io_normal:
		qib_devinfo(pdev, "State Normal, ignoring\n");
		break;

	case pci_channel_io_frozen:
		qib_devinfo(pdev, "State Frozen, requesting reset\n");
		pci_disable_device(pdev);
		ret = PCI_ERS_RESULT_NEED_RESET;
		break;

	case pci_channel_io_perm_failure:
		qib_devinfo(pdev, "State Permanent Failure, disabling\n");
		if (dd) {
			/* no more register accesses! */
			dd->flags &= ~QIB_PRESENT;
			qib_disable_after_error(dd);
		}
		/* else early, or other problem */
		ret = PCI_ERS_RESULT_DISCONNECT;
		break;

	default: /* shouldn't happen */
		qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
			    state);
		break;
	}
	return ret;
}
/*
 * PCI error-recovery callback: MMIO access has been re-enabled after an
 * error.  Probe the chip with a harmless counter read; an all-ones
 * result means the device is still unreachable and a full reset is
 * needed.
 */
static pci_ers_result_t
qib_pci_mmio_enabled(struct pci_dev *pdev)
{
	u64 words = 0U;
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;

	if (dd && dd->pport) {
		words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
		if (words == ~0ULL)
			ret = PCI_ERS_RESULT_NEED_RESET;
	}
	qib_devinfo(pdev, "QIB mmio_enabled function called, "
		    "read wordscntr %Lx, returning %d\n", words, ret);
	return ret;
}
/*
 * PCI error-recovery callback: the slot has been reset.  No chip-level
 * recovery is attempted here; just report that we can continue.
 */
static pci_ers_result_t
qib_pci_slot_reset(struct pci_dev *pdev)
{
	/*
	 * Bug fix: this message previously said "link_reset" (copy-paste
	 * from qib_pci_link_reset()), misidentifying which callback ran.
	 */
	qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
	return PCI_ERS_RESULT_CAN_RECOVER;
}
/*
 * PCI error-recovery callback: the link has been reset.  Nothing to do;
 * report that we can continue.
 */
static pci_ers_result_t
qib_pci_link_reset(struct pci_dev *pdev)
{
	qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
	return PCI_ERS_RESULT_CAN_RECOVER;
}
/*
 * PCI error-recovery callback: traffic may flow again.  Clear pending
 * AER status and re-initialize the chip as after a reset.
 */
static void
qib_pci_resume(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);

	qib_devinfo(pdev, "QIB resume function called\n");
	pci_cleanup_aer_uncorrect_error_status(pdev);
	/*
	 * Running jobs will fail, since it's asynchronous
	 * unlike sysfs-requested reset.   Better than
	 * doing nothing.
	 */
	qib_init(dd, 1); /* same as re-init after reset */
}
/* PCI error-recovery entry points, registered with the PCI core. */
struct pci_error_handlers qib_pci_err_handler = {
	.error_detected = qib_pci_error_detected,
	.mmio_enabled = qib_pci_mmio_enabled,
	.link_reset = qib_pci_link_reset,
	.slot_reset = qib_pci_slot_reset,
	.resume = qib_pci_resume,
};

View File

@ -0,0 +1,64 @@
/*
* Copyright (c) 2009 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "qib.h"
/**
* qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
* @to: destination, in MMIO space (must be 64-bit aligned)
* @from: source (must be 64-bit aligned)
* @count: number of 32-bit quantities to copy
*
* Copy data from kernel space to MMIO space, in multiples of 32 bits at a
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
void qib_pio_copy(void __iomem *to, const void *from, size_t count)
{
#ifdef CONFIG_64BIT
	u64 __iomem *dst = to;
	const u64 *src = from;
	const u64 *end = src + (count >> 1);

	/* copy pairs of 32-bit words as single 64-bit MMIO stores */
	while (src < end)
		__raw_writeq(*src++, dst++);
	/* odd trailing 32-bit word, if any */
	if (count & 1)
		__raw_writel(*(const u32 *)src, dst);
#else
	u32 __iomem *dst = to;
	const u32 *src = from;
	const u32 *end = src + count;

	while (src < end)
		__raw_writel(*src++, dst++);
#endif
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,564 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include "qib.h"
#include "qib_qsfp.h"
/*
* QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver
* in qib_twsi.c
*/
#define QSFP_MAX_RETRY 4

/*
 * Read @len bytes of the QSFP module EEPROM on @ppd, starting at module
 * address @addr, into @bp.  The module is selected via GPIO (observing
 * the long MODSEL setup/hold times) and accessed over the shared TWSI
 * bus under dd->eep_lock.  Returns the number of bytes read, or a
 * negative errno.
 */
static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
{
	struct qib_devdata *dd = ppd->dd;
	u32 out, mask;
	int ret, cnt, pass = 0;
	int stuck = 0;
	u8 *buff = bp;

	ret = mutex_lock_interruptible(&dd->eep_lock);
	if (ret)
		goto no_unlock;

	if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
		ret = -ENXIO;
		goto bail;
	}

	/*
	 * We presume, if we are called at all, that this board has
	 * QSFP. This is on the same i2c chain as the legacy parts,
	 * but only responds if the module is selected via GPIO pins.
	 * Further, there are very long setup and hold requirements
	 * on MODSEL.
	 */
	mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
	out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
	if (ppd->hw_pidx) {
		mask <<= QSFP_GPIO_PORT2_SHIFT;
		out <<= QSFP_GPIO_PORT2_SHIFT;
	}

	/* drive MODSEL low (asserted) to select the module */
	dd->f_gpio_mod(dd, out, mask, mask);

	/*
	 * Module could take up to 2 Msec to respond to MOD_SEL, and there
	 * is no way to tell if it is ready, so we must wait.
	 */
	msleep(2);

	/* Make sure TWSI bus is in sane state. */
	ret = qib_twsi_reset(dd);
	if (ret) {
		qib_dev_porterr(dd, ppd->port,
				"QSFP interface Reset for read failed\n");
		ret = -EIO;
		stuck = 1;
		goto deselect;
	}

	/* All QSFP modules are at A0 */

	cnt = 0;
	while (cnt < len) {
		unsigned in_page;
		int wlen = len - cnt;

		in_page = addr % QSFP_PAGESIZE;
		/* never cross a 128-byte page boundary in one transfer */
		if ((in_page + wlen) > QSFP_PAGESIZE)
			wlen = QSFP_PAGESIZE - in_page;
		ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
		/* Some QSFP's fail first try. Retry as experiment */
		if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
			continue;
		if (ret) {
			/* qib_twsi_blk_rd() 1 for error, else 0 */
			ret = -EIO;
			goto deselect;
		}
		addr += wlen;
		cnt += wlen;
	}
	ret = cnt;

deselect:
	/*
	 * Module could take up to 10 uSec after transfer before
	 * ready to respond to MOD_SEL negation, and there is no way
	 * to tell if it is ready, so we must wait.
	 */
	udelay(10);
	/* set QSFP MODSEL, RST. LP all high */
	dd->f_gpio_mod(dd, mask, mask, mask);

	/*
	 * Module could take up to 2 Msec to respond to MOD_SEL
	 * going away, and there is no way to tell if it is ready.
	 * so we must wait.
	 */
	if (stuck)
		qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");

	if (pass >= QSFP_MAX_RETRY && ret)
		qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
	else if (pass)
		qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);

	msleep(2);

bail:
	mutex_unlock(&dd->eep_lock);

no_unlock:
	return ret;
}
/*
 * qsfp_write
 * We do not ordinarily write the QSFP, but this is needed to select
 * the page on non-flat QSFPs, and possibly later unusual cases.
 * Same GPIO selection / TWSI-reset / page-chunking protocol as
 * qsfp_read() above.  Returns bytes written, or a negative errno.
 */
static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
			  int len)
{
	struct qib_devdata *dd = ppd->dd;
	u32 out, mask;
	int ret, cnt;
	u8 *buff = bp;

	ret = mutex_lock_interruptible(&dd->eep_lock);
	if (ret)
		goto no_unlock;

	if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
		ret = -ENXIO;
		goto bail;
	}

	/*
	 * We presume, if we are called at all, that this board has
	 * QSFP. This is on the same i2c chain as the legacy parts,
	 * but only responds if the module is selected via GPIO pins.
	 * Further, there are very long setup and hold requirements
	 * on MODSEL.
	 */
	mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
	out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
	if (ppd->hw_pidx) {
		mask <<= QSFP_GPIO_PORT2_SHIFT;
		out <<= QSFP_GPIO_PORT2_SHIFT;
	}
	dd->f_gpio_mod(dd, out, mask, mask);

	/*
	 * Module could take up to 2 Msec to respond to MOD_SEL,
	 * and there is no way to tell if it is ready, so we must wait.
	 */
	msleep(2);

	/* Make sure TWSI bus is in sane state. */
	ret = qib_twsi_reset(dd);
	if (ret) {
		qib_dev_porterr(dd, ppd->port,
				"QSFP interface Reset for write failed\n");
		ret = -EIO;
		goto deselect;
	}

	/* All QSFP modules are at A0 */

	cnt = 0;
	while (cnt < len) {
		unsigned in_page;
		int wlen = len - cnt;

		in_page = addr % QSFP_PAGESIZE;
		/* never cross a 128-byte page boundary in one transfer */
		if ((in_page + wlen) > QSFP_PAGESIZE)
			wlen = QSFP_PAGESIZE - in_page;
		ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
		if (ret) {
			/* qib_twsi_blk_wr() 1 for error, else 0 */
			ret = -EIO;
			goto deselect;
		}
		addr += wlen;
		cnt += wlen;
	}
	ret = cnt;

deselect:
	/*
	 * Module could take up to 10 uSec after transfer before
	 * ready to respond to MOD_SEL negation, and there is no way
	 * to tell if it is ready, so we must wait.
	 */
	udelay(10);
	/* set QSFP MODSEL, RST, LP high */
	dd->f_gpio_mod(dd, mask, mask, mask);

	/*
	 * Module could take up to 2 Msec to respond to MOD_SEL
	 * going away, and there is no way to tell if it is ready.
	 * so we must wait.
	 */
	msleep(2);

bail:
	mutex_unlock(&dd->eep_lock);

no_unlock:
	return ret;
}
/*
 * For validation, we want to check the checksums, even of the
 * fields we do not otherwise use.  Sum the QSFP EEPROM bytes in
 * [first, next) and return the 8 lsbs of the sum, or a negative
 * errno if any read fails.
 */
static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
{
	u16 sum = 0;
	int addr;

	for (addr = first; addr < next; ++addr) {
		u8 byte;
		int ret = qsfp_read(ppd, addr, &byte, 1);

		if (ret < 0)
			return ret;
		sum += byte;
	}
	return sum & 0xFF;
}
/*
 * (Re)read the interesting fields of the QSFP EEPROM into @cp,
 * verifying both vendor checksums along the way.  On any read error
 * the cache is invalidated (cp->id == 0) and a negative errno is
 * returned; checksum mismatches are only logged, not fatal.
 */
int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
{
	int ret;
	int idx;
	u16 cks;
	u32 mask;
	u8 peek[4];

	/* ensure sane contents on invalid reads, for cable swaps */
	memset(cp, 0, sizeof(*cp));

	mask = QSFP_GPIO_MOD_PRS_N;
	if (ppd->hw_pidx)
		mask <<= QSFP_GPIO_PORT2_SHIFT;

	/* module-present pin is asserted low; bail if no module plugged */
	ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
	if (ret & mask) {
		ret = -ENODEV;
		goto bail;
	}

	ret = qsfp_read(ppd, 0, peek, 3);
	if (ret < 0)
		goto bail;
	if ((peek[0] & 0xFE) != 0x0C)
		qib_dev_porterr(ppd->dd, ppd->port,
				"QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);

	if ((peek[2] & 2) == 0) {
		/*
		 * If cable is paged, rather than "flat memory", we need to
		 * set the page to zero, Even if it already appears to be zero.
		 */
		u8 poke = 0;

		ret = qib_qsfp_write(ppd, 127, &poke, 1);
		udelay(50);
		if (ret != 1) {
			qib_dev_porterr(ppd->dd, ppd->port,
					"Failed QSFP Page set\n");
			goto bail;
		}
	}

	/*
	 * First checksum region: bytes 128..190.  Accumulate every byte
	 * (cached or not) into cks and compare against cks1 at the end.
	 */
	ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
	if (ret < 0)
		goto bail;
	if ((cp->id & 0xFE) != 0x0C)
		qib_dev_porterr(ppd->dd, ppd->port,
				"QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);
	cks = cp->id;

	ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);
	if (ret < 0)
		goto bail;
	cks += cp->pwr;

	ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);
	if (ret < 0)
		goto bail;
	cks += ret;

	ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);
	if (ret < 0)
		goto bail;
	cks += cp->len;

	ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);
	if (ret < 0)
		goto bail;
	cks += cp->tech;

	ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
	if (ret < 0)
		goto bail;
	for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
		cks += cp->vendor[idx];

	ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);
	if (ret < 0)
		goto bail;
	cks += cp->xt_xcv;

	ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
	if (ret < 0)
		goto bail;
	for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
		cks += cp->oui[idx];

	ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
	if (ret < 0)
		goto bail;
	for (idx = 0; idx < QSFP_PN_LEN; ++idx)
		cks += cp->partnum[idx];

	ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
	if (ret < 0)
		goto bail;
	for (idx = 0; idx < QSFP_REV_LEN; ++idx)
		cks += cp->rev[idx];

	ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
	if (ret < 0)
		goto bail;
	for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
		cks += cp->atten[idx];

	ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);
	if (ret < 0)
		goto bail;
	cks += ret;

	cks &= 0xFF;
	ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
	if (ret < 0)
		goto bail;
	if (cks != cp->cks1)
		qib_dev_porterr(ppd->dd, ppd->port,
				"QSFP cks1 is %02X, computed %02X\n", cp->cks1,
				cks);

	/* Second checksum covers 192 to (serial, date, lot) */
	ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);
	if (ret < 0)
		goto bail;
	cks = ret;

	ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
	if (ret < 0)
		goto bail;
	for (idx = 0; idx < QSFP_SN_LEN; ++idx)
		cks += cp->serial[idx];

	ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
	if (ret < 0)
		goto bail;
	for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
		cks += cp->date[idx];

	ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
	if (ret < 0)
		goto bail;
	for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
		cks += cp->lot[idx];

	ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);
	if (ret < 0)
		goto bail;
	cks += ret;

	ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
	if (ret < 0)
		goto bail;
	cks &= 0xFF;
	if (cks != cp->cks2)
		qib_dev_porterr(ppd->dd, ppd->port,
				"QSFP cks2 is %02X, computed %02X\n", cp->cks2,
				cks);
	return 0;

bail:
	cp->id = 0;
	return ret;
}
/* Decode of QSFP byte 147 D4..7 (device technology), indexed by nibble */
const char * const qib_qsfp_devtech[16] = {
	"850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
	"1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
	"Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
	"Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
};

#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
#define QSFP_DEFAULT_HDR_CNT 224

/* Power-class strings, indexed by 4 * QSFP_PWR(); each entry is 4 chars */
static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
/*
 * Initialize structures that control access to QSFP. Called once per port
 * on cards that support QSFP.
 */
void qib_qsfp_init(struct qib_qsfp_data *qd,
		   void (*fevent)(struct work_struct *))
{
	u32 mask, highs;
	int pins;
	struct qib_devdata *dd = qd->ppd->dd;

	/* Initialize work struct for later QSFP events */
	INIT_WORK(&qd->work, fevent);

	/*
	 * Later, we may want more validation. For now, just set up pins and
	 * blip reset. If module is present, call qib_refresh_qsfp_cache(),
	 * to do further init.
	 */
	mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
	highs = mask - QSFP_GPIO_MOD_RST_N; /* all high except RST: blip it */
	if (qd->ppd->hw_pidx) {
		mask <<= QSFP_GPIO_PORT2_SHIFT;
		highs <<= QSFP_GPIO_PORT2_SHIFT;
	}
	dd->f_gpio_mod(dd, highs, mask, mask);
	udelay(20); /* Generous RST dwell */

	dd->f_gpio_mod(dd, mask, mask, mask);
	/* Spec says module can take up to two seconds! */
	mask = QSFP_GPIO_MOD_PRS_N;
	if (qd->ppd->hw_pidx)
		mask <<= QSFP_GPIO_PORT2_SHIFT;

	/* Do not try to wait here. Better to let event handle it */
	pins = dd->f_gpio_mod(dd, 0, 0, 0);
	if (pins & mask)
		goto bail;
	/* We see a module, but it may be unwise to look yet. Just schedule */
	qd->t_insert = get_jiffies_64();
	schedule_work(&qd->work);
bail:
	return;
}
void qib_qsfp_deinit(struct qib_qsfp_data *qd)
{
	/*
	 * There is nothing to do here for now.  Our
	 * work is scheduled with schedule_work(), and
	 * flush_scheduled_work() from remove_one will
	 * block until all work set up with schedule_work()
	 * completes.
	 */
}
/*
 * Format a human-readable summary of the QSFP cable data for @ppd into
 * @buf (at most @len bytes), followed by a hex dump of the first
 * QSFP_DEFAULT_HDR_CNT EEPROM bytes.  Returns the number of characters
 * written, or a negative errno if the EEPROM could not be read.
 */
int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
{
	struct qib_qsfp_cache cd;
	u8 bin_buff[QSFP_DUMP_CHUNK];
	char lenstr[6];
	int sofar, ret;
	int bidx = 0;

	sofar = 0;
	ret = qib_refresh_qsfp_cache(ppd, &cd);
	if (ret < 0)
		goto bail;

	lenstr[0] = ' ';
	lenstr[1] = '\0';
	if (QSFP_IS_CU(cd.tech))
		/* cd.len is a u8, so "255M " (5 chars + NUL) always fits */
		snprintf(lenstr, sizeof(lenstr), "%dM ", cd.len);

	sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
			   (QSFP_PWR(cd.pwr) * 4));

	sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
			   qib_qsfp_devtech[cd.tech >> 4]);

	sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
			   QSFP_VEND_LEN, cd.vendor);

	sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
			   QSFP_OUI(cd.oui));

	sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
			   QSFP_PN_LEN, cd.partnum);

	sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
			   QSFP_REV_LEN, cd.rev);

	if (QSFP_IS_CU(cd.tech))
		sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
				   QSFP_ATTEN_SDR(cd.atten),
				   QSFP_ATTEN_DDR(cd.atten));

	sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
			   QSFP_SN_LEN, cd.serial);

	sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
			   QSFP_DATE_LEN, cd.date);

	/* bug fix: this line previously dumped cd.date instead of cd.lot */
	sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
			   QSFP_LOT_LEN, cd.lot);

	/* raw hex dump of the first QSFP_DEFAULT_HDR_CNT EEPROM bytes */
	while (bidx < QSFP_DEFAULT_HDR_CNT) {
		int iidx;

		ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
		if (ret < 0)
			goto bail;
		for (iidx = 0; iidx < ret; ++iidx)
			sofar += scnprintf(buf + sofar, len - sofar, " %02X",
					   bin_buff[iidx]);
		sofar += scnprintf(buf + sofar, len - sofar, "\n");
		bidx += QSFP_DUMP_CHUNK;
	}
	ret = sofar;
bail:
	return ret;
}

View File

@ -0,0 +1,184 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* QSFP support common definitions, for ib_qib driver */

#define QSFP_DEV 0xA0
#define QSFP_PWR_LAG_MSEC 2000

/*
 * Below are masks for various QSFP signals, for Port 1.
 * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT.
 * _N means asserted low
 */
#define QSFP_GPIO_MOD_SEL_N (4)
#define QSFP_GPIO_MOD_PRS_N (8)
#define QSFP_GPIO_INT_N (0x10)
#define QSFP_GPIO_MOD_RST_N (0x20)
#define QSFP_GPIO_LP_MODE (0x40)
#define QSFP_GPIO_PORT2_SHIFT 5

#define QSFP_PAGESIZE 128
/* Defined fields that QLogic requires of qualified cables */
/* Byte 0 is Identifier, not checked */
/* Byte 1 is reserved "status MSB" */
/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */
/*
 * Rest of first 128 not used, although 127 is reserved for page select
 * if module is not "Flat memory".
 */
/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
#define QSFP_MOD_ID_OFFS 128
/*
 * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
 * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
 */
#define QSFP_MOD_PWR_OFFS 129
/* Byte 130 is Connector type. Not QLogic req'd */
/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
/* Byte 139 is encoding. code 0x01 is 8b10b. Not QLogic req'd */
/* byte 140 is nominal bit-rate, in units of 100Mbits/sec Not QLogic req'd */
/* Byte 141 is Extended Rate Select. Not QLogic req'd */
/* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */
/* Byte 146 is length for Copper. Units of 1 meter */
#define QSFP_MOD_LEN_OFFS 146
/*
 * Byte 147 is Device technology. D0..3 not QLogic req'd
 * D4..7 select from 15 choices, translated by table:
 */
#define QSFP_MOD_TECH_OFFS 147
extern const char *const qib_qsfp_devtech[16];
/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
/* Attenuation should be valid for copper other than full/near Eq */
#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
/* Length is only valid if technology is "copper" */
#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
#define QSFP_TECH_1490 9

#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
			oui[2])
#define QSFP_OUI_AMPHENOL 0x415048
#define QSFP_OUI_FINISAR 0x009065
#define QSFP_OUI_GORE 0x002177

/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
#define QSFP_VEND_OFFS 148
#define QSFP_VEND_LEN 16
/* Byte 164 is IB Extended transceiver codes Bits D0..3 are SDR,DDR,QDR,EDR */
#define QSFP_IBXCV_OFFS 164
/* Bytes 165..167 are Vendor OUI number */
#define QSFP_VOUI_OFFS 165
#define QSFP_VOUI_LEN 3
/* Bytes 168..183 are Vendor Part Number, string */
#define QSFP_PN_OFFS 168
#define QSFP_PN_LEN 16
/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
#define QSFP_REV_OFFS 184
#define QSFP_REV_LEN 2
/*
 * Bytes 186,187 are Wavelength, if Optical. Not QLogic req'd
 * If copper, they are attenuation in dB:
 * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
 */
#define QSFP_ATTEN_OFFS 186
#define QSFP_ATTEN_LEN 2
/* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */
/* Byte 190 is Max Case Temp. Not QLogic req'd */
/* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */
#define QSFP_CC_OFFS 191
/* Bytes 192..195 are Options implemented in qsfp. Not QLogic req'd */
/* Bytes 196..211 are Serial Number, String */
#define QSFP_SN_OFFS 196
#define QSFP_SN_LEN 16
/* Bytes 212..217 are date-code YYMMDD (MM==1 for Jan); see QSFP_DATE_LEN */
#define QSFP_DATE_OFFS 212
#define QSFP_DATE_LEN 6
/* Bytes 218,219 are optional lot-code, string */
#define QSFP_LOT_OFFS 218
#define QSFP_LOT_LEN 2
/* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */
/* Byte 223 is LSB of sum of bytes 192..222 */
#define QSFP_CC_EXT_OFFS 223
/*
 * struct qib_qsfp_data encapsulates state of QSFP device for one port.
 * it will be part of port-chip-specific data if a board supports QSFP.
 *
 * Since multiple board-types use QSFP, and their pport_data structs
 * differ (in the chip-specific section), we need a pointer to its head.
 *
 * Avoiding premature optimization, we will have one work_struct per port,
 * and let the (increasingly inaccurately named) eep_lock arbitrate
 * access to common resources.
 */

/*
 * Hold the parts of the onboard EEPROM that we care about, so we aren't
 * constantly bit-banging the TWSI bus to re-read them.
 */
struct qib_qsfp_cache {
	u8 id;	/* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */
	u8 pwr; /* in D6,7 */
	u8 len;	/* in meters, Cu only */
	u8 tech;
	char vendor[QSFP_VEND_LEN];
	u8 xt_xcv; /* Ext. transceiver codes, 4 lsbs are IB speed supported */
	u8 oui[QSFP_VOUI_LEN];
	u8 partnum[QSFP_PN_LEN];
	u8 rev[QSFP_REV_LEN];
	u8 atten[QSFP_ATTEN_LEN];
	u8 cks1; /* Checksum of bytes 128..190 */
	u8 serial[QSFP_SN_LEN];
	u8 date[QSFP_DATE_LEN];
	u8 lot[QSFP_LOT_LEN];
	u8 cks2; /* Checksum of bytes 192..222 */
};
/* Power class from byte 129 D6,7; indexes pwr_codes in 4-char steps */
#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])

/* Per-port QSFP state, embedded in chip-specific port data */
struct qib_qsfp_data {
	/* Helps to find our way */
	struct qib_pportdata *ppd;
	struct work_struct work;	/* deferred cable-event handling */
	struct qib_qsfp_cache cache;
	u64 t_insert;	/* get_jiffies_64() timestamp of module insertion */
};
extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
struct qib_qsfp_cache *cp);
extern void qib_qsfp_init(struct qib_qsfp_data *qd,
void (*fevent)(struct work_struct *));
extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,817 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/spinlock.h>
#include "qib.h"
#include "qib_mad.h"
/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 * Indexed by the 5-bit RNR NAK timer code; trailing comments give the
 * equivalent delay in milliseconds.
 */
const u32 ib_qib_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01:    .01 */
	20,	/* 02     .02 */
	30,	/* 03:    .03 */
	40,	/* 04:    .04 */
	60,	/* 05:    .06 */
	80,	/* 06:    .08 */
	120,	/* 07:    .12 */
	160,	/* 08:    .16 */
	240,	/* 09:    .24 */
	320,	/* 0A:    .32 */
	480,	/* 0B:    .48 */
	640,	/* 0C:    .64 */
	960,	/* 0D:    .96 */
	1280,	/* 0E:   1.28 */
	1920,	/* 0F:   1.92 */
	2560,	/* 10:   2.56 */
	3840,	/* 11:   3.84 */
	5120,	/* 12:   5.12 */
	7680,	/* 13:   7.68 */
	10240,	/* 14:  10.24 */
	15360,	/* 15:  15.36 */
	20480,	/* 16:  20.48 */
	30720,	/* 17:  30.72 */
	40960,	/* 18:  40.96 */
	61440,	/* 19:  61.44 */
	81920,	/* 1A:  81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK; return 0 after posting a IB_WC_LOC_PROT_ERR
 * completion if any SGE fails the LKEY check.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;
	struct qib_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		/* zero-length SGEs are skipped entirely */
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	/* drop the MR references taken on the SGEs validated so far */
	while (j) {
		struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		atomic_dec(&sge->mr->refcount);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Consumes the next receive WQE from the QP's receive queue (or its SRQ,
 * if attached) under rq->lock, and may fire the SRQ limit event when the
 * number of remaining WQEs drops below srq->limit.
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct qib_rq *rq;
	struct qib_rwq *wq;
	struct qib_srq *srq;
	struct qib_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	/* Consume from the SRQ when the QP is attached to one. */
	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	/* Queue empty? */
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		/* Fire the SRQ limit event once, then disarm the limit. */
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
/*
 * Switch to alternate path: promote the QP's alternate path to be the
 * primary path and notify the consumer that migration completed.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
	struct ib_event ev;

	/* Adopt the alternate path as the current one. */
	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	/* Tell the consumer the path migration has happened. */
	ev.event = IB_EVENT_PATH_MIG;
	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
if (!index) {
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
return ppd->guid;
} else
return ibp->guids[index - 1];
}
/*
 * Check a GID against an expected interface id and subnet prefix; the
 * default GID prefix is always accepted as a subnet prefix match.
 */
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	if (gid->global.interface_id != id)
		return 0;
	return gid->global.subnet_prefix == gid_prefix ||
	       gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX;
}
/*
 * qib_ruc_check_hdr - validate the source of a received UC/RC packet
 * @ibp: the port the packet arrived on
 * @hdr: the packet's IB header
 * @has_grh: nonzero if the packet carries a GRH
 * @qp: the destination QP
 * @bth0: BTH word 0 (migration flag and PKEY bits are examined)
 *
 * Checks GRH presence/contents, PKEY, SLID, and port against either the
 * QP's alternate path (when the QP is armed for migration and the packet
 * has IB_BTH_MIG_REQ set) or its primary path.  A successful
 * alternate-path match migrates the QP via qib_migrate_qp().
 *
 * Return 0 if the packet is acceptable, 1 if it should be dropped.
 *
 * This should be called with the QP s_lock held.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0)
{
	__be64 guid;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		/* Validate against the alternate path. */
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		qib_migrate_qp(qp);
	} else {
		/* Validate against the primary path. */
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}
/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct qib_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_qp *qp;
	struct qib_swqe *wqe;
	struct qib_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
	    !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= QIB_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	/* Drop the packet if the responder is missing or in a bad state. */
	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof wc);
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* Immediate data consumes an RWQE (wr_id only). */
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		/* Data flows responder -> requester; don't release below. */
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->wr.wr.atomic.remote_addr,
					  wqe->wr.wr.atomic.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	/* Copy the payload, advancing through the sender's SGE list. */
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				atomic_dec(&sge->mr->refcount);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			/* Advance to the MR's next mapped segment. */
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	/* Drop the MR references taken on the responder's SGEs. */
	if (release)
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}

	/* No receive completion unless an RWQE was actually consumed. */
	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the QIB_S_BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	/* A retry count of 7 means "retry forever" and is not decremented. */
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
		goto clr_busy;
	/* Arm the RNR retry timer using the responder's min RNR timeout. */
	sqp->s_flags |= QIB_S_WAIT_RNR;
	sqp->s_timer.function = qib_rc_rnr_retry;
	sqp->s_timer.expires = jiffies +
		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
	add_timer(&sqp->s_timer);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	qib_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~QIB_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	/* Drop the reference taken by qib_lookup_qpn(). */
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	__be64 src_ifid;

	/* Source GUID: index 0 is the port GUID, others are from the table. */
	if (grh->sgid_index)
		src_ifid = ibp->guids[grh->sgid_index - 1];
	else
		src_ifid = ppd_from_ibp(ibp)->guid;

	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
	hdr->sgid.global.interface_id = src_ifid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
/*
 * qib_make_ruc_header - build the LRH, optional GRH, and BTH of a packet
 * @qp: the QP the packet is being sent on
 * @ohdr: location of the BTH to fill in (inside qp->s_hdr)
 * @bth0: caller-supplied BTH word 0 bits (opcode etc.); PKEY, pad and
 *        migration bits are OR'ed in here
 * @bth2: BTH word 2 (PSN and related bits), stored as given
 *
 * Fills in qp->s_hdr and updates qp->s_hdrwords.  qp->s_cur_size must
 * already hold the payload size in bytes.
 */
void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
	extra_bytes = -qp->s_cur_size & 3;	/* pad payload to a dword */
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	/* VL (from the SL-to-VL map) and SL go in LRH word 0. */
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
}
/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
	struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct qib_qp *qp);
	unsigned long flags;

	/*
	 * UC/RC destined to our own LID (path bits masked off) are
	 * handled entirely in software via the loopback path.
	 */
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	/* Select the packet builder for this QP type. */
	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= QIB_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				break;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
		}
	} while (make_req(qp));
}
/*
 * qib_send_complete - finish processing a send WQE
 * @qp: the QP owning the WQE
 * @wqe: the completed (or flushed) work queue entry
 * @status: the completion status to report
 *
 * Drops the MR references (and, for datagram QPs, the AH reference) held
 * by the WQE, generates a send completion when required, and retires the
 * WQE by advancing s_last (fixing up any other index that pointed at it).
 *
 * This should be called with s_lock held.
 */
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		return;

	/* Release the MR references held by the WQE's SGEs. */
	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct qib_sge *sge = &wqe->sg_list[i];

		atomic_dec(&sge->mr->refcount);
	}
	/* Datagram-family QPs also hold a reference on the AH. */
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);

	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof wc);
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}

	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	/* Don't leave any other queue index pointing at the retired WQE. */
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}

View File

@ -38,11 +38,10 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include "ipath_kernel.h"
#include "ipath_registers.h"
#include "ipath_7220.h"
#include "qib.h"
#include "qib_7220.h"
static unsigned char ipath_sd7220_ib_img[] = {
static unsigned char qib_sd7220_ib_img[] = {
/*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6,
0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
/*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01,
@ -1069,14 +1068,14 @@ static unsigned char ipath_sd7220_ib_img[] = {
0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81
};
int ipath_sd7220_ib_load(struct ipath_devdata *dd)
int qib_sd7220_ib_load(struct qib_devdata *dd)
{
return ipath_sd7220_prog_ld(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
sizeof(ipath_sd7220_ib_img), 0);
return qib_sd7220_prog_ld(dd, IB_7220_SERDES, qib_sd7220_ib_img,
sizeof(qib_sd7220_ib_img), 0);
}
int ipath_sd7220_ib_vfy(struct ipath_devdata *dd)
int qib_sd7220_ib_vfy(struct qib_devdata *dd)
{
return ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
sizeof(ipath_sd7220_ib_img), 0);
return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, qib_sd7220_ib_img,
sizeof(qib_sd7220_ib_img), 0);
}

View File

@ -0,0 +1,973 @@
/*
* Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include "qib.h"
#include "qib_common.h"
/* default pio off, sdma on */
/*
 * Read-only module parameter: number of SDMA descriptor queue entries
 * per port.  A value of 0 falls back to 256 in alloc_sdma().
 */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16	/* SDmaDwordCount[10:0], see make_sdma_desc() */
#define SDMA_DESC_GEN_LSB       30	/* SDmaGeneration[1:0], see make_sdma_desc() */
/* Printable names indexed by enum qib_sdma_states. */
char *qib_sdma_state_names[] = {
	[qib_sdma_state_s00_hw_down]          = "s00_HwDown",
	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
	[qib_sdma_state_s20_idle]             = "s20_Idle",
	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
	[qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
	[qib_sdma_state_s99_running]          = "s99_Running",
};
/* Printable names indexed by enum qib_sdma_events. */
char *qib_sdma_event_names[] = {
	[qib_sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
	[qib_sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
	[qib_sdma_event_e20_hw_started]   = "e20_HwStarted",
	[qib_sdma_event_e30_go_running]   = "e30_GoRunning",
	[qib_sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
	[qib_sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
	[qib_sdma_event_e60_hw_halted]    = "e60_HwHalted",
	[qib_sdma_event_e70_go_idle]      = "e70_GoIdle",
	[qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
	[qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
	[qib_sdma_event_e90_timer_tick]   = "e90_TimerTick",
};
/* declare all statics here rather than keep sorting */
static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(unsigned long);
static void unmap_desc(struct qib_pportdata *, unsigned);
/* Take a reference on the sdma state machine (paired with sdma_put()). */
static void sdma_get(struct qib_sdma_state *ss)
{
	kref_get(&ss->kref);
}
/* kref release callback: wake the waiter in sdma_finalput(). */
static void sdma_complete(struct kref *kref)
{
	struct qib_sdma_state *ss =
		container_of(kref, struct qib_sdma_state, kref);

	complete(&ss->comp);
}
/* Drop a reference on the sdma state machine. */
static void sdma_put(struct qib_sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}
/*
 * Drop the caller's reference and block until all other references are
 * gone, i.e. until the state machine has fully shut down.
 */
static void sdma_finalput(struct qib_sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}
/*
 * Complete all the sdma requests on the active list, in the correct
 * order, and with appropriate processing.   Called when cleaning up
 * after sdma shutdown, and when new sdma requests are submitted for
 * a link that is down.   This matches what is done for requests
 * that complete normally, it's just the full list.
 *
 * Must be called with sdma_lock held
 */
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
	struct qib_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
		list_del_init(&txp->list);
		/* Unmap this request's descriptors if it asked us to. */
		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
			unsigned idx;

			idx = txp->start_idx;
			while (idx != txp->next_descq_idx) {
				unmap_desc(ppd, idx);
				if (++idx == ppd->sdma_descq_cnt)
					idx = 0;
			}
		}
		/* Report the request as aborted, not completed. */
		if (txp->callback)
			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
	}
}
/*
 * Tasklet body (scheduled by sdma_start_sw_clean_up()): reclaim all
 * outstanding descriptors and requests and reset the software view of
 * the ring, then report e40_sw_cleaned to the state machine.
 */
static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/* Process all retired requests. */
	qib_sdma_make_progress(ppd);

	clear_sdma_activelist(ppd);

	/*
	 * Resync count of added and removed.  It is VERY important that
	 * sdma_descq_removed NEVER decrement - user_sdma depends on it.
	 */
	ppd->sdma_descq_removed = ppd->sdma_descq_added;

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers will be reset when switching states
	 * due to calling __qib_sdma_process_event() below.
	 */
	ppd->sdma_descq_tail = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_head_dma[0] = 0;
	ppd->sdma_generation = 0;

	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
/*
 * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
 * as a result of send buffer errors or send DMA descriptor errors.
 * We want to disarm the buffers in these cases.
 */
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	unsigned bufno;

	/* Disarm every send buffer in this port's SDMA range. */
	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

	ppd->dd->f_sdma_hw_start_up(ppd);
}
static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);
}
/* Schedule the software clean-up tasklet (sdma_sw_clean_up_task()). */
static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}
/*
 * Move the SDMA state machine to @next_state: record the transition for
 * debugging, translate the per-state action table into sendctrl op bits,
 * and push the resulting op to the chip.
 */
static void sdma_set_state(struct qib_pportdata *ppd,
			   enum qib_sdma_states next_state)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	struct sdma_set_state_action *action = ss->set_state_action;
	unsigned sendctrl_op = 0;

	/* Remember where we came from (debugging bookkeeping). */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;

	ss->current_state = next_state;

	if (action[next_state].op_enable)
		sendctrl_op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
	if (action[next_state].op_intenable)
		sendctrl_op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
	if (action[next_state].op_halt)
		sendctrl_op |= QIB_SDMA_SENDCTRL_OP_HALT;
	if (action[next_state].op_drain)
		sendctrl_op |= QIB_SDMA_SENDCTRL_OP_DRAIN;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;
	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = sendctrl_op;
	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}
/*
 * unmap_desc - dma_unmap the buffer referenced by one SDMA descriptor
 * @ppd: the port
 * @head: index of the descriptor in the descriptor queue
 *
 * Decodes the DMA address and byte length from the two descriptor
 * qwords; see make_sdma_desc() for the encoding.
 */
static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	/* SDmaPhyAddr[31:0] is in desc[0] bits [63:32], [47:32] in desc[1]. */
	addr = (desc[1] << 32) | (desc[0] >> 32);
	/* Dword count field, scaled to a byte length. */
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}
/*
 * alloc_sdma - allocate the SDMA descriptor FIFO and head-update page
 * @ppd: the port
 *
 * Sizes the descriptor queue from the sdma_descq_cnt module parameter
 * (falling back to 256 if it is zero).  Returns 0 on success or -ENOMEM
 * on failure, in which case ppd->sdma_descq_cnt is reset to 0.
 */
static int alloc_sdma(struct qib_pportdata *ppd)
{
	ppd->sdma_descq_cnt = sdma_descq_cnt;
	if (!ppd->sdma_descq_cnt)
		ppd->sdma_descq_cnt = 256;

	/* Allocate memory for SendDMA descriptor FIFO */
	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
		GFP_KERNEL);

	if (!ppd->sdma_descq) {
		qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
			    "FIFO memory\n");
		goto bail;
	}

	/* Allocate memory for DMA of head register to memory */
	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
	if (!ppd->sdma_head_dma) {
		qib_dev_err(ppd->dd, "failed to allocate SendDMA "
			    "head memory\n");
		goto cleanup_descq;
	}
	ppd->sdma_head_dma[0] = 0;
	return 0;

cleanup_descq:
	dma_free_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
		ppd->sdma_descq_phys);
	ppd->sdma_descq = NULL;
	ppd->sdma_descq_phys = 0;
bail:
	ppd->sdma_descq_cnt = 0;
	return -ENOMEM;
}
/*
 * free_sdma - release the DMA memory allocated by alloc_sdma()
 * @ppd: the port
 *
 * Safe to call when allocation only partially succeeded; freed pointers
 * are NULLed so a repeat call is harmless.
 */
static void free_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	if (ppd->sdma_head_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *)ppd->sdma_head_dma,
				  ppd->sdma_head_phys);
		ppd->sdma_head_dma = NULL;
		ppd->sdma_head_phys = 0;
	}

	if (ppd->sdma_descq) {
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt * sizeof(u64[2]),
				  ppd->sdma_descq, ppd->sdma_descq_phys);
		ppd->sdma_descq = NULL;
		ppd->sdma_descq_phys = 0;
	}
}
/*
 * make_sdma_desc - encode one two-qword SDMA descriptor
 * @ppd: the port (supplies the current generation bits)
 * @sdmadesc: two-qword output buffer
 * @addr: dword-aligned DMA address of the buffer
 * @dwlen: buffer length in dwords
 * @dwoffset: dword offset into the send buffer
 */
static inline void make_sdma_desc(struct qib_pportdata *ppd,
				  u64 *sdmadesc, u64 addr, u64 dwlen,
				  u64 dwoffset)
{

	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
		SDMA_DESC_GEN_LSB;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}
/*
 * qib_sdma_make_progress - retire descriptors the hardware has consumed
 * @ppd: the port
 *
 * Advances the software head to the hardware head, unmapping descriptors
 * and invoking request callbacks as whole requests complete, then tells
 * the verbs layer how much descriptor space is now free.
 *
 * Returns nonzero if any progress was made.
 *
 * sdma_lock must be held
 */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
	struct list_head *lp = NULL;
	struct qib_sdma_txreq *txp = NULL;
	struct qib_devdata *dd = ppd->dd;
	int progress = 0;
	u16 hwhead;
	u16 idx = 0;

	hwhead = dd->f_sdma_gethead(ppd);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

	if (!list_empty(&ppd->sdma_activelist)) {
		lp = ppd->sdma_activelist.next;
		txp = list_entry(lp, struct qib_sdma_txreq, list);
		idx = txp->start_idx;
	}

	while (ppd->sdma_descq_head != hwhead) {
		/* if desc is part of this txp, unmap if needed */
		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
		    (idx == ppd->sdma_descq_head)) {
			unmap_desc(ppd, ppd->sdma_descq_head);
			if (++idx == ppd->sdma_descq_cnt)
				idx = 0;
		}

		/* increment dequed desc count */
		ppd->sdma_descq_removed++;

		/* advance head, wrap if needed */
		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
			ppd->sdma_descq_head = 0;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
			/* remove from active list */
			list_del_init(&txp->list);
			if (txp->callback)
				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			if (list_empty(&ppd->sdma_activelist))
				txp = NULL;
			else {
				lp = ppd->sdma_activelist.next;
				txp = list_entry(lp, struct qib_sdma_txreq,
						 list);
				idx = txp->start_idx;
			}
		}
		progress = 1;
	}
	if (progress)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
	return progress;
}
/*
 * This is called from interrupt context.
 * Locked wrapper around __qib_sdma_intr().
 */
void qib_sdma_intr(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_intr(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
/* Caller must hold sdma_lock (see qib_sdma_intr()). */
void __qib_sdma_intr(struct qib_pportdata *ppd)
{
	if (__qib_sdma_running(ppd))
		qib_sdma_make_progress(ppd);
}
/*
 * qib_setup_sdma - allocate SDMA resources and bring the engine up
 * @ppd: the port
 *
 * Allocates the descriptor queue and head-DMA page, initializes the
 * state machine and its reference counting, programs the chip's SDMA
 * registers, and issues the e10_go_hw_start event.
 *
 * Returns 0 on success or a negative errno; on a register-init failure
 * the resources are released via qib_teardown_sdma().
 */
int qib_setup_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	int ret = 0;

	ret = alloc_sdma(ppd);
	if (ret)
		goto bail;

	/* set consistent sdma state */
	ppd->dd->f_sdma_init_early(ppd);
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	/* set up reference counting */
	kref_init(&ppd->sdma_state.kref);
	init_completion(&ppd->sdma_state.comp);

	ppd->sdma_generation = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_descq_removed = 0;
	ppd->sdma_descq_added = 0;

	INIT_LIST_HEAD(&ppd->sdma_activelist);

	tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
		     (unsigned long)ppd);

	ret = dd->f_init_sdma_regs(ppd);
	if (ret)
		goto bail_alloc;

	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

	return 0;

bail_alloc:
	qib_teardown_sdma(ppd);
bail:
	return ret;
}
/*
 * Shut down the send DMA machinery for a port and release its
 * resources.  Counterpart of qib_setup_sdma().
 */
void qib_teardown_sdma(struct qib_pportdata *ppd)
{
	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
	/*
	 * This waits for the state machine to exit so it is not
	 * necessary to kill the sdma_sw_clean_up_task to make sure
	 * it is not running.
	 */
	sdma_finalput(&ppd->sdma_state);
	free_sdma(ppd);
}
/* Locked query of the SDMA engine's running status. */
int qib_sdma_running(struct qib_pportdata *ppd)
{
	unsigned long lflags;
	int running;

	spin_lock_irqsave(&ppd->sdma_lock, lflags);
	running = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, lflags);

	return running;
}
/*
 * Complete a request when sdma not running; likely only request
 * but to simplify the code, always queue it, then process the full
 * activelist.  We process the entire list to ensure that this particular
 * request does get its callback, but in the correct order.
 * Must be called with sdma_lock held
 */
static void complete_sdma_err_req(struct qib_pportdata *ppd,
				  struct qib_verbs_txreq *tx)
{
	atomic_inc(&tx->qp->s_dma_busy);
	/* no sdma descriptors, so no unmap_desc */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	/* flush the whole active list so callbacks run in queue order */
	clear_sdma_activelist(ppd);
}
/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 * the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 * (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 *
 * Returns 0 on success (including when the engine is not running and
 * the request is completed in error), or -EBUSY when the request had
 * to be queued to wait for free descriptors.
 */
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
			struct qib_sge_state *ss, u32 dwords,
			struct qib_verbs_txreq *tx)
{
	unsigned long flags;
	struct qib_sge *sge;
	struct qib_qp *qp;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
	/* engine not running: complete the request in error immediately */
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);
		goto unlock;
	}

	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
		/* reap completed descriptors; retry if any were freed */
		if (qib_sdma_make_progress(ppd))
			goto retry;
		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
			ppd->dd->f_sdma_set_desc_cnt(ppd,
					ppd->sdma_descq_cnt / 2);
		goto busy;
	}

	/* first descriptor covers the pre-built packet header */
	dwoffset = tx->hdr_dwords;
	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
	sdmadesc[0] |= SDMA_DESC_FIRST;
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
	/* write to the descq */
	tail = ppd->sdma_descq_tail;
	descqp = &ppd->sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);
	/* increment the tail */
	if (++tail == ppd->sdma_descq_cnt) {
		tail = 0;
		descqp = &ppd->sdma_descq[0].qw[0];
		++ppd->sdma_generation;
	}
	tx->txreq.start_idx = tail;
	/* map and queue one descriptor per SGE chunk of payload */
	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;
		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
				      dw << 2, DMA_TO_DEVICE);
		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
			goto unmap;
		sdmadesc[0] = 0;
		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);
		/* increment the tail */
		if (++tail == ppd->sdma_descq_cnt) {
			tail = 0;
			descqp = &ppd->sdma_descq[0].qw[0];
			++ppd->sdma_generation;
		}
		/* advance the SGE state past the bytes just consumed */
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		dwoffset += dw;
		dwords -= dw;
	}
	/* back up to the last descriptor written and tag it LAST */
	if (!tail)
		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
	descqp -= 2;
	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
	atomic_inc(&tx->qp->s_dma_busy);
	tx->txreq.next_descq_idx = tail;
	/* hand the new tail to the chip and track the request */
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	goto unlock;

unmap:
	/* DMA mapping failed: unwind every descriptor queued above */
	for (;;) {
		if (!tail)
			tail = ppd->sdma_descq_cnt - 1;
		else
			tail--;
		if (tail == ppd->sdma_descq_tail)
			break;
		unmap_desc(ppd, tail);
	}
	qp = tx->qp;
	qib_put_txreq(tx);
	spin_lock(&qp->s_lock);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		/* XXX what about error sending RDMA read responses? */
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
			qib_error_qp(qp, IB_WC_GENERAL_ERR);
	} else if (qp->s_wqe)
		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
	spin_unlock(&qp->s_lock);
	/* return zero to process the next send work request */
	goto unlock;

busy:
	qp = tx->qp;
	spin_lock(&qp->s_lock);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		struct qib_ibdev *dev;
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		tx->ss = ss;
		tx->dwords = dwords;
		qp->s_tx = tx;
		dev = &ppd->dd->verbs_dev;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			struct qib_ibport *ibp;
			ibp = &ppd->ibport_data;
			ibp->n_dmawait++;
			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
			list_add_tail(&qp->iowait, &dev->dmawait);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&qp->s_lock);
		ret = -EBUSY;
	} else {
		spin_unlock(&qp->s_lock);
		qib_put_txreq(tx);
	}

unlock:
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}
/*
 * Locked wrapper for the SDMA state machine; if the transition lands
 * in (or stays in) the running state, notify the verbs layer of the
 * current free-descriptor count.
 */
void qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	unsigned long lflags;

	spin_lock_irqsave(&ppd->sdma_lock, lflags);

	__qib_sdma_process_event(ppd, event);

	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

	spin_unlock_irqrestore(&ppd->sdma_lock, lflags);
}
/*
 * SDMA state machine: dispatch @event against the current state and
 * perform the associated transition/actions.  Caller must hold
 * ppd->sdma_lock (qib_sdma_process_event() is the locked wrapper).
 * Events not listed in a state's switch are deliberately ignored.
 */
void __qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	switch (ss->current_state) {
	/* s00: hardware down */
	case qib_sdma_state_s00_hw_down:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			break;
		case qib_sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually result
			 * of link up, then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active on
			 * 7220, e.g. */
			ss->go_s99_running = 1;
			/* fall through and start dma engine */
		case qib_sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&ppd->sdma_state);
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			/* software cleanup finished while down: drop ref */
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	/* s10: waiting for hardware start-up to complete */
	case qib_sdma_state_s10_hw_start_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			/* go running if requested while starting, else idle */
			sdma_set_state(ppd, ss->go_s99_running ?
				       qib_sdma_state_s99_running :
				       qib_sdma_state_s20_idle);
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	/* s20: hardware up, engine idle */
	case qib_sdma_state_s20_idle:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			sdma_set_state(ppd, qib_sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	/* s30: waiting for software cleanup to finish */
	case qib_sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			/* sw clean done: restart the hardware */
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			sdma_hw_start_up(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	/* s40: waiting for hardware cleanup to finish */
	case qib_sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			/* hw clean done: proceed to software cleanup */
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	/* s50: waiting for the hardware to halt */
	case qib_sdma_state_s50_hw_halt_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			/* halt observed: start hardware cleanup */
			sdma_set_state(ppd,
				       qib_sdma_state_s40_hw_clean_up_wait);
			ppd->dd->f_sdma_hw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	/* s99: engine running */
	case qib_sdma_state_s99_running:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			/* 7220 error halt: software cleanup, no hw halt wait */
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e7322_err_halted:
			/* 7322 error halt: wait for the hardware halt event */
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;
	}

	ss->last_event = event;
}

View File

@ -0,0 +1,375 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "qib_verbs.h"
/**
 * qib_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Returns 0 on success, -EINVAL for a WR with too many SGEs, or
 * -ENOMEM when the receive ring is full; *bad_wr points at the
 * offending WR on error.
 */
int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	unsigned long flags;

	while (wr) {
		struct qib_rwq *wq;
		struct qib_rwqe *wqe;
		u32 next_head;
		int sg;

		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next_head = wq->head + 1;
		if (next_head >= srq->rq.size)
			next_head = 0;
		if (next_head == wq->tail) {
			/* ring full */
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (sg = 0; sg < wr->num_sge; sg++)
			wqe->sg_list[sg] = wr->sg_list[sg];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next_head;
		spin_unlock_irqrestore(&srq->rq.lock, flags);

		wr = wr->next;
	}

	return 0;
}
/**
 * qib_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Returns a pointer to the new SRQ, or an ERR_PTR() on failure.
 */
struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibpd->device);
	struct qib_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	/* reject zero or over-limit WR/SGE counts up front */
	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 * One extra entry so head == tail means empty, not full.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct qib_rwqe);
	srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;

		srq->ip =
		    qib_create_mmap_info(dev, s, ibpd->uobject->context,
					 srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		srq->ip = NULL;

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
	srq->limit = srq_init_attr->attr.srq_limit;

	/* enforce the per-device SRQ count limit */
	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == ib_qib_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &srq->ibsrq;
	goto done;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
done:
	return ret;
}
/**
 * qib_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success or a negative errno.
 */
int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct qib_rwq *owq;
		struct qib_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_qib_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		/* allocate the replacement ring before taking the lock */
		sz = sizeof(struct qib_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf =
				(void __user *) (unsigned long) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		tail = owq->tail;
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		/* the new ring must hold every outstanding WQE */
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		/* copy outstanding WQEs, compacted to the start of new ring */
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct qib_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct qib_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct qib_mmap_info *ip = srq->ip;
			struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct qib_rwq) + size * sz;

			qib_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See qib_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					goto bail;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	goto bail;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
bail:
	return ret;
}
/* Report the SRQ's current capacity, max SGEs and limit. */
int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct qib_srq *srq = to_isrq(ibsrq);

	/* one RWQE is reserved so head == tail means empty, not full */
	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;

	return 0;
}
/**
 * qib_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 *
 * Always returns 0.
 */
int qib_destroy_srq(struct ib_srq *ibsrq)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_ibdev *dev = to_idev(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
	if (srq->ip)
		/* NOTE(review): qib_release_mmap_info is assumed to free
		 * rq.wq when the last mmap reference drops — confirm. */
		kref_put(&srq->ip->ref, qib_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);
	return 0;
}

View File

@ -0,0 +1,691 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/ctype.h>
#include "qib.h"
/**
 * qib_parse_ushort - parse an unsigned short value in an arbitrary base
 * @str: the string containing the number
 * @valp: where to put the result
 *
 * Returns the number of bytes consumed (one past the last digit), or a
 * negative value on error.  The string must begin with a digit.
 */
static int qib_parse_ushort(const char *str, unsigned short *valp)
{
	unsigned long parsed;
	char *endp;
	int nbytes;

	if (!isdigit(str[0]))
		return -EINVAL;

	parsed = simple_strtoul(str, &endp, 0);
	if (parsed > 0xffff)
		return -EINVAL;

	*valp = parsed;

	nbytes = endp + 1 - str;
	return nbytes ? nbytes : -EINVAL;
}
/* start of per-port functions */
/*
* Get/Set heartbeat enable. OR of 1=enabled, 2=auto
*/
static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
{
	int enb;

	/* query the chip-specific heartbeat configuration */
	enb = ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
	return scnprintf(buf, PAGE_SIZE, "%d\n", enb);
}
/* Parse and apply the heartbeat-enable value (OR of 1=enabled, 2=auto). */
static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
	size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	int ret;
	u16 val;

	ret = qib_parse_ushort(buf, &val);
	/*
	 * Set the "intentional" heartbeat enable per either of
	 * "Enable" and "Auto", as these are normally set together.
	 * This bit is consulted when leaving loopback mode,
	 * because entering loopback mode overrides it and automatically
	 * disables heartbeat.
	 */
	if (ret >= 0)
		ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
	if (ret < 0)
		qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
	return ret < 0 ? ret : count;
}
/* Pass the loopback mode string straight to the chip-specific handler. */
static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
			      size_t count)
{
	int err;

	err = ppd->dd->f_set_ib_loopback(ppd, buf);
	return err < 0 ? err : count;
}
/* Parse an LED override value and apply it to the port. */
static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
				  size_t count)
{
	u16 override;
	int n;

	n = qib_parse_ushort(buf, &override);
	if (n > 0)
		qib_set_led_override(ppd, override);
	else
		qib_dev_err(ppd->dd,
			    "attempt to set invalid LED override\n");
	return n < 0 ? n : count;
}
/* Print the raw status bitmask in hex; -EINVAL if not yet mapped. */
static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
{
	if (!ppd->statusp)
		return -EINVAL;

	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
			 (unsigned long long) *(ppd->statusp));
}
/*
 * For userland compatibility, these offsets must remain fixed.
 * They are strings for QIB_STATUS_*
 * (index == bit position in *ppd->statusp; empty strings are
 * reserved/unnamed bits, NULL terminates the table)
 */
static const char *qib_status_str[] = {
	"Initted",		/* bit 0 */
	"",
	"",
	"",
	"",
	"Present",		/* bit 5 */
	"IB_link_up",		/* bit 6 */
	"IB_configured",	/* bit 7 */
	"",
	"Fatal_Hardware_Error",	/* bit 9 */
	NULL,
};
/*
 * Render *ppd->statusp as a space-separated list of bit names from
 * qib_status_str[], newline-terminated; stops early rather than
 * overflowing PAGE_SIZE.
 */
static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
{
	int i, any;
	u64 s;
	ssize_t ret;

	if (!ppd->statusp) {
		ret = -EINVAL;
		goto bail;
	}
	s = *(ppd->statusp);
	*buf = '\0';
	/* walk the bits low-to-high; i indexes the name table */
	for (any = i = 0; s && qib_status_str[i]; i++) {
		if (s & 1) {
			/* if overflow */
			if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
				break;
			if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
					PAGE_SIZE)
				break;
			any = 1;
		}
		s >>= 1;
	}
	if (any)
		strlcat(buf, "\n", PAGE_SIZE);
	ret = strlen(buf);
bail:
	return ret;
}
/* end of per-port functions */
/*
* Start of per-port file structures and support code
* Because we are fitting into other infrastructure, we have to supply the
* full set of kobject/sysfs_ops structures and routines.
*/
#define QIB_PORT_ATTR(name, mode, show, store) \
static struct qib_port_attr qib_port_attr_##name = \
__ATTR(name, mode, show, store)
struct qib_port_attr {
struct attribute attr;
ssize_t (*show)(struct qib_pportdata *, char *);
ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
};
QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
store_hrtbt_enb);
QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
static struct attribute *port_default_attributes[] = {
&qib_port_attr_loopback.attr,
&qib_port_attr_led_override.attr,
&qib_port_attr_hrtbt_enable.attr,
&qib_port_attr_status.attr,
&qib_port_attr_status_str.attr,
NULL
};
/* sysfs_ops show: dispatch to the per-attribute show handler. */
static ssize_t qib_portattr_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_kobj);
	struct qib_port_attr *pattr =
		container_of(attr, struct qib_port_attr, attr);

	return pattr->show(ppd, buf);
}
/* sysfs_ops store: dispatch to the per-attribute store handler. */
static ssize_t qib_portattr_store(struct kobject *kobj,
	struct attribute *attr, const char *buf, size_t len)
{
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_kobj);
	struct qib_port_attr *pattr =
		container_of(attr, struct qib_port_attr, attr);

	return pattr->store(ppd, buf, len);
}
/* kobject release callback for the embedded per-port kobjects */
static void qib_port_release(struct kobject *kobj)
{
	/* nothing to do since memory is freed by qib_free_devdata() */
}
static const struct sysfs_ops qib_port_ops = {
.show = qib_portattr_show,
.store = qib_portattr_store,
};
static struct kobj_type qib_port_ktype = {
.release = qib_port_release,
.sysfs_ops = &qib_port_ops,
.default_attrs = port_default_attributes
};
/* Start sl2vl */
#define QIB_SL2VL_ATTR(N) \
static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
.attr = { .name = __stringify(N), .mode = 0444 }, \
.sl = N \
}
struct qib_sl2vl_attr {
struct attribute attr;
int sl;
};
QIB_SL2VL_ATTR(0);
QIB_SL2VL_ATTR(1);
QIB_SL2VL_ATTR(2);
QIB_SL2VL_ATTR(3);
QIB_SL2VL_ATTR(4);
QIB_SL2VL_ATTR(5);
QIB_SL2VL_ATTR(6);
QIB_SL2VL_ATTR(7);
QIB_SL2VL_ATTR(8);
QIB_SL2VL_ATTR(9);
QIB_SL2VL_ATTR(10);
QIB_SL2VL_ATTR(11);
QIB_SL2VL_ATTR(12);
QIB_SL2VL_ATTR(13);
QIB_SL2VL_ATTR(14);
QIB_SL2VL_ATTR(15);
static struct attribute *sl2vl_default_attributes[] = {
&qib_sl2vl_attr_0.attr,
&qib_sl2vl_attr_1.attr,
&qib_sl2vl_attr_2.attr,
&qib_sl2vl_attr_3.attr,
&qib_sl2vl_attr_4.attr,
&qib_sl2vl_attr_5.attr,
&qib_sl2vl_attr_6.attr,
&qib_sl2vl_attr_7.attr,
&qib_sl2vl_attr_8.attr,
&qib_sl2vl_attr_9.attr,
&qib_sl2vl_attr_10.attr,
&qib_sl2vl_attr_11.attr,
&qib_sl2vl_attr_12.attr,
&qib_sl2vl_attr_13.attr,
&qib_sl2vl_attr_14.attr,
&qib_sl2vl_attr_15.attr,
NULL
};
/* Show the VL currently mapped to this attribute's service level. */
static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, sl2vl_kobj);
	struct qib_sl2vl_attr *sattr =
		container_of(attr, struct qib_sl2vl_attr, attr);

	return sprintf(buf, "%u\n", ppd->ibport_data.sl_to_vl[sattr->sl]);
}
static const struct sysfs_ops qib_sl2vl_ops = {
.show = sl2vl_attr_show,
};
static struct kobj_type qib_sl2vl_ktype = {
.release = qib_port_release,
.sysfs_ops = &qib_sl2vl_ops,
.default_attrs = sl2vl_default_attributes
};
/* End sl2vl */
/* Start diag_counters */
#define QIB_DIAGC_ATTR(N) \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
.attr = { .name = __stringify(N), .mode = 0444 }, \
.counter = offsetof(struct qib_ibport, n_##N) \
}
struct qib_diagc_attr {
struct attribute attr;
size_t counter;
};
QIB_DIAGC_ATTR(rc_resends);
QIB_DIAGC_ATTR(rc_acks);
QIB_DIAGC_ATTR(rc_qacks);
QIB_DIAGC_ATTR(rc_delayed_comp);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
QIB_DIAGC_ATTR(other_naks);
QIB_DIAGC_ATTR(rc_timeouts);
QIB_DIAGC_ATTR(loop_pkts);
QIB_DIAGC_ATTR(pkt_drops);
QIB_DIAGC_ATTR(dmawait);
QIB_DIAGC_ATTR(unaligned);
QIB_DIAGC_ATTR(rc_dupreq);
QIB_DIAGC_ATTR(rc_seqnak);
static struct attribute *diagc_default_attributes[] = {
&qib_diagc_attr_rc_resends.attr,
&qib_diagc_attr_rc_acks.attr,
&qib_diagc_attr_rc_qacks.attr,
&qib_diagc_attr_rc_delayed_comp.attr,
&qib_diagc_attr_seq_naks.attr,
&qib_diagc_attr_rdma_seq.attr,
&qib_diagc_attr_rnr_naks.attr,
&qib_diagc_attr_other_naks.attr,
&qib_diagc_attr_rc_timeouts.attr,
&qib_diagc_attr_loop_pkts.attr,
&qib_diagc_attr_pkt_drops.attr,
&qib_diagc_attr_dmawait.attr,
&qib_diagc_attr_unaligned.attr,
&qib_diagc_attr_rc_dupreq.attr,
&qib_diagc_attr_rc_seqnak.attr,
NULL
};
static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct qib_diagc_attr *dattr =
container_of(attr, struct qib_diagc_attr, attr);
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, diagc_kobj);
struct qib_ibport *qibp = &ppd->ibport_data;
return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
}
static const struct sysfs_ops qib_diagc_ops = {
.show = diagc_attr_show,
};
static struct kobj_type qib_diagc_ktype = {
.release = qib_port_release,
.sysfs_ops = &qib_diagc_ops,
.default_attrs = diagc_default_attributes
};
/* End diag_counters */
/* end of per-port file structures and support code */
/*
* Start of per-unit (or driver, in some cases, but replicated
* per unit) functions (these get a device *)
*/
/* Show the chip minor revision. */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct qib_ibdev *ibdev =
		container_of(device, struct qib_ibdev, ibdev.dev);

	return sprintf(buf, "%x\n", dd_from_dev(ibdev)->minrev);
}
/* Show the board name, or -EINVAL if it has not been determined. */
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct qib_ibdev *ibdev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(ibdev);

	if (!dd->boardname)
		return -EINVAL;

	return scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
}
/* Show the driver version string (already newline-terminated). */
static ssize_t show_version(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
}
/* Show the board version string (already newline-terminated). */
static ssize_t show_boardversion(struct device *device,
				 struct device_attribute *attr, char *buf)
{
	struct qib_devdata *dd =
		dd_from_dev(container_of(device, struct qib_ibdev, ibdev.dev));

	return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}
/* Show the local bus info string (already newline-terminated). */
static ssize_t show_localbus_info(struct device *device,
				  struct device_attribute *attr, char *buf)
{
	struct qib_devdata *dd =
		dd_from_dev(container_of(device, struct qib_ibdev, ibdev.dev));

	return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
}
/* sysfs "nctxts": number of contexts available to user processes. */
static ssize_t show_nctxts(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct qib_devdata *dd =
		dd_from_dev(container_of(device, struct qib_ibdev, ibdev.dev));
	unsigned nuser = dd->cfgctxts - dd->first_user_ctxt;

	/* Return the number of user ports (contexts) available. */
	return scnprintf(buf, PAGE_SIZE, "%u\n", nuser);
}
/* sysfs "serial": board serial number (fixed-width field, may lack NUL). */
static ssize_t show_serial(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct qib_devdata *dd =
		dd_from_dev(container_of(device, struct qib_ibdev, ibdev.dev));

	/* serial is a fixed-size byte field; terminate past its end
	 * before appending the newline. */
	memcpy(buf, dd->serial, sizeof dd->serial);
	buf[sizeof dd->serial] = '\0';
	strcat(buf, "\n");
	return strlen(buf);
}
/* sysfs "chip_reset" (write-only): writing "reset" while the diag
 * interface is open resets the chip. */
static ssize_t store_chip_reset(struct device *device,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct qib_devdata *dd =
		dd_from_dev(container_of(device, struct qib_ibdev, ibdev.dev));
	int ret = -EINVAL;

	if (count >= 5 && !memcmp(buf, "reset", 5) && dd->diag_client)
		ret = qib_reset_device(dd->unit);

	return ret < 0 ? ret : count;
}
/* sysfs "logged_errors": EEPROM-backed error counters, space separated. */
static ssize_t show_logged_errs(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct qib_devdata *dd =
		dd_from_dev(container_of(device, struct qib_ibdev, ibdev.dev));
	int idx, pos = 0;

	/* force consistency with actual EEPROM */
	if (qib_update_eeprom_log(dd) != 0)
		return -ENXIO;

	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx)
		pos += scnprintf(buf + pos, PAGE_SIZE - pos, "%d%c",
				 dd->eep_st_errs[idx],
				 idx < QIB_EEP_LOG_CNT - 1 ? ' ' : '\n');

	return pos;
}
/*
* Dump tempsense regs. in decimal, to ease shell-scripts.
*/
static ssize_t show_tempsense(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int ret;
	int idx;
	u8 regvals[8];

	ret = -ENXIO;
	/* Read tempsense regs 0-7; reg 6 is skipped (not reported below;
	 * presumably unused -- TODO confirm against chip docs). */
	for (idx = 0; idx < 8; ++idx) {
		if (idx == 6)
			continue;
		ret = dd->f_tempsense_rd(dd, idx);
		if (ret < 0)
			break;	/* propagate chip read error to caller */
		regvals[idx] = ret;
	}
	/* idx == 8 only when every read above succeeded */
	if (idx == 8)
		ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
				*(signed char *)(regvals),
				*(signed char *)(regvals + 1),
				regvals[2], regvals[3],
				*(signed char *)(regvals + 5),
				*(signed char *)(regvals + 7));
	return ret;
}
/*
* end of per-unit (or driver, in some cases, but replicated
* per unit) functions
*/
/* start of per-unit file structures and support code */
/* Per-unit (device-wide) sysfs attributes and the table used to
 * register/unregister them as a group. */
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);

static struct device_attribute *qib_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_version,
	&dev_attr_nctxts,
	&dev_attr_serial,
	&dev_attr_boardversion,
	&dev_attr_logged_errors,
	&dev_attr_tempsense,
	&dev_attr_localbus_info,
	&dev_attr_chip_reset,
};
/*
 * Create the per-port sysfs directories (linkcontrol, sl2vl,
 * diag_counters) under the IB device's port kobject.
 * Returns 0 on success or a negative errno; on failure, kobjects
 * already created here are released before returning.
 */
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
			  struct kobject *kobj)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	/* IB port numbers are 1-based */
	if (!port_num || port_num > dd->num_pports) {
		qib_dev_err(dd, "Skipping infiniband class with "
			    "invalid port %u\n", port_num);
		ret = -ENODEV;
		goto bail;
	}
	ppd = &dd->pport[port_num - 1];

	ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
				   "linkcontrol");
	if (ret) {
		qib_dev_err(dd, "Skipping linkcontrol sysfs info, "
			    "(err %d) port %u\n", ret, port_num);
		goto bail;
	}
	kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
				   "sl2vl");
	if (ret) {
		qib_dev_err(dd, "Skipping sl2vl sysfs info, "
			    "(err %d) port %u\n", ret, port_num);
		goto bail_sl;
	}
	kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
				   "diag_counters");
	if (ret) {
		qib_dev_err(dd, "Skipping diag_counters sysfs info, "
			    "(err %d) port %u\n", ret, port_num);
		goto bail_diagc;
	}
	kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);

	return 0;

	/* unwind in reverse creation order; kobject_put invokes release */
bail_diagc:
	kobject_put(&ppd->sl2vl_kobj);
bail_sl:
	kobject_put(&ppd->pport_kobj);
bail:
	return ret;
}
/*
 * Register and create our files in /sys/class/infiniband.
 * Returns 0 on success or the first device_create_file() error;
 * on failure, any files already created are removed again so a
 * partial registration does not leak sysfs entries.
 */
int qib_verbs_register_sysfs(struct qib_devdata *dd)
{
	struct ib_device *dev = &dd->verbs_dev.ibdev;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
		ret = device_create_file(&dev->dev, qib_attributes[i]);
		if (ret)
			goto bail;
	}
	return 0;
bail:
	/* roll back the attributes created before the failure */
	while (--i >= 0)
		device_remove_file(&dev->dev, qib_attributes[i]);
	return ret;
}
/*
 * Unregister and remove our files in /sys/class/infiniband.
 *
 * Drops the reference on each per-port kobject created by
 * qib_create_port_files(). All three kobjects (linkcontrol, sl2vl,
 * diag_counters) must be put, in reverse creation order, or their
 * memory and sysfs entries leak.
 */
void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i;

	for (i = 0; i < dd->num_pports; i++) {
		ppd = &dd->pport[i];
		/* diagc_kobj was previously never released -- fixed here */
		kobject_put(&ppd->diagc_kobj);
		kobject_put(&ppd->sl2vl_kobj);
		kobject_put(&ppd->pport_kobj);
	}
}

View File

@ -0,0 +1,498 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include "qib.h"
/*
* QLogic_IB "Two Wire Serial Interface" driver.
* Originally written for a not-quite-i2c serial eeprom, which is
* still used on some supported boards. Later boards have added a
 * variety of other uses, most board-specific, so the bit-boffing
* part has been split off to this file, while the other parts
* have been moved to chip-specific files.
*
* We have also dropped all pretense of fully generic (e.g. pretend
* we don't know whether '1' is the higher voltage) interface, as
* the restrictions of the generic i2c interface (e.g. no access from
* driver itself) make it unsuitable for this use.
*/
#define READ_CMD 1	/* TWSI address R/W bit: read */
#define WRITE_CMD 0	/* TWSI address R/W bit: write */
/**
* i2c_wait_for_writes - wait for a write
* @dd: the qlogic_ib device
*
* We use this instead of udelay directly, so we can make sure
* that previous register writes have been flushed all the way
* to the chip. Since we are delaying anyway, the cost doesn't
* hurt, and makes the bit twiddling more regular
*/
static void i2c_wait_for_writes(struct qib_devdata *dd)
{
	/*
	 * implicit read of EXTStatus is as good as explicit
	 * read of scratch, if all we want to do is flush
	 * writes.
	 */
	dd->f_gpio_mod(dd, 0, 0, 0);
	rmb(); /* inlined, so prevent compiler reordering */
}
/*
* QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
* for "almost compliant" modules
*/
#define SCL_WAIT_USEC 1000
/* BUF_WAIT is time bus must be free between STOP or ACK and to next START.
* Should be 20, but some chips need more.
*/
#define TWSI_BUF_WAIT_USEC 60
/*
 * Drive (or release) SCL. Open-drain emulation: only the GPIO
 * direction bit is toggled, never the output value. On a rising
 * edge, poll for the line actually going high to honor slave
 * clock-stretching (up to SCL_WAIT_USEC).
 */
static void scl_out(struct qib_devdata *dd, u8 bit)
{
	u32 mask;

	udelay(1);

	mask = 1UL << dd->gpio_scl_num;

	/* SCL is meant to be bare-drain, so never set "OUT", just DIR */
	dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);

	/*
	 * Allow for slow slaves by simple
	 * delay for falling edge, sampling on rise.
	 */
	if (!bit)
		udelay(2);
	else {
		int rise_usec;

		/* wait for SCL to actually rise (slave may stretch clock) */
		for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
			if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
				break;
			udelay(2);
		}
		if (rise_usec <= 0)
			qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
				    SCL_WAIT_USEC);
	}
	i2c_wait_for_writes(dd);
}
/* Drive (or release) SDA via the GPIO direction bit only (open-drain). */
static void sda_out(struct qib_devdata *dd, u8 bit)
{
	u32 mask = 1UL << dd->gpio_sda_num;

	/* SDA is meant to be bare-drain, so never set "OUT", just DIR */
	dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);

	i2c_wait_for_writes(dd);
	udelay(2);
}
/* Release SDA and sample the line; returns 0 or 1. */
static u8 sda_in(struct qib_devdata *dd, int wait)
{
	int bnum = dd->gpio_sda_num;
	u32 mask = 1UL << bnum;
	u32 read_val;

	/* SDA is meant to be bare-drain, so never set "OUT", just DIR */
	dd->f_gpio_mod(dd, 0, 0, mask);
	read_val = dd->f_gpio_mod(dd, 0, 0, 0);
	if (wait)
		i2c_wait_for_writes(dd);
	return (read_val & mask) >> bnum;
}
/**
* i2c_ackrcv - see if ack following write is true
* @dd: the qlogic_ib device
*/
static int i2c_ackrcv(struct qib_devdata *dd)
{
	u8 ack_received;

	/* AT ENTRY SCL = LOW */
	/* change direction, ignore data */
	/* first sda_in() only releases SDA; its return value is
	 * intentionally discarded */
	ack_received = sda_in(dd, 1);
	scl_out(dd, 1);
	/* ACK is SDA held low by the slave during the clock-high phase */
	ack_received = sda_in(dd, 1) == 0;
	scl_out(dd, 0);
	return ack_received;
}
static void stop_cmd(struct qib_devdata *dd);
/**
* rd_byte - read a byte, sending STOP on last, else ACK
* @dd: the qlogic_ib device
*
* Returns byte shifted out of device
*/
/**
 * rd_byte - read a byte, sending STOP on last, else ACK
 * @dd: the qlogic_ib device
 * @last: non-zero to finish with a STOP, zero to ACK for more data
 *
 * Returns byte shifted out of device, MSB first.
 */
static int rd_byte(struct qib_devdata *dd, int last)
{
	int bit_cntr, data;

	data = 0;

	/* clock in 8 bits, MSB first; SDA released by prior ACK/addr phase */
	for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
		data <<= 1;
		scl_out(dd, 1);
		data |= sda_in(dd, 0);
		scl_out(dd, 0);
	}
	if (last) {
		/* NAK implicit (SDA left high), then STOP */
		scl_out(dd, 1);
		stop_cmd(dd);
	} else {
		/* ACK: pull SDA low for one clock, then release */
		sda_out(dd, 0);
		scl_out(dd, 1);
		scl_out(dd, 0);
		sda_out(dd, 1);
	}
	return data;
}
/**
* wr_byte - write a byte, one bit at a time
* @dd: the qlogic_ib device
* @data: the byte to write
*
* Returns 0 if we got the following ack, otherwise 1
*/
/*
 * wr_byte - shift one byte out on the bus, MSB first.
 * Returns 0 if the slave ACKed, otherwise 1.
 */
static int wr_byte(struct qib_devdata *dd, u8 data)
{
	int bit_cntr;

	for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
		sda_out(dd, (data >> bit_cntr) & 1);
		scl_out(dd, 1);
		scl_out(dd, 0);
	}
	return i2c_ackrcv(dd) ? 0 : 1;
}
/*
* issue TWSI start sequence:
* (both clock/data high, clock high, data low while clock is high)
*/
static void start_seq(struct qib_devdata *dd)
{
	/* order is the protocol: data falls while clock is high */
	sda_out(dd, 1);
	scl_out(dd, 1);
	sda_out(dd, 0);
	udelay(1);	/* START hold time */
	scl_out(dd, 0);
}
/**
* stop_seq - transmit the stop sequence
* @dd: the qlogic_ib device
*
* (both clock/data low, clock high, data high while clock is high)
*/
static void stop_seq(struct qib_devdata *dd)
{
	/* order is the protocol: data rises while clock is high */
	scl_out(dd, 0);
	sda_out(dd, 0);
	scl_out(dd, 1);
	sda_out(dd, 1);
}
/**
* stop_cmd - transmit the stop condition
* @dd: the qlogic_ib device
*
* (both clock/data low, clock high, data high while clock is high)
*/
static void stop_cmd(struct qib_devdata *dd)
{
	stop_seq(dd);
	/* honor t_buf: bus must idle before the next START */
	udelay(TWSI_BUF_WAIT_USEC);
}
/**
* qib_twsi_reset - reset I2C communication
* @dd: the qlogic_ib device
*/
/*
 * Returns 0 if SDA was seen high at some point (bus recoverable),
 * non-zero if the bus appears wedged.
 */
int qib_twsi_reset(struct qib_devdata *dd)
{
	int clock_cycles_left = 9;
	int was_high = 0;
	u32 pins, mask;

	/* Both SCL and SDA should be high. If not, there
	 * is something wrong.
	 */
	mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);

	/*
	 * Force pins to desired innocuous state.
	 * This is the default power-on state with out=0 and dir=0,
	 * So tri-stated and should be floating high (barring HW problems)
	 */
	dd->f_gpio_mod(dd, 0, 0, mask);

	/*
	 * Clock nine times to get all listeners into a sane state.
	 * If SDA does not go high at any point, we are wedged.
	 * One vendor recommends then issuing START followed by STOP.
	 * we cannot use our "normal" functions to do that, because
	 * if SCL drops between them, another vendor's part will
	 * wedge, dropping SDA and keeping it low forever, at the end of
	 * the next transaction (even if it was not the device addressed).
	 * So our START and STOP take place with SCL held high.
	 */
	while (clock_cycles_left--) {
		scl_out(dd, 0);
		scl_out(dd, 1);
		/* Note if SDA is high, but keep clocking to sync slave */
		was_high |= sda_in(dd, 0);
	}

	if (was_high) {
		/*
		 * We saw a high, which we hope means the slave is sync'd.
		 * Issue START, STOP, pause for T_BUF.
		 */
		pins = dd->f_gpio_mod(dd, 0, 0, 0);
		if ((pins & mask) != mask)
			qib_dev_err(dd, "GPIO pins not at rest: %d\n",
				    pins & mask);
		/* Drop SDA to issue START */
		udelay(1); /* Guarantee .6 uSec setup */
		sda_out(dd, 0);
		udelay(1); /* Guarantee .6 uSec hold */
		/* At this point, SCL is high, SDA low. Raise SDA for STOP */
		sda_out(dd, 1);
		udelay(TWSI_BUF_WAIT_USEC);
	}

	return !was_high;
}
#define QIB_TWSI_START 0x100	/* flag: issue START before the byte */
#define QIB_TWSI_STOP 0x200	/* flag: issue STOP after the byte */
/* Write byte to TWSI, optionally prefixed with START or suffixed with
* STOP.
* returns 0 if OK (ACK received), else != 0
*/
/*
 * Write one byte on the bus; @flags may include QIB_TWSI_START and/or
 * QIB_TWSI_STOP. Returns 0 if OK (ACK received), else != 0.
 * (The previous "int ret = 1" initializer was a dead store, always
 * overwritten by wr_byte(); removed.)
 */
static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
{
	int ret;

	if (flags & QIB_TWSI_START)
		start_seq(dd);

	ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */

	if (flags & QIB_TWSI_STOP)
		stop_cmd(dd);
	return ret;
}
/* Added functionality for IBA7220-based cards */
#define QIB_TEMP_DEV 0x98
/*
* qib_twsi_blk_rd
* Formerly called qib_eeprom_internal_read, and only used for eeprom,
* but now the general interface for data transfer from twsi devices.
* One vestige of its former role is that it recognizes a device
* QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
* which responded to all TWSI device codes, interpreting them as
* address within device. On all other devices found on board handled by
* this driver, the device is followed by a one-byte "address" which selects
* the "register" or "offset" within the device from which data should
* be read.
*/
/*
 * Read @len bytes from TWSI device @dev starting at @addr into @buffer.
 * Returns 0 on success, 1 on any bus error (STOP already issued).
 */
int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
		    void *buffer, int len)
{
	int ret;
	u8 *bp = buffer;

	ret = 1;

	if (dev == QIB_TWSI_NO_DEV) {
		/* legacy not-really-I2C: device code is the address */
		addr = (addr << 1) | READ_CMD;
		ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
	} else {
		/* Actual I2C: select device, then offset within it */
		ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
		if (ret) {
			stop_cmd(dd);
			ret = 1;
			goto bail;
		}
		/*
		 * SFF spec claims we do _not_ stop after the addr
		 * but simply issue a start with the "read" dev-addr.
		 * Since we are implicitly waiting for ACK here,
		 * we need t_buf (nominally 20uSec) before that start,
		 * and cannot rely on the delay built in to the STOP
		 */
		ret = qib_twsi_wr(dd, addr, 0);
		udelay(TWSI_BUF_WAIT_USEC);

		if (ret) {
			qib_dev_err(dd,
				"Failed to write interface read addr %02X\n",
				addr);
			ret = 1;
			goto bail;
		}
		/* repeated START with the read device address */
		ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
	}
	if (ret) {
		stop_cmd(dd);
		ret = 1;
		goto bail;
	}

	/*
	 * block devices keeps clocking data out as long as we ack,
	 * automatically incrementing the address. Some have "pages"
	 * whose boundaries will not be crossed, but the handling
	 * of these is left to the caller, who is in a better
	 * position to know.
	 */
	while (len-- > 0) {
		/*
		 * Get and store data, sending ACK if length remaining,
		 * else STOP
		 */
		*bp++ = rd_byte(dd, !len);
	}

	ret = 0;

bail:
	return ret;
}
/*
* qib_twsi_blk_wr
* Formerly called qib_eeprom_internal_write, and only used for eeprom,
* but now the general interface for data transfer to twsi devices.
* One vestige of its former role is that it recognizes a device
* QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
* which responded to all TWSI device codes, interpreting them as
* address within device. On all other devices found on board handled by
* this driver, the device is followed by a one-byte "address" which selects
* the "register" or "offset" within the device to which data should
* be written.
*/
/*
 * Write @len bytes from @buffer to TWSI device @dev starting at @addr,
 * in chunks of at most 4 bytes, polling for write-completion after each.
 * Returns 0 on success, 1 on failure (STOP already issued).
 */
int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
		    const void *buffer, int len)
{
	int sub_len;
	const u8 *bp = buffer;
	int max_wait_time, i;
	int ret;

	ret = 1;

	while (len > 0) {
		if (dev == QIB_TWSI_NO_DEV) {
			/* legacy part: device code is the address */
			if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
					QIB_TWSI_START)) {
				goto failed_write;
			}
		} else {
			/* Real I2C: select device, then offset */
			if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
				goto failed_write;
			ret = qib_twsi_wr(dd, addr, 0);
			if (ret) {
				qib_dev_err(dd, "Failed to write interface"
					    " write addr %02X\n", addr);
				goto failed_write;
			}
		}

		/* write at most 4 bytes per transaction (device page/buffer) */
		sub_len = min(len, 4);
		addr += sub_len;
		len -= sub_len;
		for (i = 0; i < sub_len; i++)
			if (qib_twsi_wr(dd, *bp++, 0))
				goto failed_write;

		stop_cmd(dd);

		/*
		 * Wait for write complete by waiting for a successful
		 * read (the chip replies with a zero after the write
		 * cmd completes, and before it writes to the eeprom.
		 * The startcmd for the read will fail the ack until
		 * the writes have completed.  We do this inline to avoid
		 * the debug prints that are in the real read routine
		 * if the startcmd fails.
		 * We also use the proper device address, so it doesn't matter
		 * whether we have real eeprom_dev. Legacy likes any address.
		 */
		max_wait_time = 100;
		while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
			stop_cmd(dd);
			if (!--max_wait_time)
				goto failed_write;
		}
		/* now read (and ignore) the resulting byte */
		rd_byte(dd, 1);
	}

	ret = 0;
	goto bail;

failed_write:
	stop_cmd(dd);
	ret = 1;

bail:
	return ret;
}

View File

@ -0,0 +1,557 @@
/*
* Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "qib.h"
/* how long user processes stay suspended after a link failure (HoL timer) */
static unsigned qib_hol_timeout_ms = 3000;
module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
		 "duration of user app suspension after link failure");

/* non-static: consumed by chip-specific code (IBA7220) */
unsigned qib_sdma_fetch_arb = 1;
module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
/**
* qib_disarm_piobufs - cancel a range of PIO buffers
* @dd: the qlogic_ib device
* @first: the first PIO buffer to cancel
* @cnt: the number of PIO buffers to cancel
*
* Cancel a range of PIO buffers. Used at user process close,
* in case it died while writing to a PIO buffer.
*/
/*
 * Disarm PIO buffers [first, first+cnt), clearing any pending
 * deferred-disarm bits for them. Used at user process close, in
 * case it died while writing to a PIO buffer.
 */
void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
{
	unsigned long flags;
	unsigned bufn;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (bufn = first; bufn < first + cnt; bufn++) {
		__clear_bit(bufn, dd->pio_need_disarm);
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(bufn));
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}
/*
* This is called by a user process when it sees the DISARM_BUFS event
* bit is set.
*/
/*
 * Disarm any of this context's PIO buffers that were flagged for
 * deferred disarm, and clear the context's DISARM_BUFS event bits.
 * Always returns 0.
 */
int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned i;
	unsigned last;
	unsigned n = 0;

	last = rcd->pio_base + rcd->piocnt;
	/*
	 * Don't need uctxt_lock here, since user has called in to us.
	 * Clear at start in case more interrupts set bits while we
	 * are disarming
	 */
	if (rcd->user_event_mask) {
		/*
		 * subctxt_cnt is 0 if not shared, so do base
		 * separately, first, then remaining subctxt, if any
		 */
		clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
		for (i = 1; i < rcd->subctxt_cnt; i++)
			clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				  &rcd->user_event_mask[i]);
	}
	spin_lock_irq(&dd->pioavail_lock);
	for (i = rcd->pio_base; i < last; i++) {
		if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
			n++;	/* count kept for debugging; not returned */
			dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
		}
	}
	spin_unlock_irq(&dd->pioavail_lock);
	return 0;
}
/*
 * If send buffer @i lies in some port's SDMA buffer range, return
 * that port; otherwise NULL.
 */
static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
{
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; pidx++) {
		struct qib_pportdata *ppd = dd->pport + pidx;

		if (i >= ppd->sdma_state.first_sendbuf &&
		    i < ppd->sdma_state.last_sendbuf)
			return ppd;
	}
	return NULL;
}
/*
* Return true if send buffer is being used by a user context.
* Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect
*/
/*
 * Return true if send buffer @bufn is owned by a user context.
 * Sets _QIB_EVENT_DISARM_BUFS_BIT in that context's user_event_mask
 * as a side effect, so the user process will disarm on next entry.
 */
static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;

	spin_lock(&dd->uctxt_lock);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		rcd = dd->rcd[ctxt];
		/* skip unopened contexts and ones not owning this buffer */
		if (!rcd || bufn < rcd->pio_base ||
		    bufn >= rcd->pio_base + rcd->piocnt)
			continue;
		if (rcd->user_event_mask) {
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock(&dd->uctxt_lock);

	return ret;
}
/*
* Disarm a set of send buffers. If the buffer might be actively being
* written to, mark the buffer to be disarmed later when it is not being
* written to.
*
* This should only be called from the IRQ error handler.
*/
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
			    unsigned cnt)
{
	/*
	 * NOTE(review): variable-length array sized by num_pports;
	 * kernel style discourages VLAs -- consider a fixed max-ports
	 * array once the maximum is confirmed.
	 */
	struct qib_pportdata *ppd, *pppd[dd->num_pports];
	unsigned i;
	unsigned long flags;

	for (i = 0; i < dd->num_pports; i++)
		pppd[i] = NULL;

	for (i = 0; i < cnt; i++) {
		int which;
		if (!test_bit(i, mask))
			continue;
		/*
		 * If the buffer is owned by the DMA hardware,
		 * reset the DMA engine.
		 */
		ppd = is_sdma_buf(dd, i);
		if (ppd) {
			pppd[ppd->port] = ppd;
			continue;
		}
		/*
		 * If the kernel is writing the buffer or the buffer is
		 * owned by a user process, we can't clear it yet.
		 */
		spin_lock_irqsave(&dd->pioavail_lock, flags);
		if (test_bit(i, dd->pio_writing) ||
		    (!test_bit(i << 1, dd->pioavailkernel) &&
		     find_ctxt(dd, i))) {
			/* defer: qib_sendbuf_done()/user will disarm later */
			__set_bit(i, dd->pio_need_disarm);
			which = 0;
		} else {
			which = 1;
			dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
		}
		spin_unlock_irqrestore(&dd->pioavail_lock, flags);
	}

	/* do cancel_sends once per port that had sdma piobufs in error */
	for (i = 0; i < dd->num_pports; i++)
		if (pppd[i])
			qib_cancel_sends(pppd[i]);
}
/**
* update_send_bufs - update shadow copy of the PIO availability map
* @dd: the qlogic_ib device
*
* called whenever our local copy indicates we have run out of send buffers
*/
static void update_send_bufs(struct qib_devdata *dd)
{
	unsigned long flags;
	unsigned i;
	const unsigned piobregs = dd->pioavregs;

	/*
	 * If the generation (check) bits have changed, then we update the
	 * busy bit for the corresponding PIO buffer.  This algorithm will
	 * modify positions to the value they already have in some cases
	 * (i.e., no change), but it's faster than changing only the bits
	 * that have changed.
	 *
	 * We would like to do this atomicly, to avoid spinlocks in the
	 * critical send path, but that's not really possible, given the
	 * type of changes, and that this routine could be called on
	 * multiple cpu's simultaneously, so we lock in this routine only,
	 * to avoid conflicting updates; all we change is the shadow, and
	 * it's a single 64 bit memory location, so by definition the update
	 * is atomic in terms of what other cpu's can see in testing the
	 * bits.  The spin_lock overhead isn't too bad, since it only
	 * happens when all buffers are in use, so only cpu overhead, not
	 * latency or bandwidth is affected.
	 */
	if (!dd->pioavailregs_dma)
		return;	/* DMA'd register copy not mapped yet */
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;

		/* registers are DMA'd little-endian by the chip */
		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
		pchg = dd->pioavailkernel[i] &
			~(dd->pioavailshadow[i] ^ piov);
		pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
			pnew = dd->pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}
/*
* Debugging code and stats updates if no pio buffers available.
*/
/* Stats update and bookkeeping when no PIO buffer was available. */
static noinline void no_send_bufs(struct qib_devdata *dd)
{
	/* not atomic, but if we lose a stat count in a while, that's OK */
	qib_stats.sps_nopiobufs++;
	/* force a shadow refresh on the next allocation attempt */
	dd->upd_pio_shadow = 1;
}
/*
* Common code for normal driver send buffer allocation, and reserved
* allocation.
*
* Do appropriate marking as busy, etc.
* Returns buffer pointer if one is found, otherwise NULL.
*/
/*
 * Allocate a free PIO send buffer with index in [first, last],
 * marking it busy and "being written" in the shadow maps.
 * Returns the mapped buffer address (and the index via *pbufnum,
 * if non-NULL), or NULL if none is available.
 */
u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
				  u32 first, u32 last)
{
	unsigned i, j, updated = 0;
	unsigned nbufs;
	unsigned long flags;
	unsigned long *shadow = dd->pioavailshadow;
	u32 __iomem *buf;

	if (!(dd->flags & QIB_PRESENT))
		return NULL;	/* chip not present/usable */

	nbufs = last - first + 1; /* number in range to check */
	if (dd->upd_pio_shadow) {
		/*
		 * Minor optimization.  If we had no buffers on last call,
		 * start out by doing the update; continue and do scan even
		 * if no buffers were updated, to be paranoid.
		 */
		update_send_bufs(dd);
		updated++;
	}
	i = first;
rescan:
	/*
	 * While test_and_set_bit() is atomic, we do that and then the
	 * change_bit(), and the pair is not.  See if this is the cause
	 * of the remaining armlaunch errors.
	 */
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (j = 0; j < nbufs; j++, i++) {
		if (i > last)
			i = first;	/* wrap the circular scan */
		/* shadow packs 2 bits per buffer: (2*i)+1 is BUSY */
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		__change_bit(2 * i, shadow);
		/* remember that the buffer can be written to now */
		__set_bit(i, dd->pio_writing);
		break;
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	if (j == nbufs) {
		if (!updated) {
			/*
			 * First time through; shadow exhausted, but may be
			 * buffers available, try an update and then rescan.
			 */
			update_send_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		}
		no_send_bufs(dd);
		buf = NULL;
	} else {
		/* 2KB buffers come first, then 4KB buffers */
		if (i < dd->piobcnt2k)
			buf = (u32 __iomem *)(dd->pio2kbase +
				i * dd->palign);
		else
			buf = (u32 __iomem *)(dd->pio4kbase +
				(i - dd->piobcnt2k) * dd->align4k);
		if (pbufnum)
			*pbufnum = i;
		dd->upd_pio_shadow = 0;
	}

	return buf;
}
/*
* Record that the caller is finished writing to the buffer so we don't
* disarm it while it is being written and disarm it now if needed.
*/
/*
 * Mark buffer @n as no longer being written; if a disarm was
 * deferred while it was in use, perform it now.
 */
void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	__clear_bit(n, dd->pio_writing);
	if (__test_and_clear_bit(n, dd->pio_need_disarm))
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}
/**
* qib_chg_pioavailkernel - change which send buffers are available for kernel
* @dd: the qlogic_ib device
* @start: the starting send buffer number
* @len: the number of send buffers
* @avail: true if the buffers are available for kernel use, false otherwise
*/
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
			    unsigned len, u32 avail, struct qib_ctxtdata *rcd)
{
	unsigned long flags;
	unsigned end;
	unsigned ostart = start;	/* keep original for f_txchk_change */

	/* There are two bits per send buffer (busy and generation) */
	start *= 2;
	end = start + len * 2;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	/* Set or clear the busy bit in the shadow. */
	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i;

			/*
			 * The BUSY bit will never be set, because we disarm
			 * the user buffers before we hand them back to the
			 * kernel.  We do have to make sure the generation
			 * bit is set correctly in shadow, since it could
			 * have changed many times while allocated to user.
			 * We can't use the bitmap functions on the full
			 * dma array because it is always little-endian, so
			 * we have to flip to host-order first.
			 * BITS_PER_LONG is slightly wrong, since it's
			 * always 64 bits per register in chip...
			 * We only work on 64 bit kernels, so that's OK.
			 */
			i = start / BITS_PER_LONG;
			__clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
				    dd->pioavailshadow);
			/* copy the chip's generation bit into the shadow */
			dma = (unsigned long)
				le64_to_cpu(dd->pioavailregs_dma[i]);
			if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
				      start) % BITS_PER_LONG, &dma))
				__set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
					  start, dd->pioavailshadow);
			else
				__clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
					    + start, dd->pioavailshadow);
			__set_bit(start, dd->pioavailkernel);
		} else {
			/* handing to user: mark busy in shadow */
			__set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
				  dd->pioavailshadow);
			__clear_bit(start, dd->pioavailkernel);
		}
		start += 2;
	}

	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	/* let the chip-specific code adjust checking, with original range */
	dd->f_txchk_change(dd, ostart, len, avail, rcd);
}
/*
* Flush all sends that might be in the ready to send state, as well as any
* that are in the process of being sent. Used whenever we need to be
* sure the send side is idle. Cleans up all buffer state by canceling
* all pio buffers, and issuing an abort, which cleans up anything in the
* launch fifo. The cancel is superfluous on some chip versions, but
* it's safer to always do it.
* PIOAvail bits are updated by the chip as if a normal send had happened.
*/
void qib_cancel_sends(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	unsigned long flags;
	unsigned ctxt;
	unsigned i;
	unsigned last;

	/*
	 * Tell PSM to disarm buffers again before trying to reuse them.
	 * We need to be sure the rcd doesn't change out from under us
	 * while we do so.  We hold the two locks sequentially.  We might
	 * needlessly set some need_disarm bits as a result, if the
	 * context is closed after we release the uctxt_lock, but that's
	 * fairly benign, and safer than nesting the locks.
	 */
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		rcd = dd->rcd[ctxt];
		if (rcd && rcd->ppd == ppd) {
			last = rcd->pio_base + rcd->piocnt;
			if (rcd->user_event_mask) {
				/*
				 * subctxt_cnt is 0 if not shared, so do base
				 * separately, first, then remaining subctxt,
				 * if any
				 */
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[0]);
				for (i = 1; i < rcd->subctxt_cnt; i++)
					set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
						&rcd->user_event_mask[i]);
			}
			i = rcd->pio_base;
			/* hand-off: drop uctxt_lock before taking pioavail_lock */
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			spin_lock_irqsave(&dd->pioavail_lock, flags);
			for (; i < last; i++)
				__set_bit(i, dd->pio_need_disarm);
			spin_unlock_irqrestore(&dd->pioavail_lock, flags);
		} else
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	}

	/* without SDMA, disarm and flush all buffers via sendctrl */
	if (!(dd->flags & QIB_HAS_SEND_DMA))
		dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
				    QIB_SENDCTRL_FLUSH);
}
/*
* Force an update of in-memory copy of the pioavail registers, when
* needed for any of a variety of reasons.
* If already off, this routine is a nop, on the assumption that the
* caller (or set of callers) will "do the right thing".
* This is a per-device operation, so just the first port.
*/
void qib_force_pio_avail_update(struct qib_devdata *dd)
{
	/* per-device operation, so only the first port is needed */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
}
void qib_hol_down(struct qib_pportdata *ppd)
{
	/*
	 * Cancel sends when the link goes DOWN so that we aren't doing it
	 * at INIT when we might be trying to send SMI packets.  Skip it
	 * entirely while autonegotiation is in progress.
	 */
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG)
		return;

	qib_cancel_sends(ppd);
}
/*
* Link is at INIT.
* We start the HoL timer so we can detect stuck packets blocking SMP replies.
* Timer may already be running, so use mod_timer, not add_timer.
*/
void qib_hol_init(struct qib_pportdata *ppd)
{
	/* Nothing to do if we are already in the INIT HoL state. */
	if (ppd->hol_state == QIB_HOL_INIT)
		return;

	ppd->hol_state = QIB_HOL_INIT;
	/* Timer may already be running, so mod_timer, not add_timer. */
	mod_timer(&ppd->hol_timer,
		  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
}
/*
* Link is up, continue any user processes, and ensure timer
* is a nop, if running. Let timer keep running, if set; it
* will nop when it sees the link is up.
*/
void qib_hol_up(struct qib_pportdata *ppd)
{
	/*
	 * Just record the state; if the HoL timer is running, its handler
	 * (qib_hol_event) checks this and becomes a no-op while UP.
	 */
	ppd->hol_state = QIB_HOL_UP;
}
/*
* This is only called via the timer.
*/
void qib_hol_event(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	/* If hardware error, etc, skip. */
	if (!(ppd->dd->flags & QIB_INITTED))
		return;

	/* Link is up: nothing to flush, let the timer lapse. */
	if (ppd->hol_state == QIB_HOL_UP)
		return;

	/*
	 * Try to flush sends in case a stuck packet is blocking
	 * SMP replies, then re-arm ourselves.
	 */
	qib_hol_down(ppd);
	mod_timer(&ppd->hol_timer,
		  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
}

View File

@ -0,0 +1,555 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "qib.h"
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
/**
* qib_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
*
* Return 1 if constructed; otherwise, return 0.
*/
int qib_make_uc_req(struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	struct qib_swqe *wqe;
	unsigned long flags;
	u32 hwords;		/* header length in 32-bit words */
	u32 bth0;		/* BTH word 0 being assembled */
	u32 len;		/* payload bytes for this packet */
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	int ret = 0;
	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}
	/* Header after the GRH when one is present, else right after LRH. */
	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;
	/* Get the next send request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
	/*
	 * Dispatch on the current send state: default starts a new WQE;
	 * the FIRST/MIDDLE cases continue a multi-packet message.
	 */
	switch (qp->s_state) {
	default:
		if (!(ib_qib_state_ops[qp->state] &
		    QIB_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		if (qp->s_cur == qp->s_head)
			goto bail;
		/*
		 * Start a new request.
		 */
		wqe->psn = qp->s_next_psn;
		qp->s_psn = qp->s_next_psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* Message longer than one PMTU: segment it. */
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			/* Single-packet message: completes this WQE. */
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* RETH carries the remote address for the write. */
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;
		default:
			/* Opcode not supported on UC (e.g. RDMA read). */
			goto bail;
		}
		break;
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		/* Still more than one PMTU left: stay in MIDDLE. */
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		/* Final packet: this WQE completes after it is sent. */
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
	/* Account this packet's payload and finish building the headers. */
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			    qp->s_next_psn++ & QIB_PSN_MASK);
done:
	ret = 1;
	goto unlock;
bail:
	qp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
/**
* qib_uc_rcv - handle an incoming UC packet
* @ibp: the port the packet came in on
* @hdr: the header of the packet
* @has_grh: true if the packet has a GRH
* @data: the packet data
* @tlen: the length of the packet
* @qp: the QP for this packet.
*
* This is called from qib_qp_rcv() to process an incoming UC packet
* for the given QP.
* Called at interrupt level.
*/
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	unsigned long flags;
	u32 opcode;
	u32 hdrsize;		/* bytes of header preceding the payload */
	u32 psn;
	u32 pad;		/* pad bytes from BTH PadCnt field */
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ib_reth *reth;
	int ret;
	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
	}
	opcode = be32_to_cpu(ohdr->bth[0]);
	/* s_lock protects the header check against concurrent modify-QP. */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		goto sunlock;
	spin_unlock_irqrestore(&qp->s_lock, flags);
	psn = be32_to_cpu(ohdr->bth[2]);
	/* Opcode is the top byte of bth[0] in host order. */
	opcode >>= 24;
	memset(&wc, 0, sizeof wc);
	/* Prevent simultaneous processing after APM on different CPUs */
	spin_lock(&qp->r_lock);
	/* Compare the PSN verses the expected PSN. */
	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		/*
		 * Discard the in-progress message: either rewind the SGE
		 * state for a send, or release the MR references held by
		 * an RDMA write target.
		 */
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else
			while (qp->r_sge.num_sge) {
				atomic_dec(&qp->r_sge.sge.mr->refcount);
				if (--qp->r_sge.num_sge)
					qp->r_sge.sge = *qp->r_sge.sg_list++;
			}
		qp->r_state = OP(SEND_LAST);
		/* Resynchronize only on a message-starting opcode. */
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;
		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;
		default:
			goto drop;
		}
	}
	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;
	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}
	/* First valid packet while in RTR: raise IB_EVENT_COMM_EST once. */
	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
		qp->r_flags |= QIB_R_COMM_EST;
		if (qp->ibqp.event_handler) {
			struct ib_event ev;
			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_COMM_EST;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
	}
	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		/* Reuse the rewound SGE state, or consume a fresh RWQE. */
		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
			qp->r_sge = qp->s_rdma_read_sge;
		else {
			ret = qib_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		qib_copy_sge(&qp->r_sge, data, pmtu, 0);
		break;
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
last_imm:
		qib_copy_sge(&qp->r_sge, data, tlen, 0);
		/* Drop the MR references held since send_first. */
		while (qp->s_rdma_read_sge.num_sge) {
			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
			if (--qp->s_rdma_read_sge.num_sge)
				qp->s_rdma_read_sge.sge =
					*qp->s_rdma_read_sge.sg_list++;
		}
		/* Build and post the receive completion. */
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		/* Signal completion event if the solicited bit is set. */
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;
			/* Check rkey */
			ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			/* Zero-length write: no MR to reference. */
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			goto rdma_last_imm;
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
		break;
	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
rdma_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		/*
		 * RDMA write with immediate consumes an RWQE; release a
		 * previously rewound SGE's MR references, or get a new RWQE.
		 */
		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
			while (qp->s_rdma_read_sge.num_sge) {
				atomic_dec(&qp->s_rdma_read_sge.sge.mr->
					   refcount);
				if (--qp->s_rdma_read_sge.num_sge)
					qp->s_rdma_read_sge.sge =
						*qp->s_rdma_read_sge.sg_list++;
			}
		else {
			ret = qib_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		goto last_imm;
	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		/* Release the rkey reference(s); no completion for LAST. */
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}
		break;
	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
	/* Advance the expected PSN and remember the opcode just handled. */
	qp->r_psn++;
	qp->r_state = opcode;
	spin_unlock(&qp->r_lock);
	return;
rewind:
	/* Keep the SGE state so the resent message can reuse it. */
	set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->n_pkt_drops++;
	spin_unlock(&qp->r_lock);
	return;
op_err:
	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	spin_unlock(&qp->r_lock);
	return;
sunlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

View File

@ -0,0 +1,607 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_smi.h>
#include "qib.h"
#include "qib_mad.h"
/**
* qib_ud_loopback - handle send on loopback QPs
* @sqp: the sending QP
* @swqe: the send work request
*
* This is called from qib_make_ud_req() to forward a WQE addressed
* to the same HCA.
* Note that the receive interrupt handler may be calling qib_ud_rcv()
* while this is being called.
*/
static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd;
	struct qib_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct qib_sge_state ssge;	/* local copy of the sender's SGE walk */
	struct qib_sge *sge;
	struct ib_wc wc;
	u32 length;
	/* qib_lookup_qpn takes a QP reference; dropped at "drop" below. */
	qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
	if (!qp) {
		ibp->n_pkt_drops++;
		return;
	}
	/* Destination QP must be same type and able to receive. */
	if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
	    !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		goto drop;
	}
	ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
	ppd = ppd_from_ibp(ibp);
	/* Pkey check does not apply to QP0/QP1 (qp_num <= 1). */
	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;
		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			/* Raise a bad-pkey trap toward the SM and drop. */
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}
	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;
		qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
			sqp->qkey : swqe->wr.wr.ud.remote_qkey;
		if (unlikely(qkey != qp->qkey)) {
			u16 lid;
			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}
	/*
	 * A GRH is expected to preceed the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof wc);
	wc.byte_len = length + sizeof(struct ib_grh);
	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}
	/* r_lock serializes against the receive interrupt path. */
	spin_lock_irqsave(&qp->r_lock, flags);
	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & QIB_R_REUSE_SGE)
		qp->r_flags &= ~QIB_R_REUSE_SGE;
	else {
		int ret;
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		/* Keep the RWQE for the next packet. */
		qp->r_flags |= QIB_R_REUSE_SGE;
		ibp->n_pkt_drops++;
		goto bail_unlock;
	}
	/* Deliver (or skip over) the pseudo-GRH at the front of the buffer. */
	if (ah_attr->ah_flags & IB_AH_GRH) {
		qib_copy_sge(&qp->r_sge, &ah_attr->grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	/* Walk the sender's SG list via a local copy; don't mutate the WQE. */
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;
		/* Clamp the copy to both the remaining data and this SGE. */
		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			/* Crossed an MR map segment; advance to the next. */
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	/* Release the receiver's MR references now that the copy is done. */
	while (qp->r_sge.num_sge) {
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		if (--qp->r_sge.num_sge)
			qp->r_sge.sge = *qp->r_sge.sg_list++;
	}
	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	/* Fill in the receive completion on the destination QP. */
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		swqe->wr.wr.ud.pkey_index : 0;
	wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
	wc.sl = ah_attr->sl;
	wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	/* Drop the reference taken by qib_lookup_qpn. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
/**
* qib_make_ud_req - construct a UD request packet
* @qp: the QP
*
* Return 1 if constructed; otherwise, return 0.
*/
int qib_make_ud_req(struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct qib_swqe *wqe;
	unsigned long flags;
	u32 nwords;		/* payload length in 32-bit words (padded) */
	u32 extra_bytes;	/* pad bytes to reach a 4-byte boundary */
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;
	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}
	/* Nothing to send. */
	if (qp->s_cur == qp->s_head)
		goto bail;
	wqe = get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;
	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
		if (ah_attr->dlid != QIB_PERMISSIVE_LID)
			ibp->n_multicast_xmit++;
		else
			ibp->n_unicast_xmit++;
	} else {
		ibp->n_unicast_xmit++;
		lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
		/* Destination is ourselves: loop the packet back locally. */
		if (unlikely(lid == ppd->lid)) {
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&qp->s_dma_busy)) {
				qp->s_flags |= QIB_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			/* s_lock is dropped across the loopback delivery. */
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, flags);
			qib_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}
	qp->s_cur = next_cur;
	/* Round the payload up to a 32-bit boundary. */
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;
	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = ah_attr->static_rate;
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;
	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
					       &ah_attr->grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &qp->s_hdr.u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  It is unspecified by the spec. what happens.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &qp->s_hdr.u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		/* Immediate data adds one header word after the DETH. */
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= ah_attr->sl << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		/* Fold the path bits into our source LID. */
		lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
	} else
		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
		ah_attr->dlid != QIB_PERMISSIVE_LID ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
					 qp->qkey : wqe->wr.wr.ud.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
done:
	ret = 1;
	goto unlock;
bail:
	qp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
/*
 * Find the pkey table index whose entry matches @pkey on the port's
 * hardware context, comparing with the limited/full membership bit
 * masked off.  Returns 0 if no entry matches.
 */
static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	u16 want = pkey & 0x7fff;	/* strip membership bit */
	unsigned idx;

	for (idx = 0; idx < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++idx)
		if ((dd->rcd[ctxt]->pkeys[idx] & 0x7fff) == want)
			return idx;

	/*
	 * Should not get here: hardware is expected to have already
	 * validated the pkey.  Punt and return index 0.
	 */
	return 0;
}
/**
* qib_ud_rcv - receive an incoming UD packet
* @ibp: the port the packet came in on
* @hdr: the packet header
* @has_grh: true if the packet has a GRH
* @data: the packet data
* @tlen: the packet length
* @qp: the QP the packet came on
*
* This is called from qib_qp_rcv() to process an incoming UD packet
* for the given QP.
* Called at interrupt level.
*/
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;		/* bytes of header preceding the payload */
	u32 pad;		/* pad bytes from BTH PadCnt field */
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;
	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
	}
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
	/* Get the number of bytes the message was padded by. */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4))) {
		/* Drop incomplete packets. */
		ibp->n_pkt_drops++;
		goto bail;
	}
	/* Payload length, excluding header, pad and ICRC. */
	tlen -= hdrsize + pad + 4;
	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
			ibp->n_pkt_drops++;
			goto bail;
		}
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;
			/* Pkey is the low 16 bits of BTH word 0. */
			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				/* Raise a bad-pkey trap toward the SM. */
				qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
					      pkey1,
					      (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					      src_qp, qp->ibqp.qp_num,
					      hdr->lrh[3], hdr->lrh[1]);
				goto bail;
			}
		}
		if (unlikely(qkey != qp->qkey)) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      src_qp, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto bail;
		}
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
			ibp->n_pkt_drops++;
			goto bail;
		}
	} else {
		struct ib_smp *smp;
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
			ibp->n_pkt_drops++;
			goto bail;
		}
		smp = (struct ib_smp *) data;
		/* Permissive LIDs on QP0 only valid for directed-route SMPs. */
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ibp->n_pkt_drops++;
			goto bail;
		}
	}
	/*
	 * The opcode is in the low byte when its in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		hdrsize += sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		/* Only SEND_ONLY (with/without immediate) is valid for UD. */
		ibp->n_pkt_drops++;
		goto bail;
	}
	/*
	 * A GRH is expected to preceed the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);
	/*
	 * We need to serialize getting a receive work queue entry and
	 * generating a completion for it against QPs sending to this QP
	 * locally.
	 */
	spin_lock(&qp->r_lock);
	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & QIB_R_REUSE_SGE)
		qp->r_flags &= ~QIB_R_REUSE_SGE;
	else {
		int ret;
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		/* Keep the RWQE for the next packet. */
		qp->r_flags |= QIB_R_REUSE_SGE;
		ibp->n_pkt_drops++;
		goto bail_unlock;
	}
	/* Deliver (or skip over) the pseudo-GRH at the front of the buffer. */
	if (has_grh) {
		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
	/* Release the receiver's MR references now that the copy is done. */
	while (qp->r_sge.num_sge) {
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		if (--qp->r_sge.num_sge)
			qp->r_sge.sge = *qp->r_sge.sg_list++;
	}
	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	/* Fill in the receive completion. */
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     (ohdr->bth[0] &
		      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
bail_unlock:
	spin_unlock(&qp->r_lock);
bail:;
}

View File

@ -0,0 +1,157 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mm.h>
#include <linux/device.h>
#include "qib.h"
/*
 * Drop the pin on an array of pages, optionally marking each page dirty
 * first so that modified data is written back before release.
 */
static void __qib_release_user_pages(struct page **p, size_t num_pages,
                                     int dirty)
{
        struct page **end = p + num_pages;

        while (p < end) {
                if (dirty)
                        set_page_dirty_lock(*p);
                put_page(*p);
                p++;
        }
}
/*
 * Pin @num_pages user pages starting at @start_page into @p.
 *
 * Call with current->mm->mmap_sem held.
 */
static int __get_user_pages(unsigned long start_page, size_t num_pages,
                            struct page **p, struct vm_area_struct **vma)
{
        unsigned long lock_limit;
        size_t got;
        int ret;

        /* RLIMIT_MEMLOCK is in bytes; convert to a page count. */
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

        if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
                ret = -ENOMEM;
                goto bail;
        }

        /* get_user_pages() may pin fewer pages than asked; loop until done. */
        for (got = 0; got < num_pages; got += ret) {
                ret = get_user_pages(current, current->mm,
                                     start_page + got * PAGE_SIZE,
                                     num_pages - got, 1, 1,
                                     p + got, vma);
                if (ret < 0)
                        goto bail_release;
        }

        /* Account all pinned pages against the mm's locked total. */
        current->mm->locked_vm += num_pages;

        ret = 0;
        goto bail;

bail_release:
        /* Undo the pins taken so far; nothing was dirtied yet. */
        __qib_release_user_pages(p, got, 0);
bail:
        return ret;
}
/**
 * qib_map_page - a safety wrapper around pci_map_page()
 * @hwdev: PCI device owning the mapping
 * @page: page to map for DMA
 * @offset: byte offset within @page
 * @size: number of bytes to map
 * @direction: PCI DMA direction
 *
 * A dma_addr of all 0's is interpreted by the chip as "disabled".
 * Unfortunately, it can also be a valid dma_addr returned on some
 * architectures.
 *
 * The powerpc iommu assigns dma_addrs in ascending order, so we don't
 * have to bother with retries or mapping a dummy page to insure we
 * don't just get the same mapping again.
 *
 * I'm sure we won't be so lucky with other iommu's, so FIXME.
 */
dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
                        unsigned long offset, size_t size, int direction)
{
        dma_addr_t phys;

        phys = pci_map_page(hwdev, page, offset, size, direction);

        if (phys == 0) {
                /* 0 means "disabled" to the chip: unmap and map once more. */
                pci_unmap_page(hwdev, phys, size, direction);
                phys = pci_map_page(hwdev, page, offset, size, direction);
                /*
                 * FIXME: If we get 0 again, we should keep this page,
                 * map another, then free the 0 page.
                 */
        }

        return phys;
}
/**
 * qib_get_user_pages - lock user pages into memory
 * @start_page: the start page (page-aligned user virtual address)
 * @num_pages: the number of pages to pin
 * @p: receives the pinned struct page pointers
 *
 * Serializes against the mm by taking mmap_sem for writing around the
 * actual pinning work in __get_user_pages().  For now, num_pages is
 * always 1, but that will probably change at some point (because the
 * caller is doing expected sends on a single virtually contiguous
 * buffer, so we can do all pages at once).
 */
int qib_get_user_pages(unsigned long start_page, size_t num_pages,
                       struct page **p)
{
        int rc;

        down_write(&current->mm->mmap_sem);
        rc = __get_user_pages(start_page, num_pages, p, NULL);
        up_write(&current->mm->mmap_sem);

        return rc;
}
/*
 * Unpin pages previously pinned with qib_get_user_pages(), marking them
 * dirty, and update the mm's locked-page accounting.
 */
void qib_release_user_pages(struct page **p, size_t num_pages)
{
        if (current->mm) /* during close after signal, mm can be NULL */
                down_write(&current->mm->mmap_sem);

        __qib_release_user_pages(p, num_pages, 1);

        if (current->mm) {
                /* NOTE: when mm is NULL the locked_vm count is not adjusted. */
                current->mm->locked_vm -= num_pages;
                up_write(&current->mm->mmap_sem);
        }
}

View File

@ -0,0 +1,897 @@
/*
* Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include "qib.h"
#include "qib_user_sdma.h"
/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5secs (500 iterations x 10ms sleep) */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500

/*
 * One user SDMA packet: a header frag in addr[0] plus up to three
 * payload frags.  Packets needing more pages are coalesced into a
 * single bounce page instead.
 */
struct qib_user_sdma_pkt {
        u8 naddr;               /* dimension of addr (1..3) ... */
        u32 counter;            /* sdma pkts queued counter for this entry */
        u64 added;              /* global descq number of entries */

        struct {
                u32 offset;             /* offset for kvaddr, addr */
                u32 length;             /* length in page */
                u8 put_page;            /* should we put_page? */
                u8 dma_mapped;          /* is page dma_mapped? */
                struct page *page;      /* may be NULL (coherent mem) */
                void *kvaddr;           /* FIXME: only for pio hack */
                dma_addr_t addr;
        } addr[4];              /* max pages, any more and we coalesce */

        struct list_head list;  /* list element */
};

/* Per-context queue of user SDMA packets and their backing caches. */
struct qib_user_sdma_queue {
        /*
         * pkts sent to dma engine are queued on this
         * list head.  the type of the elements of this
         * list are struct qib_user_sdma_pkt...
         */
        struct list_head sent;

        /* headers with expected length are allocated from here... */
        char header_cache_name[64];
        struct dma_pool *header_cache;

        /* packets are allocated from the slab cache... */
        char pkt_slab_name[64];
        struct kmem_cache *pkt_slab;

        /* as packets go on the queued queue, they are counted... */
        u32 counter;
        u32 sent_counter;

        /* dma page table */
        struct rb_root dma_pages_root;

        /* protect everything above... */
        struct mutex lock;
};
/*
 * Allocate and initialize a user SDMA queue for one hardware context:
 * the packet slab cache and the DMA-coherent header pool.  Returns NULL
 * on any allocation failure.
 */
struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
        struct qib_user_sdma_queue *pq =
                kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);

        if (!pq)
                goto done;

        pq->counter = 0;
        pq->sent_counter = 0;
        INIT_LIST_HEAD(&pq->sent);

        mutex_init(&pq->lock);

        /* Cache names encode unit/context so they are unique system-wide. */
        snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
                 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
        pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
                                         sizeof(struct qib_user_sdma_pkt),
                                         0, 0, NULL);

        if (!pq->pkt_slab)
                goto err_kfree;

        snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
                 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
        /* Fixed-size headers, 4-byte aligned, from a coherent DMA pool. */
        pq->header_cache = dma_pool_create(pq->header_cache_name,
                                           dev,
                                           QIB_USER_SDMA_EXP_HEADER_LENGTH,
                                           4, 0);
        if (!pq->header_cache)
                goto err_slab;

        pq->dma_pages_root = RB_ROOT;

        goto done;

err_slab:
        kmem_cache_destroy(pq->pkt_slab);
err_kfree:
        kfree(pq);
        pq = NULL;

done:
        return pq;
}
/* Record one fragment's bookkeeping in slot @i of @pkt. */
static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
                                    int i, size_t offset, size_t len,
                                    int put_page, int dma_mapped,
                                    struct page *page,
                                    void *kvaddr, dma_addr_t dma_addr)
{
        __typeof__(&pkt->addr[0]) frag = &pkt->addr[i];

        frag->offset = offset;
        frag->length = len;
        frag->put_page = put_page;
        frag->dma_mapped = dma_mapped;
        frag->page = page;
        frag->kvaddr = kvaddr;
        frag->addr = dma_addr;
}
/* Initialize @pkt with its header frag in slot 0 (never a pinned page). */
static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
                                      u32 counter, size_t offset,
                                      size_t len, int dma_mapped,
                                      struct page *page,
                                      void *kvaddr, dma_addr_t dma_addr)
{
        pkt->counter = counter;
        pkt->naddr = 1;
        qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
                                kvaddr, dma_addr);
}
/* we've too many pages in the iovec, coalesce to a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
                                  struct qib_user_sdma_pkt *pkt,
                                  const struct iovec *iov,
                                  unsigned long niov)
{
        int ret = 0;
        struct page *page = alloc_page(GFP_KERNEL);
        void *mpage_save;
        char *mpage;
        int i;
        int len = 0;
        dma_addr_t dma_addr;

        if (!page) {
                ret = -ENOMEM;
                goto done;
        }

        mpage = kmap(page);
        mpage_save = mpage;
        /* Copy every iovec's payload back-to-back into the bounce page. */
        for (i = 0; i < niov; i++) {
                int cfur;

                cfur = copy_from_user(mpage,
                                      iov[i].iov_base, iov[i].iov_len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_unmap;
                }

                mpage += iov[i].iov_len;
                len += iov[i].iov_len;
        }

        dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
                                DMA_TO_DEVICE);
        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                ret = -ENOMEM;
                goto free_unmap;
        }

        /* Frag 0 is the header; the coalesced payload becomes frag 1. */
        qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
                                dma_addr);
        pkt->naddr = 2;

        goto done;

free_unmap:
        kunmap(page);
        __free_page(page);
done:
        return ret;
}
/*
 * How many pages does this iovec element touch?
 */
static int qib_user_sdma_num_pages(const struct iovec *iov)
{
        const unsigned long first =
                (unsigned long) iov->iov_base & PAGE_MASK;
        const unsigned long last =
                ((unsigned long) iov->iov_base + iov->iov_len - 1) & PAGE_MASK;

        return ((last - first) >> PAGE_SHIFT) + 1;
}
/*
 * Truncate @len so that [addr, addr + len) does not cross a page boundary.
 */
static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
{
        const unsigned long in_page = PAGE_SIZE - (addr & ~PAGE_MASK);

        return len > in_page ? in_page : len;
}
/*
 * Release one frag of a packet: undo the DMA mapping, kmap, and page
 * pin (or free the bounce page / return a coherent header to its pool).
 */
static void qib_user_sdma_free_pkt_frag(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct qib_user_sdma_pkt *pkt,
                                        int frag)
{
        const int i = frag;

        if (pkt->addr[i].page) {
                /* Page-backed frag: pinned user page or bounce page. */
                if (pkt->addr[i].dma_mapped)
                        dma_unmap_page(dev,
                                       pkt->addr[i].addr,
                                       pkt->addr[i].length,
                                       DMA_TO_DEVICE);

                if (pkt->addr[i].kvaddr)
                        kunmap(pkt->addr[i].page);

                if (pkt->addr[i].put_page)
                        put_page(pkt->addr[i].page);
                else
                        __free_page(pkt->addr[i].page);
        } else if (pkt->addr[i].kvaddr)
                /* free coherent mem from cache... */
                dma_pool_free(pq->header_cache,
                              pkt->addr[i].kvaddr, pkt->addr[i].addr);
}
/*
 * Pin the user pages backing [addr, addr + tlen) and record each page as
 * a frag of @pkt (DMA-mapped and kmap'ed).  Returns the number of pages
 * pinned on success or a negative errno.  On failure, frags already
 * recorded in @pkt are left for the caller to free; pages pinned but not
 * yet recorded are released here.
 */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
                                   struct qib_user_sdma_pkt *pkt,
                                   unsigned long addr, int tlen, int npages)
{
        struct page *pages[2];
        int j;
        int ret;

        ret = get_user_pages(current, current->mm, addr,
                             npages, 0, 1, pages, NULL);

        if (ret != npages) {
                int i;

                /* Partial pin: release what we got and fail. */
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);

                ret = -ENOMEM;
                goto done;
        }

        for (j = 0; j < npages; j++) {
                /* map the pages... */
                const int flen = qib_user_sdma_page_length(addr, tlen);
                dma_addr_t dma_addr =
                        dma_map_page(&dd->pcidev->dev,
                                     pages[j], 0, flen, DMA_TO_DEVICE);
                unsigned long fofs = addr & ~PAGE_MASK;

                if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                        /*
                         * Release the pages not yet recorded in @pkt;
                         * previously they were leaked.  Frags already
                         * added are freed by the caller via
                         * qib_user_sdma_free_pkt_frag().
                         */
                        while (j < npages)
                                put_page(pages[j++]);
                        ret = -ENOMEM;
                        goto done;
                }

                qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
                                        pages[j], kmap(pages[j]), dma_addr);

                pkt->naddr++;
                addr += flen;
                tlen -= flen;
        }

done:
        return ret;
}
/*
 * Pin and record the payload frags described by @iov into @pkt.
 * Frag 0 (the header) must already be initialized by the caller.
 * Returns >= 0 on success, negative errno on failure; on failure the
 * payload frags added here are freed, while the header frag stays owned
 * by the caller.
 */
static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
                                 struct qib_user_sdma_queue *pq,
                                 struct qib_user_sdma_pkt *pkt,
                                 const struct iovec *iov,
                                 unsigned long niov)
{
        int ret = 0;
        unsigned long idx;

        for (idx = 0; idx < niov; idx++) {
                const int npages = qib_user_sdma_num_pages(iov + idx);
                const unsigned long addr = (unsigned long) iov[idx].iov_base;

                ret = qib_user_sdma_pin_pages(dd, pkt, addr,
                                              iov[idx].iov_len, npages);
                if (ret < 0)
                        goto free_pkt;
        }

        goto done;

free_pkt:
        /*
         * Free only the payload frags added here (index 1 onward).
         * Frag 0 is the header, which the caller unmaps and frees
         * itself on its error path; freeing it here as well caused a
         * double unmap/free of the header page or dma_pool entry.
         */
        for (idx = 1; idx < pkt->naddr; idx++)
                qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
        return ret;
}
/*
 * Attach the payload to @pkt: per-page frags when they fit in the
 * addr[] array, otherwise a single coalesced bounce page.
 */
static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
                                      struct qib_user_sdma_queue *pq,
                                      struct qib_user_sdma_pkt *pkt,
                                      const struct iovec *iov,
                                      unsigned long niov, int npages)
{
        if (npages >= ARRAY_SIZE(pkt->addr))
                return qib_user_sdma_coalesce(dd, pkt, iov, niov);

        return qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
}
/* Free every packet on @list along with all of its frags. */
static void qib_user_sdma_free_pkt_list(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct list_head *list)
{
        struct qib_user_sdma_pkt *pkt, *next;

        list_for_each_entry_safe(pkt, next, list, list) {
                int frag;

                for (frag = 0; frag < pkt->naddr; frag++)
                        qib_user_sdma_free_pkt_frag(dev, pq, pkt, frag);

                kmem_cache_free(pq->pkt_slab, pkt);
        }
}
/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets to list, returning the number of iovec
 * elements consumed.  list must be empty initially, as, if there is
 * an error we clean it...
 */
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                                    struct qib_user_sdma_queue *pq,
                                    struct list_head *list,
                                    const struct iovec *iov,
                                    unsigned long niov,
                                    int maxpkts)
{
        unsigned long idx = 0;
        int ret = 0;
        int npkts = 0;
        struct page *page = NULL;
        __le32 *pbc;
        dma_addr_t dma_addr;
        struct qib_user_sdma_pkt *pkt = NULL;
        size_t len;
        size_t nw;
        u32 counter = pq->counter;
        int dma_mapped = 0;

        while (idx < niov && npkts < maxpkts) {
                const unsigned long addr = (unsigned long) iov[idx].iov_base;
                const unsigned long idx_save = idx;
                unsigned pktnw;
                unsigned pktnwc;
                int nfrags = 0;
                int npages = 0;
                int cfur;

                dma_mapped = 0;
                len = iov[idx].iov_len;
                nw = len >> 2;
                page = NULL;

                pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
                if (!pkt) {
                        ret = -ENOMEM;
                        goto free_list;
                }

                /* First iovec of each packet is its header; validate it. */
                if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
                    len > PAGE_SIZE || len & 3 || addr & 3) {
                        ret = -EINVAL;
                        goto free_pkt;
                }

                /* Expected-size headers come from the coherent pool. */
                if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
                        pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
                                             &dma_addr);
                else
                        pbc = NULL;

                if (!pbc) {
                        /* Odd-sized header (or pool empty): kmap'ed page. */
                        page = alloc_page(GFP_KERNEL);
                        if (!page) {
                                ret = -ENOMEM;
                                goto free_pkt;
                        }
                        pbc = kmap(page);
                }

                cfur = copy_from_user(pbc, iov[idx].iov_base, len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_pbc;
                }

                /*
                 * This assignment is a bit strange.  it's because the
                 * the pbc counts the number of 32 bit words in the full
                 * packet _except_ the first word of the pbc itself...
                 */
                pktnwc = nw - 1;

                /*
                 * pktnw computation yields the number of 32 bit words
                 * that the caller has indicated in the PBC.  note that
                 * this is one less than the total number of words that
                 * goes to the send DMA engine as the first 32 bit word
                 * of the PBC itself is not counted.  Armed with this count,
                 * we can verify that the packet is consistent with the
                 * iovec lengths.
                 */
                pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
                if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                idx++;
                /* Accumulate payload iovecs until the PBC count is met. */
                while (pktnwc < pktnw && idx < niov) {
                        const size_t slen = iov[idx].iov_len;
                        const unsigned long faddr =
                                (unsigned long) iov[idx].iov_base;

                        if (slen & 3 || faddr & 3 || !slen ||
                            slen > PAGE_SIZE) {
                                ret = -EINVAL;
                                goto free_pbc;
                        }

                        npages++;
                        /* An iovec straddling a page boundary needs 2 pages. */
                        if ((faddr & PAGE_MASK) !=
                            ((faddr + slen - 1) & PAGE_MASK))
                                npages++;

                        pktnwc += slen >> 2;
                        idx++;
                        nfrags++;
                }

                if (pktnwc != pktnw) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                if (page) {
                        dma_addr = dma_map_page(&dd->pcidev->dev,
                                                page, 0, len, DMA_TO_DEVICE);
                        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }

                        dma_mapped = 1;
                }

                qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
                                          page, pbc, dma_addr);

                if (nfrags) {
                        ret = qib_user_sdma_init_payload(dd, pq, pkt,
                                                         iov + idx_save + 1,
                                                         nfrags, npages);
                        if (ret < 0)
                                goto free_pbc_dma;
                }

                counter++;
                npkts++;

                list_add_tail(&pkt->list, list);
        }

        /* Success: report how many iovec elements were consumed. */
        ret = idx;
        goto done;

free_pbc_dma:
        if (dma_mapped)
                dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
        if (page) {
                kunmap(page);
                __free_page(page);
        } else
                dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
        kmem_cache_free(pq->pkt_slab, pkt);
free_list:
        qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
        return ret;
}
/* Record the counter of the most recently completed (freed) packet. */
static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
                                               u32 c)
{
        pq->sent_counter = c;
}
/* try to clean out queue -- needs pq->lock */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
                                     struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        struct list_head free_list;
        struct qib_user_sdma_pkt *pkt;
        struct qib_user_sdma_pkt *pkt_prev;
        int ret = 0;

        INIT_LIST_HEAD(&free_list);

        /*
         * Packets whose descriptors the engine has retired
         * (added <= sdma_descq_removed) can be freed.  The sent list is
         * in submission order, so stop at the first outstanding packet.
         */
        list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
                s64 descd = ppd->sdma_descq_removed - pkt->added;

                if (descd < 0)
                        break;

                list_move_tail(&pkt->list, &free_list);

                /* one more packet cleaned */
                ret++;
        }

        if (!list_empty(&free_list)) {
                u32 counter;

                /* The last freed packet carries the newest completed count. */
                pkt = list_entry(free_list.prev,
                                 struct qib_user_sdma_pkt, list);
                counter = pkt->counter;

                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                qib_user_sdma_set_complete_counter(pq, counter);
        }

        return ret;
}
/* Tear down a queue created by qib_user_sdma_queue_create(); NULL-safe. */
void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
        if (!pq)
                return;

        dma_pool_destroy(pq->header_cache);
        kmem_cache_destroy(pq->pkt_slab);
        kfree(pq);
}
/* clean descriptor queue, returns > 0 if some elements cleaned */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
        unsigned long flags;
        int progress;

        spin_lock_irqsave(&ppd->sdma_lock, flags);
        progress = qib_sdma_make_progress(ppd);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        return progress;
}
/* we're in close, drain packets so that we can cleanup successfully... */
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                               struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        int i;

        if (!pq)
                return;

        /* Poll for up to QIB_USER_SDMA_DRAIN_TIMEOUT * 10ms total. */
        for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
                mutex_lock(&pq->lock);
                if (list_empty(&pq->sent)) {
                        mutex_unlock(&pq->lock);
                        break;
                }
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
                mutex_unlock(&pq->lock);
                msleep(10);
        }

        if (!list_empty(&pq->sent)) {
                struct list_head free_list;

                /* Timed out: forcibly reclaim whatever is still queued. */
                qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
                INIT_LIST_HEAD(&free_list);
                mutex_lock(&pq->lock);
                list_splice_init(&pq->sent, &free_list);
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                mutex_unlock(&pq->lock);
        }
}
/*
 * Build the low descriptor qword: physical address low bits, ring
 * generation, dword count, and buffer offset.
 */
static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
                                         u64 addr, u64 dwlen, u64 dwoffset)
{
        const u64 gen = ppd->sdma_generation;

        return cpu_to_le64(
                ((addr & 0xfffffffcULL) << 32) |  /* SDmaPhyAddr[31:0] */
                ((gen & 3ULL) << 30) |            /* SDmaGeneration[1:0] */
                ((dwlen & 0x7ffULL) << 16) |      /* SDmaDwordCount[10:0] */
                (dwoffset & 0x7ffULL));           /* SDmaBufOffset[12:2] */
}
/* Set bit 12 (first descriptor of a packet). */
static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
        return descq | cpu_to_le64(1ULL << 12);
}
/* Set bit 11 (last descriptor) and bit 13 (update dma head on completion). */
static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
        /* last */                          /* dma head */
        return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}
/* Build the high descriptor qword from the upper address bits. */
static inline __le64 qib_sdma_make_desc1(u64 addr)
{
        /* SDmaPhyAddr[47:32] */
        return cpu_to_le64(addr >> 32);
}
/*
 * Fill descriptor slot @tail for frag @idx of @pkt.  The first and last
 * frags of a packet additionally get the packet start/end flags.
 */
static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
                                    struct qib_user_sdma_pkt *pkt, int idx,
                                    unsigned ofs, u16 tail)
{
        const u64 addr = (u64) pkt->addr[idx].addr +
                (u64) pkt->addr[idx].offset;
        const u64 dwlen = (u64) pkt->addr[idx].length / 4;
        __le64 *descqp;
        __le64 descq0;

        descqp = &ppd->sdma_descq[tail].qw[0];

        descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
        if (idx == 0)
                descq0 = qib_sdma_make_first_desc0(descq0);
        if (idx == pkt->naddr - 1)
                descq0 = qib_sdma_make_last_desc0(descq0);

        descqp[0] = descq0;
        descqp[1] = qib_sdma_make_desc1(addr);
}
/* pq->lock must be held, get packets on the wire... */
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
                                   struct qib_user_sdma_queue *pq,
                                   struct list_head *pktlist)
{
        struct qib_devdata *dd = ppd->dd;
        int ret = 0;
        unsigned long flags;
        u16 tail;
        u8 generation;
        u64 descq_added;

        if (list_empty(pktlist))
                return 0;

        if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
                return -ECOMM;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        /* keep a copy for restoring purposes in case of problems */
        generation = ppd->sdma_generation;
        descq_added = ppd->sdma_descq_added;

        if (unlikely(!__qib_sdma_running(ppd))) {
                ret = -ECOMM;
                goto unlock;
        }

        tail = ppd->sdma_descq_tail;
        while (!list_empty(pktlist)) {
                struct qib_user_sdma_pkt *pkt =
                        list_entry(pktlist->next, struct qib_user_sdma_pkt,
                                   list);
                int i;
                unsigned ofs = 0;
                u16 dtail = tail;

                /* Ring full: stop (successfully) with what was pushed. */
                if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
                        goto unlock_check_tail;

                for (i = 0; i < pkt->naddr; i++) {
                        qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
                        ofs += pkt->addr[i].length >> 2;

                        /* Wrapping the ring flips the generation bits. */
                        if (++tail == ppd->sdma_descq_cnt) {
                                tail = 0;
                                ++ppd->sdma_generation;
                        }
                }

                if ((ofs << 2) > ppd->ibmaxlen) {
                        ret = -EMSGSIZE;
                        goto unlock;
                }

                /*
                 * If the packet is >= 2KB mtu equivalent, we have to use
                 * the large buffers, and have to mark each descriptor as
                 * part of a large buffer packet.
                 */
                if (ofs > dd->piosize2kmax_dwords) {
                        for (i = 0; i < pkt->naddr; i++) {
                                ppd->sdma_descq[dtail].qw[0] |=
                                        cpu_to_le64(1ULL << 14);
                                if (++dtail == ppd->sdma_descq_cnt)
                                        dtail = 0;
                        }
                }

                ppd->sdma_descq_added += pkt->naddr;
                pkt->added = ppd->sdma_descq_added;
                list_move_tail(&pkt->list, &pq->sent);
                ret++;
        }

unlock_check_tail:
        /* advance the tail on the chip if necessary */
        if (ppd->sdma_descq_tail != tail)
                dd->f_sdma_update_tail(ppd, tail);

unlock:
        if (unlikely(ret < 0)) {
                /* Roll back so the ring state looks untouched. */
                ppd->sdma_generation = generation;
                ppd->sdma_descq_added = descq_added;
        }
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        return ret;
}
/*
 * Entry point for a user writev() on the SDMA device: parse and pin the
 * caller's iovecs into packets (up to 8 at a time), then push them onto
 * the hardware ring.  Returns the number of packets queued, or a
 * negative errno.
 */
int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
                         struct qib_user_sdma_queue *pq,
                         const struct iovec *iov,
                         unsigned long dim)
{
        struct qib_devdata *dd = rcd->dd;
        struct qib_pportdata *ppd = rcd->ppd;
        int ret = 0;
        struct list_head list;
        int npkts = 0;

        INIT_LIST_HEAD(&list);

        mutex_lock(&pq->lock);

        /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
        if (!qib_sdma_running(ppd))
                goto done_unlock;

        /* Reap already-completed packets before queueing more. */
        if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
        }

        while (dim) {
                const int mxp = 8;

                down_write(&current->mm->mmap_sem);
                ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
                up_write(&current->mm->mmap_sem);

                if (ret <= 0)
                        goto done_unlock;
                else {
                        /* ret is the number of iovec elements consumed. */
                        dim -= ret;
                        iov += ret;
                }

                /* force packets onto the sdma hw queue... */
                if (!list_empty(&list)) {
                        /*
                         * Lazily clean hw queue.  the 4 is a guess of about
                         * how many sdma descriptors a packet will take (it
                         * doesn't have to be perfect).
                         */
                        if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
                                qib_user_sdma_hwqueue_clean(ppd);
                                qib_user_sdma_queue_clean(ppd, pq);
                        }

                        ret = qib_user_sdma_push_pkts(ppd, pq, &list);
                        if (ret < 0)
                                goto done_unlock;
                        else {
                                npkts += ret;
                                pq->counter += ret;

                                /* Ring full: stop with a partial push. */
                                if (!list_empty(&list))
                                        goto done_unlock;
                        }
                }
        }

done_unlock:
        if (!list_empty(&list))
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
        mutex_unlock(&pq->lock);

        return (ret < 0) ? ret : npkts;
}
/*
 * Advance the hardware queue and reap completed packets; returns the
 * number of packets cleaned from @pq.
 */
int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
                                struct qib_user_sdma_queue *pq)
{
        int cleaned;

        mutex_lock(&pq->lock);
        qib_user_sdma_hwqueue_clean(ppd);
        cleaned = qib_user_sdma_queue_clean(ppd, pq);
        mutex_unlock(&pq->lock);

        return cleaned;
}
/* Counter of the most recently completed packet; 0 when @pq is NULL. */
u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
        return pq ? pq->sent_counter : 0;
}
/* Counter of the most recently submitted packet; 0 when @pq is NULL. */
u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
        return pq ? pq->counter : 0;
}

View File

@ -0,0 +1,52 @@
/*
* Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Include guard added: this header previously had none. */
#ifndef QIB_USER_SDMA_H
#define QIB_USER_SDMA_H

#include <linux/device.h>

struct qib_user_sdma_queue;

/* Create/destroy a per-context user SDMA queue. */
struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq);

/* Queue the caller's iovecs for send DMA; returns packets queued. */
int qib_user_sdma_writev(struct qib_ctxtdata *pd,
                         struct qib_user_sdma_queue *pq,
                         const struct iovec *iov,
                         unsigned long dim);

int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
                                struct qib_user_sdma_queue *pq);

void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                               struct qib_user_sdma_queue *pq);

/* Progress counters: completed vs. submitted packets. */
u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq);
u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq);

#endif /* QIB_USER_SDMA_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,368 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/rculist.h>
#include "qib.h"
/**
 * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 *
 * Takes a reference on @qp that qib_mcast_qp_free() drops.
 * Returns NULL on allocation failure.
 */
static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
{
        struct qib_mcast_qp *mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);

        if (mqp) {
                mqp->qp = qp;
                atomic_inc(&qp->refcount);
        }

        return mqp;
}
/* Free @mqp and drop the QP reference it held. */
static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
{
        struct qib_qp *qp = mqp->qp;

        kfree(mqp);

        /* Notify qib_destroy_qp() if it is waiting. */
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}
/**
 * qib_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 * Returns NULL on allocation failure.
 */
static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
{
        struct qib_mcast *mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);

        if (mcast) {
                mcast->mgid = *mgid;
                INIT_LIST_HEAD(&mcast->qp_list);
                init_waitqueue_head(&mcast->wait);
                atomic_set(&mcast->refcount, 0);
                mcast->n_attached = 0;
        }

        return mcast;
}
/* Free @mcast and every QP link still attached to it. */
static void qib_mcast_free(struct qib_mcast *mcast)
{
        struct qib_mcast_qp *mqp, *tmp;

        list_for_each_entry_safe(mqp, tmp, &mcast->qp_list, list)
                qib_mcast_qp_free(mqp);

        kfree(mcast);
}
/**
 * qib_mcast_find - search the global table for the given multicast GID
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
{
        struct rb_node *n;
        unsigned long flags;
        struct qib_mcast *mcast;

        spin_lock_irqsave(&ibp->lock, flags);
        n = ibp->mcast_tree.rb_node;
        while (n) {
                int ret;

                mcast = rb_entry(n, struct qib_mcast, rb_node);

                /* The tree is ordered by raw GID memcmp(). */
                ret = memcmp(mgid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else {
                        /* Found: take a reference for the caller. */
                        atomic_inc(&mcast->refcount);
                        spin_unlock_irqrestore(&ibp->lock, flags);
                        goto bail;
                }
        }
        spin_unlock_irqrestore(&ibp->lock, flags);

        mcast = NULL;

bail:
        return mcast;
}
/**
 * qib_mcast_add - insert mcast GID into table and attach QP struct
 * @dev: the device-wide state (for the group-count limit)
 * @ibp: the IB port whose mcast tree is updated
 * @mcast: the mcast GID table entry to insert
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.  Return ENOMEM if an
 * attach/group limit was hit.  (Error codes are positive by design;
 * the caller maps them.)
 */
static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
                         struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
{
        struct rb_node **n = &ibp->mcast_tree.rb_node;
        struct rb_node *pn = NULL;
        int ret;

        spin_lock_irq(&ibp->lock);

        while (*n) {
                struct qib_mcast *tmcast;
                struct qib_mcast_qp *p;

                pn = *n;
                tmcast = rb_entry(pn, struct qib_mcast, rb_node);

                ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0) {
                        n = &pn->rb_left;
                        continue;
                }
                if (ret > 0) {
                        n = &pn->rb_right;
                        continue;
                }

                /* GID exists: search the QP list to see if this is already there. */
                list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
                        if (p->qp == mqp->qp) {
                                ret = ESRCH;
                                goto bail;
                        }
                }
                if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
                        ret = ENOMEM;
                        goto bail;
                }

                tmcast->n_attached++;

                list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
                ret = EEXIST;
                goto bail;
        }

        /* New group: enforce the per-device group limit first. */
        spin_lock(&dev->n_mcast_grps_lock);
        if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
                spin_unlock(&dev->n_mcast_grps_lock);
                ret = ENOMEM;
                goto bail;
        }

        dev->n_mcast_grps_allocated++;
        spin_unlock(&dev->n_mcast_grps_lock);

        mcast->n_attached++;

        list_add_tail_rcu(&mqp->list, &mcast->qp_list);

        /* This reference is held by the rb-tree itself. */
        atomic_inc(&mcast->refcount);
        rb_link_node(&mcast->rb_node, pn, n);
        rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

        ret = 0;

bail:
        spin_unlock_irq(&ibp->lock);

        return ret;
}
/*
 * Attach @ibqp to the multicast group @gid on its port.  The positive
 * result codes from qib_mcast_add() select which preallocated
 * structures are surplus and must be freed.
 */
int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct qib_qp *qp = to_iqp(ibqp);
        struct qib_ibdev *dev = to_idev(ibqp->device);
        struct qib_ibport *ibp;
        struct qib_mcast *mcast;
        struct qib_mcast_qp *mqp;
        int ret;

        if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Allocate data structures since its better to do this outside of
         * spin locks and it will most likely be needed.
         */
        mcast = qib_mcast_alloc(gid);
        if (mcast == NULL) {
                ret = -ENOMEM;
                goto bail;
        }
        mqp = qib_mcast_qp_alloc(qp);
        if (mqp == NULL) {
                qib_mcast_free(mcast);
                ret = -ENOMEM;
                goto bail;
        }
        ibp = to_iport(ibqp->device, qp->port_num);
        switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
        case ESRCH:
                /* Neither was used: OK to attach the same QP twice. */
                qib_mcast_qp_free(mqp);
                qib_mcast_free(mcast);
                break;

        case EEXIST: /* The mcast wasn't used */
                qib_mcast_free(mcast);
                break;

        case ENOMEM:
                /* Exceeded the maximum number of mcast groups. */
                qib_mcast_qp_free(mqp);
                qib_mcast_free(mcast);
                ret = -ENOMEM;
                goto bail;

        default:
                break;
        }

        ret = 0;

bail:
        return ret;
}
/*
 * Detach @ibqp from the multicast group @gid, removing the group from
 * the port's tree when it becomes empty.  Returns -EINVAL when the GID
 * is not in the table or the QP is not attached to it.
 *
 * Bug fixed: the old code tested the list_for_each_entry_safe() cursor
 * after the loop; when the QP was not on the list the cursor points at
 * the list head container (non-NULL), so qib_mcast_qp_free() ran on
 * garbage.  Track the found entry explicitly instead.
 */
int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct qib_qp *qp = to_iqp(ibqp);
        struct qib_ibdev *dev = to_idev(ibqp->device);
        struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
        struct qib_mcast *mcast = NULL;
        struct qib_mcast_qp *p, *tmp, *delp = NULL;
        struct rb_node *n;
        int last = 0;
        int ret;

        if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
                ret = -EINVAL;
                goto bail;
        }

        spin_lock_irq(&ibp->lock);

        /* Find the GID in the mcast table. */
        n = ibp->mcast_tree.rb_node;
        while (1) {
                if (n == NULL) {
                        spin_unlock_irq(&ibp->lock);
                        ret = -EINVAL;
                        goto bail;
                }

                mcast = rb_entry(n, struct qib_mcast, rb_node);
                ret = memcmp(gid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        break;
        }

        /* Search the QP list. */
        list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
                if (p->qp != qp)
                        continue;
                /*
                 * We found it, so remove it, but don't poison the forward
                 * link until we are sure there are no list walkers.
                 */
                list_del_rcu(&p->list);
                mcast->n_attached--;
                delp = p;

                /* If this was the last attached QP, remove the GID too. */
                if (list_empty(&mcast->qp_list)) {
                        rb_erase(&mcast->rb_node, &ibp->mcast_tree);
                        last = 1;
                }
                break;
        }

        spin_unlock_irq(&ibp->lock);

        /* QP was not attached to this group. */
        if (!delp) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Wait for any list walkers to finish before freeing the
         * list element.
         */
        wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
        qib_mcast_qp_free(delp);

        if (last) {
                /* Drop the tree's reference and free the empty group. */
                atomic_dec(&mcast->refcount);
                wait_event(mcast->wait, !atomic_read(&mcast->refcount));
                qib_mcast_free(mcast);
                spin_lock_irq(&dev->n_mcast_grps_lock);
                dev->n_mcast_grps_allocated--;
                spin_unlock_irq(&dev->n_mcast_grps_lock);
        }

        ret = 0;

bail:
        return ret;
}
/**
 * qib_mcast_tree_empty - report whether a port has any multicast groups
 * @ibp: the IB port to check
 *
 * Returns non-zero when the port's mcast red-black tree has no nodes.
 */
int qib_mcast_tree_empty(struct qib_ibport *ibp)
{
	return !ibp->mcast_tree.rb_node;
}

View File

@ -1,7 +1,5 @@
#ifndef _IPATH_7220_H
#define _IPATH_7220_H
/*
* Copyright (c) 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -33,25 +31,32 @@
*/
/*
* This header file provides the declarations and common definitions
* for (mostly) manipulation of the SerDes blocks within the IBA7220.
* the functions declared should only be called from within other
* 7220-related files such as ipath_iba7220.c or ipath_sd7220.c.
* This file is conditionally built on PowerPC only. Otherwise weak symbol
* versions of the functions exported from here are used.
*/
int ipath_sd7220_presets(struct ipath_devdata *dd);
int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset);
int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, u8 *img,
int len, int offset);
int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, const u8 *img,
int len, int offset);
/*
* Below used for sdnum parameter, selecting one of the two sections
* used for PCIe, or the single SerDes used for IB, which is the
* only one currently used
#include "qib.h"
/**
* qib_enable_wc - enable write combining for MMIO writes to the device
* @dd: qlogic_ib device
*
* Nothing to do on PowerPC, so just return without error.
*/
#define IB_7220_SERDES 2
int qib_enable_wc(struct qib_devdata *dd)
{
	/* PowerPC needs no MTRR-style setup for write combining. */
	(void) dd;		/* unused on this architecture */
	return 0;
}
int ipath_sd7220_ib_load(struct ipath_devdata *dd);
int ipath_sd7220_ib_vfy(struct ipath_devdata *dd);
#endif /* _IPATH_7220_H */
/**
 * qib_unordered_wc - indicate whether write combining is unordered
 *
 * Performance depends on doing write-combined MMIO writes in the most
 * efficient way, so callers need to know whether this processor may
 * reorder stores when write combining.
 */
int qib_unordered_wc(void)
{
	/* PowerPC: assume WC stores may be reordered. */
	return 1;
}

View File

@ -0,0 +1,171 @@
/*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file is conditionally built on x86_64 only. Otherwise weak symbol
* versions of the functions exported from here are used.
*/
#include <linux/pci.h>
#include <asm/mtrr.h>
#include <asm/processor.h>
#include "qib.h"
/**
 * qib_enable_wc - enable write combining for MMIO writes to the device
 * @dd: qlogic_ib device
 *
 * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
 * write combining over the PIO-buffer portion of BAR 0.
 *
 * Returns 0 on success, -ENODEV if no suitably aligned power-of-2 range
 * could be found inside the BAR, or -EINVAL if mtrr_add() failed.
 */
int qib_enable_wc(struct qib_devdata *dd)
{
	int ret = 0;
	u64 pioaddr, piolen;
	unsigned bits;
	const unsigned long addr = pci_resource_start(dd->pcidev, 0);
	const size_t len = pci_resource_len(dd->pcidev, 0);

	/*
	 * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
	 * chip.  Linux (possibly the hardware) requires it to be on a power
	 * of 2 address matching the length (which has to be a power of 2).
	 * For rev1, that means the base address, for rev2, it will be just
	 * the PIO buffers themselves.
	 * For chips with two sets of buffers, the calculations are
	 * somewhat more complicated; we need to sum, and the piobufbase
	 * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
	 * The buffers are still packed, so a single range covers both.
	 */
	if (dd->piobcnt2k && dd->piobcnt4k) {
		/* 2 sizes for chip */
		unsigned long pio2kbase, pio4kbase;

		pio2kbase = dd->piobufbase & 0xffffffffUL;
		pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
		if (pio2kbase < pio4kbase) {
			/* all current chips */
			pioaddr = addr + pio2kbase;
			piolen = pio4kbase - pio2kbase +
				dd->piobcnt4k * dd->align4k;
		} else {
			pioaddr = addr + pio4kbase;
			piolen = pio2kbase - pio4kbase +
				dd->piobcnt2k * dd->palign;
		}
	} else {  /* single buffer size (2K, currently) */
		pioaddr = addr + dd->piobufbase;
		piolen = dd->piobcnt2k * dd->palign +
			dd->piobcnt4k * dd->align4k;
	}

	/*
	 * MTRR ranges must be a power-of-2 size: find the lowest set bit
	 * of piolen; if that single bit doesn't account for the whole
	 * length, round piolen up to the next power of 2.
	 */
	for (bits = 0; !(piolen & (1ULL << bits)); bits++)
		/* do nothing */ ;

	if (piolen != (1ULL << bits)) {
		piolen >>= bits;
		while (piolen >>= 1)
			bits++;
		piolen = 1ULL << (bits + 1);
	}
	/* Align the base down to the range size, staying inside the BAR. */
	if (pioaddr & (piolen - 1)) {
		u64 atmp;
		atmp = pioaddr & ~(piolen - 1);
		if (atmp < addr || (atmp + piolen) > (addr + len)) {
			qib_dev_err(dd, "No way to align address/size "
				    "(%llx/%llx), no WC mtrr\n",
				    (unsigned long long) atmp,
				    (unsigned long long) piolen << 1);
			ret = -ENODEV;
		} else {
			pioaddr = atmp;
			piolen <<= 1;
		}
	}

	if (!ret) {
		int cookie;

		cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
		if (cookie < 0) {
			/* redundant nested braces removed */
			qib_devinfo(dd->pcidev,
				    "mtrr_add() WC for PIO bufs "
				    "failed (%d)\n",
				    cookie);
			ret = -EINVAL;
		} else {
			dd->wc_cookie = cookie;
			dd->wc_base = (unsigned long) pioaddr;
			dd->wc_len = (unsigned long) piolen;
		}
	}

	return ret;
}
/**
 * qib_disable_wc - disable write combining for MMIO writes to the device
 * @dd: qlogic_ib device
 *
 * Removes the MTRR entry recorded in wc_cookie by qib_enable_wc().
 * The cookie is cleared even if mtrr_del() reports failure.
 */
void qib_disable_wc(struct qib_devdata *dd)
{
	int ret;

	if (!dd->wc_cookie)
		return;

	ret = mtrr_del(dd->wc_cookie, dd->wc_base, dd->wc_len);
	if (ret < 0)
		qib_devinfo(dd->pcidev,
			    "mtrr_del(%lx, %lx, %lx) failed: %d\n",
			    dd->wc_cookie, dd->wc_base,
			    dd->wc_len, ret);

	dd->wc_cookie = 0; /* even on failure */
}
/**
 * qib_unordered_wc - indicate whether write combining is unordered
 *
 * Because our performance depends on our ability to do write combining mmio
 * writes in the most efficient way, we need to know if we are on an Intel
 * or AMD x86_64 processor.  AMD x86_64 processors flush WC buffers out in
 * the order completed, and so no special flushing is required to get
 * correct ordering.  Intel processors, however, will flush write buffers
 * out in "random" orders, and so explicit ordering is needed at times.
 */
int qib_unordered_wc(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return 0;	/* AMD: WC flushes in completion order */
	return 1;		/* others (Intel): may flush out of order */
}

View File

@ -163,28 +163,30 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
cur_order, gfp_mask);
if (!ret) {
++chunk->npages;
if (ret) {
if (--cur_order < 0)
goto fail;
else
continue;
}
if (coherent)
++chunk->nsg;
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
++chunk->npages;
if (chunk->nsg <= 0)
goto fail;
if (coherent)
++chunk->nsg;
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
chunk = NULL;
}
npages -= 1 << cur_order;
} else {
--cur_order;
if (cur_order < 0)
if (chunk->nsg <= 0)
goto fail;
}
if (chunk->npages == MLX4_ICM_CHUNK_LEN)
chunk = NULL;
npages -= 1 << cur_order;
}
if (!coherent && chunk) {

View File

@ -1172,7 +1172,9 @@ struct ib_client {
struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);
int ib_register_device (struct ib_device *device);
int ib_register_device(struct ib_device *device,
int (*port_callback)(struct ib_device *,
u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);
int ib_register_client (struct ib_client *client);