Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
commit d0eaeec8e8
@@ -1,6 +1,7 @@
 SOLOS_ATTR_RO(DriverVersion)
 SOLOS_ATTR_RO(APIVersion)
 SOLOS_ATTR_RO(FirmwareVersion)
+SOLOS_ATTR_RO(Version)
 // SOLOS_ATTR_RO(DspVersion)
 // SOLOS_ATTR_RO(CommonHandshake)
 SOLOS_ATTR_RO(Connected)

@@ -1161,6 +1161,14 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
         dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n",
                  major_ver, minor_ver, fpga_ver);
 
+        if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade ||
+                              db_fpga_upgrade || db_firmware_upgrade)) {
+                dev_warn(&dev->dev,
+                         "FPGA too old; cannot upgrade flash. Use JTAG.\n");
+                fpga_upgrade = firmware_upgrade = 0;
+                db_fpga_upgrade = db_firmware_upgrade = 0;
+        }
+
         if (card->fpga_version >= DMA_SUPPORTED){
                 card->using_dma = 1;
         } else {

@@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 
         /* device is off until link detection */
         netif_carrier_off(dev);
-        netif_stop_queue(dev);
 
         return dev;
 }

@@ -440,7 +440,6 @@ struct qeth_qdio_out_q {
          * index of buffer to be filled by driver; state EMPTY or PACKING
          */
         int next_buf_to_fill;
-        int sync_iqdio_error;
         /*
          * number of buffers that are currently filled (PRIMED)
          * -> these buffers are hardware-owned

@@ -695,14 +694,6 @@ struct qeth_mc_mac {
         int is_vmac;
 };
 
-struct qeth_skb_data {
-        __u32 magic;
-        int count;
-};
-
-#define QETH_SKB_MAGIC 0x71657468
-#define QETH_SIGA_CC2_RETRIES 3
-
 struct qeth_rx {
         int b_count;
         int b_index;

@@ -877,8 +877,8 @@ out:
         return;
 }
 
-static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-                struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb)
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+                struct qeth_qdio_out_buffer *buf)
 {
         int i;
         struct sk_buff *skb;

@@ -887,13 +887,11 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
         if (buf->buffer->element[0].flags & 0x40)
                 atomic_dec(&queue->set_pci_flags_count);
 
-        if (!qeth_skip_skb) {
-                skb = skb_dequeue(&buf->skb_list);
-                while (skb) {
-                        atomic_dec(&skb->users);
-                        dev_kfree_skb_any(skb);
-                        skb = skb_dequeue(&buf->skb_list);
-                }
+        skb = skb_dequeue(&buf->skb_list);
+        while (skb) {
+                atomic_dec(&skb->users);
+                dev_kfree_skb_any(skb);
+                skb = skb_dequeue(&buf->skb_list);
         }
         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
                 if (buf->buffer->element[i].addr && buf->is_header[i])

@@ -909,12 +907,6 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
         atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
 
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-                struct qeth_qdio_out_buffer *buf)
-{
-        __qeth_clear_output_buffer(queue, buf, 0);
-}
-
 void qeth_clear_qdio_buffers(struct qeth_card *card)
 {
         int i, j;

@@ -2833,7 +2825,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
                 }
         }
 
-        queue->sync_iqdio_error = 0;
         queue->card->dev->trans_start = jiffies;
         if (queue->card->options.performance_stats) {
                 queue->card->perf_stats.outbound_do_qdio_cnt++;

@@ -2849,10 +2840,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
                 queue->card->perf_stats.outbound_do_qdio_time +=
                         qeth_get_micros() -
                         queue->card->perf_stats.outbound_do_qdio_start_time;
-        if (rc > 0) {
-                if (!(rc & QDIO_ERROR_SIGA_BUSY))
-                        queue->sync_iqdio_error = rc & 3;
-        }
         if (rc) {
                 queue->card->stats.tx_errors += count;
                 /* ignore temporary SIGA errors without busy condition */

@@ -2916,7 +2903,7 @@ void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
 {
         struct qeth_card *card = (struct qeth_card *)card_ptr;
 
-        if (card->dev)
+        if (card->dev && (card->dev->flags & IFF_UP))
                 napi_schedule(&card->napi);
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);

@@ -2940,7 +2927,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
         struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
         struct qeth_qdio_out_buffer *buffer;
         int i;
-        unsigned qeth_send_err;
 
         QETH_CARD_TEXT(card, 6, "qdouhdl");
         if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {

@@ -2956,9 +2942,8 @@
         }
         for (i = first_element; i < (first_element + count); ++i) {
                 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
-                qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error);
-                __qeth_clear_output_buffer(queue, buffer,
-                        (qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0);
+                qeth_handle_send_error(card, buffer, qdio_error);
+                qeth_clear_output_buffer(queue, buffer);
         }
         atomic_sub(count, &queue->used_buffers);
         /* check if we need to do something on this outbound queue */

@@ -3183,10 +3168,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
                 int offset, int hd_len)
 {
         struct qeth_qdio_out_buffer *buffer;
-        struct sk_buff *skb1;
-        struct qeth_skb_data *retry_ctrl;
         int index;
-        int rc;
 
         /* spin until we get the queue ... */
         while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,

@@ -3205,25 +3187,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
         atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
         qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
         qeth_flush_buffers(queue, index, 1);
-        if (queue->sync_iqdio_error == 2) {
-                skb1 = skb_dequeue(&buffer->skb_list);
-                while (skb1) {
-                        atomic_dec(&skb1->users);
-                        skb1 = skb_dequeue(&buffer->skb_list);
-                }
-                retry_ctrl = (struct qeth_skb_data *) &skb->cb[16];
-                if (retry_ctrl->magic != QETH_SKB_MAGIC) {
-                        retry_ctrl->magic = QETH_SKB_MAGIC;
-                        retry_ctrl->count = 0;
-                }
-                if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) {
-                        retry_ctrl->count++;
-                        rc = dev_queue_xmit(skb);
-                } else {
-                        dev_kfree_skb_any(skb);
-                        QETH_CARD_TEXT(card, 2, "qrdrop");
-                }
-        }
         return 0;
 out:
         atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);

@@ -1554,6 +1554,11 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
 
 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 {
+        if (WARN_ON(!dev_queue)) {
+                printk(KERN_INFO "netif_stop_queue() cannot be called before "
+                       "register_netdev()");
+                return;
+        }
         set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 }
 
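Note: the netif_tx_stop_queue() hunk above and the skge_devinit() hunk earlier look like two halves of the same cleanup: the header now warns and returns when it is handed a NULL netdev_queue, hinting that netif_stop_queue() was called before register_netdev(), while skge drops what appears to be exactly such a premature call. A minimal plain-C sketch of the guard pattern, with hypothetical names standing in for the kernel types (not the kernel API):

#include <stdio.h>

/* Hypothetical stand-ins for struct netdev_queue and its "stopped" bit. */
struct tx_queue {
        int stopped;
};

/* Same guard pattern as the hunk above: complain and bail out instead of
 * dereferencing a queue that has not been set up yet. */
static void stop_queue(struct tx_queue *q)
{
        if (q == NULL) {
                fprintf(stderr, "stop_queue() called before the queue was registered\n");
                return;
        }
        q->stopped = 1;
}

int main(void)
{
        struct tx_queue q = { 0 };

        stop_queue(NULL);   /* too early: warns and does nothing */
        stop_queue(&q);     /* normal case: marks the queue stopped */
        printf("stopped=%d\n", q.stopped);
        return 0;
}
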
@@ -2,6 +2,7 @@
 #define _NET_DST_OPS_H
 #include <linux/types.h>
 #include <linux/percpu_counter.h>
+#include <linux/cache.h>
 
 struct dst_entry;
 struct kmem_cachep;

@@ -887,7 +887,7 @@ static ssize_t pktgen_if_write(struct file *file,
         i += len;
 
         if (debug) {
-                size_t copy = min(count, 1023);
+                size_t copy = min_t(size_t, count, 1023);
                 char tb[copy + 1];
                 if (copy_from_user(tb, user_buffer, copy))
                         return -EFAULT;
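Note: in the pktgen hunk above, count is a size_t while 1023 is a plain int, and the kernel's min() macro warns at build time when its two arguments have different types, so the fix compares them through min_t() with an explicit type. A standalone illustration of the idea (plain C; MIN_T here is an illustrative stand-in, not the kernel's macro):

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the kernel's min_t(): force both sides to one
 * explicit type before comparing, so a size_t count and an int literal
 * such as 1023 can be compared without a type-mismatch warning. */
#define MIN_T(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        size_t count = 4096;    /* e.g. bytes supplied by the caller */
        size_t copy = MIN_T(size_t, count, 1023);

        printf("copying %zu of %zu bytes\n", copy, count);
        return 0;
}
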
@@ -2612,8 +2612,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
         /* Update any of the values, used when we're incrementing various
          * fields.
          */
-        queue_map = pkt_dev->cur_queue_map;
         mod_cur_headers(pkt_dev);
+        queue_map = pkt_dev->cur_queue_map;
 
         datalen = (odev->hard_header_len + 16) & ~0xf;

@@ -2976,8 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
         /* Update any of the values, used when we're incrementing various
          * fields.
          */
-        queue_map = pkt_dev->cur_queue_map;
         mod_cur_headers(pkt_dev);
+        queue_map = pkt_dev->cur_queue_map;
 
         skb = __netdev_alloc_skb(odev,
                         pkt_dev->cur_pkt_size + 64

@@ -349,7 +349,7 @@ found:
 
         /* Check for overlap with preceding fragment. */
         if (prev &&
-            (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+            (FRAG6_CB(prev)->offset + prev->len) > offset)
                 goto discard_fq;
 
         /* Look for overlap with succeeding segment. */
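Note: the reassembly hunk above rewrites the overlap test so that the predecessor's end is compared against the new fragment's offset directly instead of through a subtraction. A small stand-alone sketch of the predicate (illustrative only; prev_end stands in for FRAG6_CB(prev)->offset + prev->len):

#include <stdbool.h>
#include <stdio.h>

/* The new fragment overlaps its predecessor when the predecessor ends
 * past the new fragment's starting offset. */
static bool overlaps_prev(int prev_end, int offset)
{
        return prev_end > offset;
}

int main(void)
{
        printf("%d\n", overlaps_prev(1400, 1200));  /* 1: prev runs past offset */
        printf("%d\n", overlaps_prev(1400, 1400));  /* 0: fragments merely touch */
        return 0;
}
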
@@ -249,8 +249,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
         rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
         rm->data.op_nents = ceil(total_len, PAGE_SIZE);
         rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
-        if (!rm->data.op_sg)
+        if (!rm->data.op_sg) {
+                rds_message_put(rm);
                 return ERR_PTR(-ENOMEM);
+        }
 
         for (i = 0; i < rm->data.op_nents; ++i) {
                 sg_set_page(&rm->data.op_sg[i],

@@ -268,6 +268,10 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
                 goto nla_put_failure;
 
         nla_nest_end(skb, nest);
+
+        if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0)
+                goto nla_put_failure;
+
         return skb->len;
 
 nla_put_failure: