Merge branch 's390-next'

Julian Wiedmann says:

====================
s390/net: updates for 4.14

A mixed bag of minor fixes, cleanups and refactors for net-next. Please apply.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-08-15 10:58:40 -07:00
commit 0461b76661
9 changed files with 169 additions and 207 deletions
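
A recurring cleanup throughout the hunks below is dropping the inline keyword from static functions that live in .c files, leaving the inlining decision to the compiler. A minimal before/after sketch of that pattern, using a hypothetical helper loosely modeled on lcs_threads_running() rather than any of the real driver code:

#include <stdio.h>

/* Two versions of the same helper; the series keeps the second form. */
#if 0
/* before: explicit inline hint in a .c file */
static inline int card_threads_running(unsigned long mask, unsigned long threads)
{
	return (mask & threads) != 0;
}
#else
/* after: plain static -- the compiler still inlines this where it pays off */
static int card_threads_running(unsigned long mask, unsigned long threads)
{
	return (mask & threads) != 0;
}
#endif

int main(void)
{
	unsigned long running = 0x5;	/* threads 0 and 2 are running */

	printf("%d\n", card_threads_running(running, 0x4));	/* 1 */
	printf("%d\n", card_threads_running(running, 0x2));	/* 0 */
	return 0;
}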

View File

@ -305,7 +305,7 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
* ch The channel, the sense code belongs to.
* sense The sense code to inspect.
*/
static inline void ccw_unit_check(struct channel *ch, __u8 sense)
static void ccw_unit_check(struct channel *ch, __u8 sense)
{
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
"%s(%s): %02x",

View File

@ -327,8 +327,7 @@ lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
spin_unlock_irqrestore(&card->mask_lock, flags);
wake_up(&card->wait_q);
}
static inline int
lcs_threads_running(struct lcs_card *card, unsigned long threads)
static int lcs_threads_running(struct lcs_card *card, unsigned long threads)
{
unsigned long flags;
int rc = 0;
@ -346,8 +345,7 @@ lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
lcs_threads_running(card, threads) == 0);
}
static inline int
lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
{
unsigned long flags;
@ -373,8 +371,7 @@ lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
wake_up(&card->wait_q);
}
static inline int
__lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
{
unsigned long flags;
int rc = 0;
@ -444,8 +441,7 @@ lcs_setup_card(struct lcs_card *card)
INIT_LIST_HEAD(&card->lancmd_waiters);
}
static inline void
lcs_clear_multicast_list(struct lcs_card *card)
static void lcs_clear_multicast_list(struct lcs_card *card)
{
#ifdef CONFIG_IP_MULTICAST
struct lcs_ipm_list *ipm;
@ -656,8 +652,7 @@ __lcs_resume_channel(struct lcs_channel *channel)
/**
* Make a buffer ready for processing.
*/
static inline void
__lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
{
int prev, next;
@ -1169,8 +1164,8 @@ lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
/**
* function called by net device to handle multicast address relevant things
*/
static inline void
lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
static void lcs_remove_mc_addresses(struct lcs_card *card,
struct in_device *in4_dev)
{
struct ip_mc_list *im4;
struct list_head *l;
@ -1196,8 +1191,9 @@ lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
spin_unlock_irqrestore(&card->ipm_lock, flags);
}
static inline struct lcs_ipm_list *
lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card,
struct ip_mc_list *im4,
char *buf)
{
struct lcs_ipm_list *tmp, *ipm = NULL;
struct list_head *l;
@ -1218,8 +1214,8 @@ lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
return ipm;
}
static inline void
lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
static void lcs_set_mc_addresses(struct lcs_card *card,
struct in_device *in4_dev)
{
struct ip_mc_list *im4;

View File

@ -249,14 +249,14 @@ struct ll_header {
* Compatibility macros for busy handling
* of network devices.
*/
static inline void netiucv_clear_busy(struct net_device *dev)
static void netiucv_clear_busy(struct net_device *dev)
{
struct netiucv_priv *priv = netdev_priv(dev);
clear_bit(0, &priv->tbusy);
netif_wake_queue(dev);
}
static inline int netiucv_test_and_set_busy(struct net_device *dev)
static int netiucv_test_and_set_busy(struct net_device *dev)
{
struct netiucv_priv *priv = netdev_priv(dev);
netif_stop_queue(dev);

View File

@ -857,11 +857,6 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
}
}
static inline int qeth_get_ip_protocol(struct sk_buff *skb)
{
return ip_hdr(skb)->protocol;
}
static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
struct qeth_buffer_pool_entry *entry)
{
@ -951,8 +946,10 @@ int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
int extra_elems, int data_offset);
int qeth_get_elements_for_frags(struct sk_buff *);
int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
struct sk_buff *, struct qeth_hdr *, int, int);
int qeth_do_send_packet_fast(struct qeth_card *card,
struct qeth_qdio_out_q *queue, struct sk_buff *skb,
struct qeth_hdr *hdr, unsigned int offset,
int hd_len);
int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
struct sk_buff *, struct qeth_hdr *, int);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

View File

@ -101,7 +101,7 @@ void qeth_close_dev(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_close_dev);
static inline const char *qeth_get_cardname(struct qeth_card *card)
static const char *qeth_get_cardname(struct qeth_card *card)
{
if (card->info.guestlan) {
switch (card->info.type) {
@ -330,7 +330,7 @@ static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
return q;
}
static inline int qeth_cq_init(struct qeth_card *card)
static int qeth_cq_init(struct qeth_card *card)
{
int rc;
@ -352,7 +352,7 @@ out:
return rc;
}
static inline int qeth_alloc_cq(struct qeth_card *card)
static int qeth_alloc_cq(struct qeth_card *card)
{
int rc;
@ -397,7 +397,7 @@ kmsg_out:
goto out;
}
static inline void qeth_free_cq(struct qeth_card *card)
static void qeth_free_cq(struct qeth_card *card)
{
if (card->qdio.c_q) {
--card->qdio.no_in_queues;
@ -408,8 +408,9 @@ static inline void qeth_free_cq(struct qeth_card *card)
card->qdio.out_bufstates = NULL;
}
static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
int delayed) {
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
int delayed)
{
enum iucv_tx_notify n;
switch (sbalf15) {
@ -432,8 +433,8 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
return n;
}
static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
int bidx, int forced_cleanup)
static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
int forced_cleanup)
{
if (q->card->options.cq != QETH_CQ_ENABLED)
return;
@ -475,8 +476,9 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
}
static inline void qeth_qdio_handle_aob(struct qeth_card *card,
unsigned long phys_aob_addr) {
static void qeth_qdio_handle_aob(struct qeth_card *card,
unsigned long phys_aob_addr)
{
struct qaob *aob;
struct qeth_qdio_out_buffer *buffer;
enum iucv_tx_notify notification;
@ -2228,7 +2230,7 @@ static int qeth_cm_setup(struct qeth_card *card)
}
static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
static int qeth_get_initial_mtu_for_card(struct qeth_card *card)
{
switch (card->info.type) {
case QETH_CARD_TYPE_UNKNOWN:
@ -2251,7 +2253,7 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
}
}
static inline int qeth_get_mtu_outof_framesize(int framesize)
static int qeth_get_mtu_outof_framesize(int framesize)
{
switch (framesize) {
case 0x4000:
@ -2267,7 +2269,7 @@ static inline int qeth_get_mtu_outof_framesize(int framesize)
}
}
static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
static int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
{
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
@ -2738,8 +2740,8 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card)
}
}
static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
struct qeth_card *card)
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
struct qeth_card *card)
{
struct list_head *plh;
struct qeth_buffer_pool_entry *entry;
@ -2870,7 +2872,7 @@ int qeth_init_qdio_queues(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
{
switch (link_type) {
case QETH_LINK_TYPE_HSTR:
@ -3888,27 +3890,17 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
}
EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
static inline void __qeth_fill_buffer(struct sk_buff *skb,
struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
int offset)
static void __qeth_fill_buffer(struct sk_buff *skb,
struct qeth_qdio_out_buffer *buf,
bool is_first_elem, unsigned int offset)
{
int length = skb_headlen(skb);
int length_here;
int element;
char *data;
int first_lap, cnt;
struct skb_frag_struct *frag;
element = *next_element_to_fill;
data = skb->data;
first_lap = (is_tso == 0 ? 1 : 0);
if (offset >= 0) {
data = skb->data + offset;
length -= offset;
first_lap = 0;
}
struct qdio_buffer *buffer = buf->buffer;
int element = buf->next_element_to_fill;
int length = skb_headlen(skb) - offset;
char *data = skb->data + offset;
int length_here, cnt;
/* map linear part into buffer element(s) */
while (length > 0) {
/* length_here is the remaining amount of data in this page */
length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
@ -3918,34 +3910,28 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
buffer->element[element].addr = data;
buffer->element[element].length = length_here;
length -= length_here;
if (!length) {
if (first_lap)
if (skb_shinfo(skb)->nr_frags)
buffer->element[element].eflags =
SBAL_EFLAGS_FIRST_FRAG;
else
buffer->element[element].eflags = 0;
else
if (is_first_elem) {
is_first_elem = false;
if (length || skb_is_nonlinear(skb))
/* skb needs additional elements */
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
SBAL_EFLAGS_FIRST_FRAG;
else
buffer->element[element].eflags = 0;
} else {
if (first_lap)
buffer->element[element].eflags =
SBAL_EFLAGS_FIRST_FRAG;
else
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
}
data += length_here;
element++;
first_lap = 0;
}
/* map page frags into buffer element(s) */
for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
frag = &skb_shinfo(skb)->frags[cnt];
data = (char *)page_to_phys(skb_frag_page(frag)) +
frag->page_offset;
length = frag->size;
skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
data = skb_frag_address(frag);
length = skb_frag_size(frag);
while (length > 0) {
length_here = PAGE_SIZE -
((unsigned long) data % PAGE_SIZE);
@ -3964,23 +3950,25 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
if (buffer->element[element - 1].eflags)
buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
*next_element_to_fill = element;
buf->next_element_to_fill = element;
}
static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf, struct sk_buff *skb,
struct qeth_hdr *hdr, int offset, int hd_len)
static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
struct sk_buff *skb, struct qeth_hdr *hdr,
unsigned int offset, int hd_len)
{
struct qdio_buffer *buffer;
int flush_cnt = 0, hdr_len, large_send = 0;
int flush_cnt = 0, hdr_len;
bool is_first_elem = true;
buffer = buf->buffer;
refcount_inc(&skb->users);
skb_queue_tail(&buf->skb_list, skb);
/*check first on TSO ....*/
if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
int element = buf->next_element_to_fill;
is_first_elem = false;
hdr_len = sizeof(struct qeth_hdr_tso) +
((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
@ -3989,13 +3977,14 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
buffer->element[element].length = hdr_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
buf->next_element_to_fill++;
skb->data += hdr_len;
skb->len -= hdr_len;
large_send = 1;
skb_pull(skb, hdr_len);
}
if (offset >= 0) {
/* IQD */
if (offset > 0) {
int element = buf->next_element_to_fill;
is_first_elem = false;
buffer->element[element].addr = hdr;
buffer->element[element].length = sizeof(struct qeth_hdr) +
hd_len;
@ -4004,8 +3993,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
buf->next_element_to_fill++;
}
__qeth_fill_buffer(skb, buffer, large_send,
(int *)&buf->next_element_to_fill, offset);
__qeth_fill_buffer(skb, buf, is_first_elem, offset);
if (!queue->do_pack) {
QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
@ -4030,8 +4018,9 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
}
int qeth_do_send_packet_fast(struct qeth_card *card,
struct qeth_qdio_out_q *queue, struct sk_buff *skb,
struct qeth_hdr *hdr, int offset, int hd_len)
struct qeth_qdio_out_q *queue, struct sk_buff *skb,
struct qeth_hdr *hdr, unsigned int offset,
int hd_len)
{
struct qeth_qdio_out_buffer *buffer;
int index;
@ -4111,7 +4100,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
}
}
tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
tmp = qeth_fill_buffer(queue, buffer, skb, hdr, 0, 0);
queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
QDIO_MAX_BUFFERS_PER_Q;
flush_count += tmp;
@ -4834,7 +4823,7 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
static inline int qeth_get_qdio_q_format(struct qeth_card *card)
static int qeth_get_qdio_q_format(struct qeth_card *card)
{
if (card->info.type == QETH_CARD_TYPE_IQD)
return QDIO_IQDIO_QFMT;
@ -4899,9 +4888,12 @@ out:
return;
}
static inline void qeth_qdio_establish_cq(struct qeth_card *card,
struct qdio_buffer **in_sbal_ptrs,
void (**queue_start_poll) (struct ccw_device *, int, unsigned long)) {
static void qeth_qdio_establish_cq(struct qeth_card *card,
struct qdio_buffer **in_sbal_ptrs,
void (**queue_start_poll)
(struct ccw_device *, int,
unsigned long))
{
int i;
if (card->options.cq == QETH_CQ_ENABLED) {
@ -5193,9 +5185,10 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
struct qdio_buffer_element *element,
struct sk_buff **pskb, int offset, int *pfrag, int data_len)
static int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
struct qdio_buffer_element *element,
struct sk_buff **pskb, int offset, int *pfrag,
int data_len)
{
struct page *page = virt_to_page(element->addr);
if (*pskb == NULL) {

View File

@ -78,7 +78,7 @@ static ssize_t qeth_dev_card_type_show(struct device *dev,
static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
static inline const char *qeth_get_bufsize_str(struct qeth_card *card)
static const char *qeth_get_bufsize_str(struct qeth_card *card)
{
if (card->qdio.in_buf_size == 16384)
return "16k";

View File

@ -231,13 +231,7 @@ static void qeth_l2_del_all_macs(struct qeth_card *card)
spin_unlock_bh(&card->mclock);
}
static inline u32 qeth_l2_mac_hash(const u8 *addr)
{
return get_unaligned((u32 *)(&addr[2]));
}
static inline int qeth_l2_get_cast_type(struct qeth_card *card,
struct sk_buff *skb)
static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
{
if (card->info.type == QETH_CARD_TYPE_OSN)
return RTN_UNSPEC;
@ -248,8 +242,8 @@ static inline int qeth_l2_get_cast_type(struct qeth_card *card,
return RTN_UNSPEC;
}
static inline void qeth_l2_hdr_csum(struct qeth_card *card,
struct qeth_hdr *hdr, struct sk_buff *skb)
static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
@ -519,15 +513,6 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
/* fall back to alternative mechanism: */
}
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc) {
QETH_DBF_MESSAGE(2, "could not query adapter "
"parameters on device %s: x%x\n",
CARD_BUS_ID(card), rc);
}
}
if (card->info.type == QETH_CARD_TYPE_IQD ||
card->info.type == QETH_CARD_TYPE_OSM ||
card->info.type == QETH_CARD_TYPE_OSX ||
@ -615,13 +600,13 @@ static void qeth_promisc_to_bridge(struct qeth_card *card)
* only if there is not in the hash table storage already
*
*/
static void
qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha,
u8 is_uc)
{
u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
struct qeth_mac *mac;
hash_for_each_possible(card->mac_htable, mac, hnode,
qeth_l2_mac_hash(ha->addr)) {
hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) {
if (is_uc == mac->is_uc &&
!memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
@ -638,9 +623,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
mac->is_uc = is_uc;
mac->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->mac_htable, &mac->hnode,
qeth_l2_mac_hash(mac->mac_addr));
hash_add(card->mac_htable, &mac->hnode, mac_hash);
}
static void qeth_l2_set_rx_mode(struct net_device *dev)
@ -707,7 +690,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
int data_offset = -1;
int elements_needed = 0;
int hd_len = 0;
int nr_frags;
unsigned int nr_frags;
if (card->qdio.do_prio_queueing || (cast_type &&
card->info.is_multicast_different))
@ -747,6 +730,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
if (lin_rc)
goto tx_drop;
}
nr_frags = skb_shinfo(new_skb)->nr_frags;
if (card->info.type == QETH_CARD_TYPE_OSN)
hdr = (struct qeth_hdr *)skb->data;
@ -760,11 +744,11 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
if (!hdr)
goto tx_drop;
elements_needed++;
skb_reset_mac_header(new_skb);
qeth_l2_fill_header(card, hdr, new_skb, cast_type);
hdr->hdr.l2.pkt_length = new_skb->len;
memcpy(((char *)hdr) + sizeof(struct qeth_hdr),
skb_mac_header(new_skb), ETH_HLEN);
skb_copy_from_linear_data(new_skb,
((char *)hdr) + sizeof(*hdr),
ETH_HLEN);
} else {
/* create a clone with writeable headroom */
new_skb = skb_realloc_headroom(skb,
@ -772,7 +756,6 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
if (!new_skb)
goto tx_drop;
hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
qeth_l2_fill_header(card, hdr, new_skb, cast_type);
if (new_skb->ip_summed == CHECKSUM_PARTIAL)
qeth_l2_hdr_csum(card, hdr, new_skb);
@ -799,13 +782,10 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
if (card->options.performance_stats) {
nr_frags = skb_shinfo(new_skb)->nr_frags;
if (nr_frags) {
card->perf_stats.sg_skbs_sent++;
/* nr_frags + skb->data */
card->perf_stats.sg_frags_sent += nr_frags + 1;
}
if (card->options.performance_stats && nr_frags) {
card->perf_stats.sg_skbs_sent++;
/* nr_frags + skb->data */
card->perf_stats.sg_frags_sent += nr_frags + 1;
}
if (new_skb != skb)
dev_kfree_skb_any(skb);
@ -1744,11 +1724,26 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
return rc;
}
static inline int ipa_cmd_sbp(struct qeth_card *card)
static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
enum qeth_ipa_sbp_cmd sbp_cmd,
unsigned int cmd_length)
{
return (card->info.type == QETH_CARD_TYPE_IQD) ?
IPA_CMD_SETBRIDGEPORT_IQD :
IPA_CMD_SETBRIDGEPORT_OSA;
enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ?
IPA_CMD_SETBRIDGEPORT_IQD :
IPA_CMD_SETBRIDGEPORT_OSA;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
iob = qeth_get_ipacmd_buffer(card, ipa_cmd, 0);
if (!iob)
return iob;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
cmd_length;
cmd->data.sbp.hdr.command_code = sbp_cmd;
cmd->data.sbp.hdr.used_total = 1;
cmd->data.sbp.hdr.seq_no = 1;
return iob;
}
static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
@ -1778,21 +1773,13 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
static void qeth_bridgeport_query_support(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
struct _qeth_sbp_cbctl cbctl;
QETH_CARD_TEXT(card, 2, "brqsuppo");
iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED,
sizeof(struct qeth_sbp_query_cmds_supp));
if (!iob)
return;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength =
sizeof(struct qeth_ipacmd_sbp_hdr) +
sizeof(struct qeth_sbp_query_cmds_supp);
cmd->data.sbp.hdr.command_code =
IPA_SBP_QUERY_COMMANDS_SUPPORTED;
cmd->data.sbp.hdr.used_total = 1;
cmd->data.sbp.hdr.seq_no = 1;
if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
(void *)&cbctl) ||
qeth_bridgeport_makerc(card, &cbctl,
@ -1846,7 +1833,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
{
int rc = 0;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
struct _qeth_sbp_cbctl cbctl = {
.data = {
.qports = {
@ -1859,16 +1845,9 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "brqports");
if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
return -EOPNOTSUPP;
iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0);
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength =
sizeof(struct qeth_ipacmd_sbp_hdr);
cmd->data.sbp.hdr.command_code =
IPA_SBP_QUERY_BRIDGE_PORTS;
cmd->data.sbp.hdr.used_total = 1;
cmd->data.sbp.hdr.seq_no = 1;
rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
(void *)&cbctl);
if (rc < 0)
@ -1900,7 +1879,6 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
int rc = 0;
int cmdlength;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
struct _qeth_sbp_cbctl cbctl;
enum qeth_ipa_sbp_cmd setcmd;
@ -1908,32 +1886,24 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
switch (role) {
case QETH_SBP_ROLE_NONE:
setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
sizeof(struct qeth_sbp_reset_role);
cmdlength = sizeof(struct qeth_sbp_reset_role);
break;
case QETH_SBP_ROLE_PRIMARY:
setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
sizeof(struct qeth_sbp_set_primary);
cmdlength = sizeof(struct qeth_sbp_set_primary);
break;
case QETH_SBP_ROLE_SECONDARY:
setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
sizeof(struct qeth_sbp_set_secondary);
cmdlength = sizeof(struct qeth_sbp_set_secondary);
break;
default:
return -EINVAL;
}
if (!(card->options.sbp.supported_funcs & setcmd))
return -EOPNOTSUPP;
iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
iob = qeth_sbp_build_cmd(card, setcmd, cmdlength);
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength = cmdlength;
cmd->data.sbp.hdr.command_code = setcmd;
cmd->data.sbp.hdr.used_total = 1;
cmd->data.sbp.hdr.seq_no = 1;
rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb,
(void *)&cbctl);
if (rc < 0)

View File

@ -247,7 +247,8 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
return -ENOENT;
addr->ref_counter--;
if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL ||
addr->type == QETH_IP_TYPE_RXIP))
return rc;
if (addr->in_progress)
return -EINPROGRESS;
@ -329,8 +330,9 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
kfree(addr);
}
} else {
if (addr->type == QETH_IP_TYPE_NORMAL)
addr->ref_counter++;
if (addr->type == QETH_IP_TYPE_NORMAL ||
addr->type == QETH_IP_TYPE_RXIP)
addr->ref_counter++;
}
return rc;
@ -784,11 +786,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "addrxip4");
QETH_CARD_TEXT(card, 2, "delrxip4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
QETH_CARD_TEXT(card, 2, "addrxip6");
QETH_CARD_TEXT(card, 2, "delrxip6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@ -867,7 +869,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
return rc;
}
static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
static u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
{
if (cast_type == RTN_MULTICAST)
return QETH_CAST_MULTICAST;
@ -876,7 +878,7 @@ static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
return QETH_CAST_UNICAST;
}
static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
static u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
{
u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
if (cast_type == RTN_MULTICAST)
@ -890,22 +892,10 @@ static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
static int qeth_l3_setadapter_parms(struct qeth_card *card)
{
int rc;
int rc = 0;
QETH_DBF_TEXT(SETUP, 2, "setadprm");
if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
dev_info(&card->gdev->dev,
"set adapter parameters not supported.\n");
QETH_DBF_TEXT(SETUP, 2, " notsupp");
return 0;
}
rc = qeth_query_setadapterparms(card);
if (rc) {
QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: "
"0x%x\n", dev_name(&card->gdev->dev), rc);
return rc;
}
if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
rc = qeth_setadpparms_change_macaddr(card);
if (rc)
@ -1656,9 +1646,8 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
struct sk_buff *skb, struct qeth_hdr *hdr,
unsigned short *vlan_id)
static int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr, unsigned short *vlan_id)
{
__u16 prot;
struct iphdr *ip_hdr;
@ -2408,7 +2397,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return rc;
}
inline int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
{
int cast_type = RTN_UNSPEC;
struct neighbour *n = NULL;
@ -2546,8 +2535,8 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
rcu_read_unlock();
}
static inline void qeth_l3_hdr_csum(struct qeth_card *card,
struct qeth_hdr *hdr, struct sk_buff *skb)
static void qeth_l3_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
@ -2582,7 +2571,7 @@ static void qeth_tso_fill_header(struct qeth_card *card,
hdr->ext.hdr_len = 28;
/*insert non-fix values */
hdr->ext.mss = skb_shinfo(skb)->gso_size;
hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb));
hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
sizeof(struct qeth_hdr_tso));
tcph->check = 0;
@ -2650,7 +2639,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
int tx_bytes = skb->len;
bool use_tso;
int data_offset = -1;
int nr_frags;
unsigned int nr_frags;
if (((card->info.type == QETH_CARD_TYPE_IQD) &&
(((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
@ -2675,7 +2664,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
/* Ignore segment size from skb_is_gso(), 1 page is always used. */
use_tso = skb_is_gso(skb) &&
(qeth_get_ip_protocol(skb) == IPPROTO_TCP) && (ipv == 4);
(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
if (card->info.type == QETH_CARD_TYPE_IQD) {
new_skb = skb;
@ -2727,6 +2716,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (lin_rc)
goto tx_drop;
}
nr_frags = skb_shinfo(new_skb)->nr_frags;
if (use_tso) {
hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
@ -2786,7 +2776,6 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (new_skb != skb)
dev_kfree_skb_any(skb);
if (card->options.performance_stats) {
nr_frags = skb_shinfo(new_skb)->nr_frags;
if (use_tso) {
card->perf_stats.large_send_bytes += tx_bytes;
card->perf_stats.large_send_cnt++;

View File

@ -895,9 +895,26 @@ static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
u8 *addr)
{
__be32 ipv4_addr;
struct in6_addr ipv6_addr;
if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
return -EINVAL;
}
if (proto == QETH_PROT_IPV4) {
memcpy(&ipv4_addr, addr, sizeof(ipv4_addr));
if (ipv4_is_multicast(ipv4_addr)) {
QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n");
return -EINVAL;
}
} else if (proto == QETH_PROT_IPV6) {
memcpy(&ipv6_addr, addr, sizeof(ipv6_addr));
if (ipv6_addr_is_multicast(&ipv6_addr)) {
QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n");
return -EINVAL;
}
}
return 0;
}
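
The new checks above rely on the kernel's ipv4_is_multicast() and ipv6_addr_is_multicast() helpers, which boil down to prefix tests (224.0.0.0/4 for IPv4, ff00::/8 for IPv6). A small userspace sketch of the IPv4 case, using inet_pton() and an open-coded prefix check instead of the kernel helper:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdio.h>

/* IPv4 multicast addresses are 224.0.0.0/4, i.e. the top four bits are 1110. */
static bool is_ipv4_multicast(struct in_addr addr)
{
	return (ntohl(addr.s_addr) & 0xf0000000UL) == 0xe0000000UL;
}

int main(void)
{
	const char *samples[] = { "192.168.1.1", "224.0.0.251", "239.255.255.250" };

	for (int i = 0; i < 3; i++) {
		struct in_addr a;

		if (inet_pton(AF_INET, samples[i], &a) != 1)
			continue;
		printf("%-16s multicast: %s\n", samples[i],
		       is_ipv4_multicast(a) ? "yes" : "no");
	}
	return 0;
}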