Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) MLX5 bug fixes from Saeed Mahameed et al:
     - released wrong resources when firmware timeout happens
     - fix wrong check for encapsulation size limits
     - UAR memory leak
     - ETHTOOL_GRXCLSRLALL failed to fill in info->data

 2) Don't cache l3mdev on mis-matched local route, causes net devices to
    leak refs.  From Robert Shearman.

 3) Handle fragmented SKBs properly in macsec driver, the problem is
    that we were mis-sizing the sgvec table.  From Jason A. Donenfeld.

 4) We cannot have checksum offload enabled for inner UDP tunneled
    packet during IPSEC, from Ansis Atteka.

 5) Fix double SKB free in ravb driver, from Dan Carpenter.

 6) Fix CPU port handling in b53 DSA driver, from Florian Fainelli.

 7) Don't use on-stack buffers for usb_control_msg() in CAN usb driver,
    from Maksim Salau.

 8) Fix device leak in macvlan driver, from Herbert Xu.  We have to
    purge the broadcast queue properly on port destroy.

 9) Fix tx ring entry limit on EF10 devices in sfc driver.  From Bert
    Kenward.

10) Fix memory leaks in team driver, from Pan Bian.

11) Don't setup ipv6_stub before it can be actually used, from Paolo
    Abeni.

12) Fix tipc socket flow control accounting, from Parthasarathy
    Bhuvaragan.

13) Fix crash on module unload in hso driver, from Andreas Kemnade.

14) Fix purging of bridge multicast entries, the problem is that if we
    don't defer it to ndo_uninit it's possible for new entries to get
    added after we purge.  Fix from Xin Long.

15) Don't return garbage for PACKET_HDRLEN getsockopt, from Alexander
    Potapenko.

16) Fix autoneg stall properly in PHY layer, and revert micrel driver
    change that was papering over it.  From Alexander Kochetkov.

17) Don't dereference an ipv4 route as an ipv6 one in the ip6_tunnel
    code, from Cong Wang.

18) Clear out the congestion control private of the TCP socket in all
    of the right places, from Wei Wang.

19) rawv6_ioctl measures SKB length incorrectly, fix from Jamie
    Bainbridge.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
  ipv6: check raw payload size correctly in ioctl
  tcp: memset ca_priv data to 0 properly
  ipv6: check skb->protocol before lookup for nexthop
  net: core: Prevent from dereferencing null pointer when releasing SKB
  macsec: dynamically allocate space for sglist
  Revert "phy: micrel: Disable auto negotiation on startup"
  net: phy: fix auto-negotiation stall due to unavailable interrupt
  net/packet: check length in getsockopt() called with PACKET_HDRLEN
  net: ipv6: regenerate host route if moved to gc list
  bridge: move bridge multicast cleanup to ndo_uninit
  ipv6: fix source routing
  qed: Fix error in the dcbx app meta data initialization.
  netvsc: fix calculation of available send sections
  net: hso: fix module unloading
  tipc: fix socket flow control accounting error at tipc_recv_stream
  tipc: fix socket flow control accounting error at tipc_send_stream
  ipv6: move stub initialization after ipv6 setup completion
  team: fix memory leaks
  sfc: tx ring can only have 2048 entries for all EF10 NICs
  macvlan: Fix device ref leak when purging bc_queue
  ...
commit fc08b197bb
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -72,6 +72,8 @@ config CAN_PEAK_USB
 	    PCAN-USB Pro         dual CAN 2.0b channels USB adapter
 	    PCAN-USB FD          single CAN-FD channel USB adapter
 	    PCAN-USB Pro FD      dual CAN-FD channels USB adapter
+	    PCAN-Chip USB        CAN-FD to USB stamp module
 	    PCAN-USB X6          6 CAN-FD channels USB adapter
 
 	    (see also http://www.peak-system.com).

--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -739,13 +739,18 @@ static const struct net_device_ops gs_usb_netdev_ops = {
 static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
 {
 	struct gs_can *dev = netdev_priv(netdev);
-	struct gs_identify_mode imode;
+	struct gs_identify_mode *imode;
 	int rc;
 
+	imode = kmalloc(sizeof(*imode), GFP_KERNEL);
+
+	if (!imode)
+		return -ENOMEM;
+
 	if (do_identify)
-		imode.mode = GS_CAN_IDENTIFY_ON;
+		imode->mode = GS_CAN_IDENTIFY_ON;
 	else
-		imode.mode = GS_CAN_IDENTIFY_OFF;
+		imode->mode = GS_CAN_IDENTIFY_OFF;
 
 	rc = usb_control_msg(interface_to_usbdev(dev->iface),
 			     usb_sndctrlpipe(interface_to_usbdev(dev->iface),
@@ -755,10 +760,12 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
 			     USB_RECIP_INTERFACE,
 			     dev->channel,
 			     0,
-			     &imode,
-			     sizeof(imode),
+			     imode,
+			     sizeof(*imode),
 			     100);
 
+	kfree(imode);
+
 	return (rc > 0) ? 0 : rc;
 }

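The gs_usb change above follows a general USB-core rule: buffers handed to usb_control_msg() may be DMA-mapped, so they must come from the heap, never from the stack. A minimal sketch of the allocate/transfer/free pattern (MY_REQ and MY_VAL are hypothetical vendor-request constants, not part of the driver):

#include <linux/usb.h>
#include <linux/slab.h>

#define MY_REQ	0x42	/* hypothetical vendor request */
#define MY_VAL	0x01	/* hypothetical wValue */

static int send_mode(struct usb_device *udev, u8 mode)
{
	u8 *buf;
	int rc;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* never &stack_var */
	if (!buf)
		return -ENOMEM;
	*buf = mode;

	rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			     MY_REQ, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			     MY_VAL, 0, buf, sizeof(*buf), 100 /* ms */);

	kfree(buf);
	return (rc > 0) ? 0 : rc;
}
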
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -39,6 +39,7 @@ static struct usb_device_id peak_usb_table[] = {
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
+	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID)},
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)},
 	{} /* Terminating entry */
 };
@@ -51,6 +52,7 @@ static const struct peak_usb_adapter *const peak_usb_adapters_list[] = {
 	&pcan_usb_pro,
 	&pcan_usb_fd,
 	&pcan_usb_pro_fd,
+	&pcan_usb_chip,
 	&pcan_usb_x6,
 };

--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -27,6 +27,7 @@
 #define PCAN_USBPRO_PRODUCT_ID		0x000d
 #define PCAN_USBPROFD_PRODUCT_ID	0x0011
 #define PCAN_USBFD_PRODUCT_ID		0x0012
+#define PCAN_USBCHIP_PRODUCT_ID		0x0013
 #define PCAN_USBX6_PRODUCT_ID		0x0014
 
 #define PCAN_USB_DRIVER_NAME		"peak_usb"
@@ -90,6 +91,7 @@ struct peak_usb_adapter {
 extern const struct peak_usb_adapter pcan_usb;
 extern const struct peak_usb_adapter pcan_usb_pro;
 extern const struct peak_usb_adapter pcan_usb_fd;
+extern const struct peak_usb_adapter pcan_usb_chip;
 extern const struct peak_usb_adapter pcan_usb_pro_fd;
 extern const struct peak_usb_adapter pcan_usb_x6;

--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -1061,6 +1061,78 @@ const struct peak_usb_adapter pcan_usb_fd = {
 	.do_get_berr_counter = pcan_usb_fd_get_berr_counter,
 };
 
+/* describes the PCAN-CHIP USB */
+static const struct can_bittiming_const pcan_usb_chip_const = {
+	.name = "pcan_chip_usb",
+	.tseg1_min = 1,
+	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
+	.tseg2_min = 1,
+	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
+	.brp_min = 1,
+	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
+	.brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_chip_data_const = {
+	.name = "pcan_chip_usb",
+	.tseg1_min = 1,
+	.tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
+	.tseg2_min = 1,
+	.tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
+	.brp_min = 1,
+	.brp_max = (1 << PUCAN_TFAST_BRP_BITS),
+	.brp_inc = 1,
+};
+
+const struct peak_usb_adapter pcan_usb_chip = {
+	.name = "PCAN-Chip USB",
+	.device_id = PCAN_USBCHIP_PRODUCT_ID,
+	.ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
+	.ctrlmode_supported = CAN_CTRLMODE_FD |
+		CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+	.clock = {
+		.freq = PCAN_UFD_CRYSTAL_HZ,
+	},
+	.bittiming_const = &pcan_usb_chip_const,
+	.data_bittiming_const = &pcan_usb_chip_data_const,
+
+	/* size of device private data */
+	.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+	/* timestamps usage */
+	.ts_used_bits = 32,
+	.ts_period = 1000000, /* calibration period in ts. */
+	.us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+	.us_per_ts_shift = 0,
+
+	/* give here messages in/out endpoints */
+	.ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+	.ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0},
+
+	/* size of rx/tx usb buffers */
+	.rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+	.tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+	/* device callbacks */
+	.intf_probe = pcan_usb_pro_probe,	/* same as PCAN-USB Pro */
+	.dev_init = pcan_usb_fd_init,
+
+	.dev_exit = pcan_usb_fd_exit,
+	.dev_free = pcan_usb_fd_free,
+	.dev_set_bus = pcan_usb_fd_set_bus,
+	.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+	.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+	.dev_decode_buf = pcan_usb_fd_decode_buf,
+	.dev_start = pcan_usb_fd_start,
+	.dev_stop = pcan_usb_fd_stop,
+	.dev_restart_async = pcan_usb_fd_restart_async,
+	.dev_encode_msg = pcan_usb_fd_encode_msg,
+
+	.do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
+
 /* describes the PCAN-USB Pro FD adapter */
 static const struct can_bittiming_const pcan_usb_pro_fd_const = {
 	.name = "pcan_usb_pro_fd",

--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -326,6 +326,7 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
 
 static void b53_set_forwarding(struct b53_device *dev, int enable)
 {
+	struct dsa_switch *ds = dev->ds;
 	u8 mgmt;
 
 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
@@ -336,6 +337,15 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
 		mgmt &= ~SM_SW_FWD_EN;
 
 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+	/* Include IMP port in dumb forwarding mode when no tagging protocol is
+	 * set
+	 */
+	if (ds->ops->get_tag_protocol(ds) == DSA_TAG_PROTO_NONE) {
+		b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+		mgmt |= B53_MII_DUMB_FWDG_EN;
+		b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+	}
 }
 
 static void b53_enable_vlan(struct b53_device *dev, bool enable)
@@ -598,7 +608,8 @@ static void b53_switch_reset_gpio(struct b53_device *dev)
 
 static int b53_switch_reset(struct b53_device *dev)
 {
-	u8 mgmt;
+	unsigned int timeout = 1000;
+	u8 mgmt, reg;
 
 	b53_switch_reset_gpio(dev);
 
@@ -607,6 +618,28 @@ static int b53_switch_reset(struct b53_device *dev)
 		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
 	}
 
+	/* This is specific to 58xx devices here, do not use is58xx() which
+	 * covers the larger Starfighter 2 family, including 7445/7278 which
+	 * still use this driver as a library and need to perform the reset
+	 * earlier.
+	 */
+	if (dev->chip_id == BCM58XX_DEVICE_ID) {
+		b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+		reg |= SW_RST | EN_SW_RST | EN_CH_RST;
+		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
+
+		do {
+			b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+			if (!(reg & SW_RST))
+				break;
+
+			usleep_range(1000, 2000);
+		} while (timeout-- > 0);
+
+		if (timeout == 0)
+			return -ETIMEDOUT;
+	}
+
 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
 
 	if (!(mgmt & SM_SW_FWD_EN)) {
@@ -1731,7 +1764,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
 		.vlans = 4096,
 		.enabled_ports = 0x1ff,
 		.arl_entries = 4,
-		.cpu_port = B53_CPU_PORT_25,
+		.cpu_port = B53_CPU_PORT,
 		.vta_regs = B53_VTA_REGS,
 		.duplex_reg = B53_DUPLEX_STAT_GE,
 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,

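The 58xx reset added above is a standard bounded-poll loop: set the self-clearing reset bits, then poll until the hardware deasserts SW_RST or a retry budget runs out. A userspace analogue of just the control flow, with a stubbed register read standing in for b53_read8():

#include <stdio.h>

/* Stub: pretend the self-clearing SW_RST bit deasserts after a few reads. */
static int read_softreset(void)
{
	static int countdown = 3;
	return (countdown-- > 0) ? 0x80 : 0x00;	/* 0x80 == SW_RST */
}

int main(void)
{
	unsigned int timeout = 1000;
	int reg;

	do {
		reg = read_softreset();
		if (!(reg & 0x80))		/* reset complete */
			break;
		/* in the kernel: usleep_range(1000, 2000); */
	} while (timeout-- > 0);

	printf(reg & 0x80 ? "timed out\n" : "reset complete\n");
	return 0;
}
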
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -104,6 +104,10 @@
 #define  B53_UC_FWD_EN			BIT(6)
 #define  B53_MC_FWD_EN			BIT(7)
 
+/* Switch control (8 bit) */
+#define B53_SWITCH_CTRL			0x22
+#define  B53_MII_DUMB_FWDG_EN		BIT(6)
+
 /* (16 bit) */
 #define B53_UC_FLOOD_MASK		0x32
 #define B53_MC_FLOOD_MASK		0x34
@@ -139,6 +143,7 @@
 /* Software reset register (8 bit) */
 #define B53_SOFTRESET			0x79
 #define  SW_RST				BIT(7)
+#define  EN_CH_RST			BIT(6)
 #define  EN_SW_RST			BIT(4)
 
 /* Fast Aging Control register (8 bit) */

--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -90,7 +90,7 @@
 #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
 
 #define MLX5_UMR_ALIGN				(2048)
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(256)
 
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ		(64 * 1024)
 #define MLX5E_DEFAULT_LRO_TIMEOUT		32

--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -564,6 +564,7 @@ int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info)
 	int idx = 0;
 	int err = 0;
 
+	info->data = MAX_NUM_OF_ETHTOOL_RULES;
 	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
 		err = mlx5e_ethtool_get_flow(priv, info, location);
 		if (!err)

--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -174,7 +174,7 @@ unlock:
 
 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 {
-	struct mlx5e_sw_stats *s = &priv->stats.sw;
+	struct mlx5e_sw_stats temp, *s = &temp;
 	struct mlx5e_rq_stats *rq_stats;
 	struct mlx5e_sq_stats *sq_stats;
 	u64 tx_offload_none = 0;
@@ -229,6 +229,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
 				priv->stats.pport.phy_counters,
 				counter_set.phys_layer_cntrs.link_down_events);
+	memcpy(&priv->stats.sw, s, sizeof(*s));
 }
 
 static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
@@ -243,7 +244,6 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
 	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
 	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
 
-	memset(out, 0, outlen);
 	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 }

--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -639,7 +639,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 
 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
 	    rep->vport != FDB_UPLINK_VPORT) {
-		if (min_inline > esw->offloads.inline_mode) {
+		if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
+		    esw->offloads.inline_mode < min_inline) {
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
 				    min_inline, esw->offloads.inline_mode);
@@ -785,16 +786,15 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	return 0;
 }
 
-static int gen_vxlan_header_ipv4(struct net_device *out_dev,
-				 char buf[],
-				 unsigned char h_dest[ETH_ALEN],
-				 int ttl,
-				 __be32 daddr,
-				 __be32 saddr,
-				 __be16 udp_dst_port,
-				 __be32 vx_vni)
+static void gen_vxlan_header_ipv4(struct net_device *out_dev,
+				  char buf[], int encap_size,
+				  unsigned char h_dest[ETH_ALEN],
+				  int ttl,
+				  __be32 daddr,
+				  __be32 saddr,
+				  __be16 udp_dst_port,
+				  __be32 vx_vni)
 {
-	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
 	struct ethhdr *eth = (struct ethhdr *)buf;
 	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
 	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
@@ -817,20 +817,17 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
 	udp->dest = udp_dst_port;
 	vxh->vx_flags = VXLAN_HF_VNI;
 	vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-	return encap_size;
 }
 
-static int gen_vxlan_header_ipv6(struct net_device *out_dev,
-				 char buf[],
-				 unsigned char h_dest[ETH_ALEN],
-				 int ttl,
-				 struct in6_addr *daddr,
-				 struct in6_addr *saddr,
-				 __be16 udp_dst_port,
-				 __be32 vx_vni)
+static void gen_vxlan_header_ipv6(struct net_device *out_dev,
+				  char buf[], int encap_size,
+				  unsigned char h_dest[ETH_ALEN],
+				  int ttl,
+				  struct in6_addr *daddr,
+				  struct in6_addr *saddr,
+				  __be16 udp_dst_port,
+				  __be32 vx_vni)
 {
-	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
 	struct ethhdr *eth = (struct ethhdr *)buf;
 	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
 	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
@@ -852,8 +849,6 @@ static int gen_vxlan_header_ipv6(struct net_device *out_dev,
 	udp->dest = udp_dst_port;
 	vxh->vx_flags = VXLAN_HF_VNI;
 	vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-	return encap_size;
 }
 
 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
@@ -862,13 +857,20 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 					  struct net_device **out_dev)
 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
-	int encap_size, ttl, err;
 	struct neighbour *n = NULL;
 	struct flowi4 fl4 = {};
 	char *encap_header;
+	int ttl, err;
 
-	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+	if (max_encap_size < ipv4_encap_size) {
+		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+			       ipv4_encap_size, max_encap_size);
+		return -EOPNOTSUPP;
+	}
+
+	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
 	if (!encap_header)
 		return -ENOMEM;
 
@@ -903,11 +905,11 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 
 	switch (e->tunnel_type) {
 	case MLX5_HEADER_TYPE_VXLAN:
-		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
-						   e->h_dest, ttl,
-						   fl4.daddr,
-						   fl4.saddr, tun_key->tp_dst,
-						   tunnel_id_to_key32(tun_key->tun_id));
+		gen_vxlan_header_ipv4(*out_dev, encap_header,
+				      ipv4_encap_size, e->h_dest, ttl,
+				      fl4.daddr,
+				      fl4.saddr, tun_key->tp_dst,
+				      tunnel_id_to_key32(tun_key->tun_id));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -915,7 +917,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	}
 
 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-			       encap_size, encap_header, &e->encap_id);
+			       ipv4_encap_size, encap_header, &e->encap_id);
 out:
 	if (err && n)
 		neigh_release(n);
@@ -930,13 +932,20 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 
 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
-	int encap_size, err, ttl = 0;
 	struct neighbour *n = NULL;
 	struct flowi6 fl6 = {};
 	char *encap_header;
+	int err, ttl = 0;
 
-	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+	if (max_encap_size < ipv6_encap_size) {
+		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+			       ipv6_encap_size, max_encap_size);
+		return -EOPNOTSUPP;
+	}
+
+	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
 	if (!encap_header)
 		return -ENOMEM;
 
@@ -972,11 +981,11 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 
 	switch (e->tunnel_type) {
 	case MLX5_HEADER_TYPE_VXLAN:
-		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
-						   e->h_dest, ttl,
-						   &fl6.daddr,
-						   &fl6.saddr, tun_key->tp_dst,
-						   tunnel_id_to_key32(tun_key->tun_id));
+		gen_vxlan_header_ipv6(*out_dev, encap_header,
+				      ipv6_encap_size, e->h_dest, ttl,
+				      &fl6.daddr,
+				      &fl6.saddr, tun_key->tp_dst,
+				      tunnel_id_to_key32(tun_key->tun_id));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -984,7 +993,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 	}
 
 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-			       encap_size, encap_header, &e->encap_id);
+			       ipv6_encap_size, encap_header, &e->encap_id);
 out:
 	if (err && n)
 		neigh_release(n);

--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -911,8 +911,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	int num_vports = esw->enabled_vports;
-	int err;
-	int vport;
+	int err, vport;
 	u8 mlx5_mode;
 
 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
@@ -921,9 +920,17 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
+			return 0;
+		/* fall through */
+	case MLX5_CAP_INLINE_MODE_L2:
+		esw_warn(dev, "Inline mode can't be set\n");
 		return -EOPNOTSUPP;
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		break;
+	}
 
 	if (esw->offloads.num_flows > 0) {
 		esw_warn(dev, "Can't set inline mode when flows are configured\n");
@@ -966,18 +973,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
-
 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
 
 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 {
+	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 	struct mlx5_core_dev *dev = esw->dev;
 	int vport;
-	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 
 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
 		return -EOPNOTSUPP;
@@ -985,10 +988,18 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
+	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+		mlx5_mode = MLX5_INLINE_MODE_NONE;
+		goto out;
+	case MLX5_CAP_INLINE_MODE_L2:
+		mlx5_mode = MLX5_INLINE_MODE_L2;
+		goto out;
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		goto query_vports;
+	}
 
+query_vports:
 	for (vport = 1; vport <= nvfs; vport++) {
 		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
 		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
@@ -996,6 +1007,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 		prev_mlx5_mode = mlx5_mode;
 	}
 
+out:
 	*mode = mlx5_mode;
 	return 0;
 }

--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1029,7 +1029,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	if (err) {
 		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
 			FW_INIT_TIMEOUT_MILI);
-		goto out_err;
+		goto err_cmd_cleanup;
 	}
 
 	err = mlx5_core_enable_hca(dev, 0);

--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -87,6 +87,7 @@ static void up_rel_func(struct kref *kref)
 	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
 
 	list_del(&up->list);
+	iounmap(up->map);
 	if (mlx5_cmd_free_uar(up->mdev, up->index))
 		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
 	kfree(up->reg_bitmap);

--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -64,11 +64,11 @@
 	((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
 
 static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
-	{DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
-	{DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+	{DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_ISCSI},
+	{DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_FCOE},
+	{DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_ETH_ROCE},
+	{DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_ETH_ROCE},
+	{DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH},
 };
 
 static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)

--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1516,11 +1516,12 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
-	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
-	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
 	if (skb_put_padto(skb, ETH_ZLEN))
-		goto drop;
+		goto exit;
+
+	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
 	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
 		 entry / NUM_TX_DESC * DPTR_ALIGN;

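The ravb fix works because skb_put_padto() consumes (frees) the skb when padding fails; a pointer already published to the tx ring would then be freed a second time on the driver's error path. The safe ordering, shown as a runnable toy with a free-on-error helper modelled on skb_put_padto():

#include <stdlib.h>

struct buf { char *data; };

/* Like skb_put_padto(): frees its argument on failure. */
static int pad_to_min(struct buf *b)
{
	if (!b->data) {		/* stand-in for "padding failed" */
		free(b);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct buf *ring[1] = { NULL };
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;

	/* Correct order, as in the fix: publish the pointer into the ring
	 * only after the call that may free it has succeeded.
	 */
	if (pad_to_min(b))
		return 0;	/* b already freed; ring never saw it */

	ring[0] = b;
	free(ring[0]);		/* freed exactly once */
	return 0;
}
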
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -74,7 +74,10 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_RXQ_MIN_ENT		128U
 #define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))
 
-#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_35388(efx) ? \
+/* All EF10 architecture NICs steal one bit of the DMAQ size for various
+ * other purposes when counting TxQ entries, so we halve the queue size.
+ */
+#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_EF10(efx) ? \
 				 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
 
 static inline bool efx_rss_enabled(struct efx_nic *efx)

--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -16,6 +16,7 @@
  */
 
 #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
+#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
 #define EFX_WORKAROUND_10G(efx) 1
 
 /* Bit-bashed I2C reads cause performance drop */

--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1017,8 +1017,8 @@ tc35815_free_queues(struct net_device *dev)
 			BUG_ON(lp->tx_skbs[i].skb != skb);
 #endif
 			if (skb) {
-				dev_kfree_skb(skb);
 				pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+				dev_kfree_skb(skb);
 				lp->tx_skbs[i].skb = NULL;
 				lp->tx_skbs[i].skb_dma = 0;
 			}

--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -751,7 +751,6 @@ struct netvsc_device {
 	u32 send_section_cnt;
 	u32 send_section_size;
 	unsigned long *send_section_map;
-	int map_words;
 
 	/* Used for NetVSP initialization protocol */
 	struct completion channel_init_wait;

--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -236,6 +236,7 @@ static int netvsc_init_buf(struct hv_device *device)
 	struct netvsc_device *net_device;
 	struct nvsp_message *init_packet;
 	struct net_device *ndev;
+	size_t map_words;
 	int node;
 
 	net_device = get_outbound_net_device(device);
@@ -401,11 +402,9 @@ static int netvsc_init_buf(struct hv_device *device)
 		 net_device->send_section_size, net_device->send_section_cnt);
 
 	/* Setup state for managing the send buffer. */
-	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
-					     BITS_PER_LONG);
+	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);
 
-	net_device->send_section_map = kcalloc(net_device->map_words,
-					       sizeof(ulong), GFP_KERNEL);
+	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
 	if (net_device->send_section_map == NULL) {
 		ret = -ENOMEM;
 		goto cleanup;
@@ -683,7 +682,7 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 	unsigned long *map_addr = net_device->send_section_map;
 	unsigned int i;
 
-	for_each_clear_bit(i, map_addr, net_device->map_words) {
+	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
 		if (sync_test_and_set_bit(i, map_addr) == 0)
 			return i;
 	}

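Both netvsc hunks revolve around one piece of arithmetic: a bitmap covering send_section_cnt sections needs DIV_ROUND_UP(cnt, BITS_PER_LONG) words of storage, while iteration must be bounded by the bit count, not the word count. In plain C:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int section_cnt = 100;
	size_t map_words = DIV_ROUND_UP(section_cnt, BITS_PER_LONG);

	/* for_each_clear_bit()'s bound is in bits. Passing the word count
	 * (2 here on 64-bit) capped the usable sections at 2 of 100; the
	 * fix passes section_cnt so every real section is reachable while
	 * the padding bits past section_cnt - 1 are never scanned.
	 */
	printf("%u sections -> %zu words (%zu bits of storage)\n",
	       section_cnt, map_words, map_words * BITS_PER_LONG);
	return 0;
}
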
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -617,7 +617,8 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
 
 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 					     unsigned char **iv,
-					     struct scatterlist **sg)
+					     struct scatterlist **sg,
+					     int num_frags)
 {
 	size_t size, iv_offset, sg_offset;
 	struct aead_request *req;
@@ -629,7 +630,7 @@ static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 
 	size = ALIGN(size, __alignof__(struct scatterlist));
 	sg_offset = size;
-	size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+	size += sizeof(struct scatterlist) * num_frags;
 
 	tmp = kmalloc(size, GFP_ATOMIC);
 	if (!tmp)
@@ -649,6 +650,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 {
 	int ret;
 	struct scatterlist *sg;
+	struct sk_buff *trailer;
 	unsigned char *iv;
 	struct ethhdr *eth;
 	struct macsec_eth_header *hh;
@@ -723,7 +725,14 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 		return ERR_PTR(-EINVAL);
 	}
 
-	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
+	ret = skb_cow_data(skb, 0, &trailer);
+	if (unlikely(ret < 0)) {
+		macsec_txsa_put(tx_sa);
+		kfree_skb(skb);
+		return ERR_PTR(ret);
+	}
+
+	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
 	if (!req) {
 		macsec_txsa_put(tx_sa);
 		kfree_skb(skb);
@@ -732,7 +741,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 
 	macsec_fill_iv(iv, secy->sci, pn);
 
-	sg_init_table(sg, MAX_SKB_FRAGS + 1);
+	sg_init_table(sg, ret);
 	skb_to_sgvec(skb, sg, 0, skb->len);
 
 	if (tx_sc->encrypt) {
@@ -917,6 +926,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 {
 	int ret;
 	struct scatterlist *sg;
+	struct sk_buff *trailer;
 	unsigned char *iv;
 	struct aead_request *req;
 	struct macsec_eth_header *hdr;
@@ -927,7 +937,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
+	ret = skb_cow_data(skb, 0, &trailer);
+	if (unlikely(ret < 0)) {
+		kfree_skb(skb);
+		return ERR_PTR(ret);
+	}
+	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
 	if (!req) {
 		kfree_skb(skb);
 		return ERR_PTR(-ENOMEM);
@@ -936,7 +951,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 	hdr = (struct macsec_eth_header *)skb->data;
 	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
 
-	sg_init_table(sg, MAX_SKB_FRAGS + 1);
+	sg_init_table(sg, ret);
 	skb_to_sgvec(skb, sg, 0, skb->len);
 
 	if (hdr->tci_an & MACSEC_TCI_E) {

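The macsec change sizes the scatterlist from skb_cow_data(), which un-shares the skb as needed and returns how many buffer segments it spans — a number that can exceed the old fixed MAX_SKB_FRAGS + 1 bound for heavily fragmented skbs, which is what corrupted memory before. The resulting pattern, sketched as an illustrative kernel-style helper (not the driver's actual function):

#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Illustrative only: reserve sgvec room from skb_cow_data()'s return
 * value instead of assuming MAX_SKB_FRAGS + 1 entries are enough.
 */
static int build_sgvec(struct sk_buff *skb)
{
	struct sk_buff *trailer;
	struct scatterlist *sg;
	int n;

	n = skb_cow_data(skb, 0, &trailer);	/* segment count, or -errno */
	if (n < 0)
		return n;

	sg = kmalloc_array(n, sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, n);
	skb_to_sgvec(skb, sg, 0, skb->len);	/* fills at most n entries */

	/* ... hand sg to the AEAD request here ... */
	kfree(sg);
	return 0;
}
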
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1139,6 +1139,7 @@ static int macvlan_port_create(struct net_device *dev)
 static void macvlan_port_destroy(struct net_device *dev)
 {
 	struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+	struct sk_buff *skb;
 
 	dev->priv_flags &= ~IFF_MACVLAN_PORT;
 	netdev_rx_handler_unregister(dev);
@@ -1147,7 +1148,15 @@ static void macvlan_port_destroy(struct net_device *dev)
 	 * but we need to cancel it and purge left skbs if any.
 	 */
 	cancel_work_sync(&port->bc_work);
-	__skb_queue_purge(&port->bc_queue);
+
+	while ((skb = __skb_dequeue(&port->bc_queue))) {
+		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+		if (src)
+			dev_put(src->dev);
+
+		kfree_skb(skb);
+	}
 
 	kfree(port);
 }

--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -297,17 +297,6 @@ static int kszphy_config_init(struct phy_device *phydev)
 	if (priv->led_mode >= 0)
 		kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
 
-	if (phy_interrupt_is_valid(phydev)) {
-		int ctl = phy_read(phydev, MII_BMCR);
-
-		if (ctl < 0)
-			return ctl;
-
-		ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE);
-		if (ret < 0)
-			return ret;
-	}
-
 	return 0;
 }

--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -591,16 +591,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 EXPORT_SYMBOL(phy_mii_ioctl);
 
 /**
- * phy_start_aneg - start auto-negotiation for this PHY device
+ * phy_start_aneg_priv - start auto-negotiation for this PHY device
  * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
  *
  * Description: Sanitizes the settings (if we're not autonegotiating
  *   them), and then calls the driver's config_aneg function.
  *   If the PHYCONTROL Layer is operating, we change the state to
  *   reflect the beginning of Auto-negotiation or forcing.
  */
-int phy_start_aneg(struct phy_device *phydev)
+static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
 {
+	bool trigger = 0;
 	int err;
 
 	if (!phydev->drv)
@@ -628,10 +630,40 @@ int phy_start_aneg(struct phy_device *phydev)
 		}
 	}
 
+	/* Re-schedule a PHY state machine to check PHY status because
+	 * negotiation may already be done and aneg interrupt may not be
+	 * generated.
+	 */
+	if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+		err = phy_aneg_done(phydev);
+		if (err > 0) {
+			trigger = true;
+			err = 0;
+		}
+	}
+
 out_unlock:
 	mutex_unlock(&phydev->lock);
+
+	if (trigger)
+		phy_trigger_machine(phydev, sync);
+
 	return err;
 }
+
+/**
+ * phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ *   them), and then calls the driver's config_aneg function.
+ *   If the PHYCONTROL Layer is operating, we change the state to
+ *   reflect the beginning of Auto-negotiation or forcing.
+ */
+int phy_start_aneg(struct phy_device *phydev)
+{
+	return phy_start_aneg_priv(phydev, true);
+}
 EXPORT_SYMBOL(phy_start_aneg);
 
 /**
@@ -659,7 +691,7 @@ void phy_start_machine(struct phy_device *phydev)
  *   state machine runs.
  */
 
-static void phy_trigger_machine(struct phy_device *phydev, bool sync)
+void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
 	if (sync)
 		cancel_delayed_work_sync(&phydev->state_queue);
@@ -1154,7 +1186,7 @@ void phy_state_machine(struct work_struct *work)
 	mutex_unlock(&phydev->lock);
 
 	if (needs_aneg)
-		err = phy_start_aneg(phydev);
+		err = phy_start_aneg_priv(phydev, false);
 	else if (do_suspend)
 		phy_suspend(phydev);

--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2361,8 +2361,10 @@ start_again:
 
 	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
 			  TEAM_CMD_OPTIONS_GET);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
 		goto nla_put_failure;
@@ -2634,8 +2636,10 @@ start_again:
 
 	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
 			  TEAM_CMD_PORT_LIST_GET);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
 		goto nla_put_failure;

--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -369,7 +369,7 @@ config USB_NET_NET1080
 	  optionally with LEDs that indicate traffic
 
 config USB_NET_PLUSB
-	tristate "Prolific PL-2301/2302/25A1 based cables"
+	tristate "Prolific PL-2301/2302/25A1/27A1 based cables"
 	# if the handshake/init/reset problems, from original 'plusb',
 	# are ever resolved ... then remove "experimental"
 	depends on USB_USBNET

--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -3279,9 +3279,9 @@ static void __exit hso_exit(void)
 	pr_info("unloaded\n");
 
 	tty_unregister_driver(tty_drv);
-	put_tty_driver(tty_drv);
 	/* deregister the usb driver */
 	usb_deregister(&hso_driver);
+	put_tty_driver(tty_drv);
 }
 
 /* Module definitions */

--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -102,7 +102,7 @@ static int pl_reset(struct usbnet *dev)
 }
 
 static const struct driver_info prolific_info = {
-	.description =	"Prolific PL-2301/PL-2302/PL-25A1",
+	.description =	"Prolific PL-2301/PL-2302/PL-25A1/PL-27A1",
 	.flags =	FLAG_POINTTOPOINT | FLAG_NO_SETINT,
 		/* some PL-2302 versions seem to fail usb_set_interface() */
 	.reset =	pl_reset,
@@ -139,6 +139,17 @@ static const struct usb_device_id products [] = {
 					 * Host-to-Host Cable
 					 */
 	.driver_info =	(unsigned long) &prolific_info,
+
+},
+
+/* super speed cables */
+{
+	USB_DEVICE(0x067b, 0x27a1),	/* PL-27A1, no eeprom
+					 * also: goobay Active USB 3.0
+					 * Data Link,
+					 * Unitek Y-3501
+					 */
+	.driver_info =	(unsigned long) &prolific_info,
 },
 
 	{ },		// END
@@ -158,5 +169,5 @@ static struct usb_driver plusb_driver = {
 module_usb_driver(plusb_driver);
 
 MODULE_AUTHOR("David Brownell");
-MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
+MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1/27A1 USB Host to Host Link Driver");
 MODULE_LICENSE("GPL");

--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -852,6 +852,7 @@ void phy_change_work(struct work_struct *work);
 void phy_mac_interrupt(struct phy_device *phydev, int new_link);
 void phy_start_machine(struct phy_device *phydev);
 void phy_stop_machine(struct phy_device *phydev);
+void phy_trigger_machine(struct phy_device *phydev, bool sync);
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_ksettings_get(struct phy_device *phydev,

--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -123,6 +123,7 @@ static void br_dev_uninit(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
 
+	br_multicast_dev_del(br);
 	br_multicast_uninit_stats(br);
 	br_vlan_flush(br);
 	free_percpu(br->stats);

--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -311,7 +311,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
 
 	br_fdb_delete_by_port(br, NULL, 0, 1);
 
-	br_multicast_dev_del(br);
 	cancel_delayed_work_sync(&br->gc_work);
 
 	br_sysfs_delbr(br->dev);

--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2450,6 +2450,9 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 {
 	unsigned long flags;
 
+	if (unlikely(!skb))
+		return;
+
 	if (likely(atomic_read(&skb->users) == 1)) {
 		smp_rmb();
 		atomic_set(&skb->users, 0);

--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2359,7 +2359,8 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
 		}
 
 		/* L3 master device is the loopback for that domain */
-		dev_out = l3mdev_master_dev_rcu(dev_out) ? : net->loopback_dev;
+		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(res)) ? :
+			net->loopback_dev;
 		fl4->flowi4_oif = dev_out->ifindex;
 		flags |= RTCF_LOCAL;
 		goto make_route;

--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -168,12 +168,8 @@ void tcp_assign_congestion_control(struct sock *sk)
 	}
 out:
 	rcu_read_unlock();
+	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-	/* Clear out private data before diag gets it and
-	 * the ca has not been initialized.
-	 */
-	if (ca->get_info)
-		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 	if (ca->flags & TCP_CONG_NEEDS_ECN)
 		INET_ECN_xmit(sk);
 	else
@@ -200,11 +196,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
 	tcp_cleanup_congestion_control(sk);
 	icsk->icsk_ca_ops = ca;
 	icsk->icsk_ca_setsockopt = 1;
+	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-	if (sk->sk_state != TCP_CLOSE) {
-		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+	if (sk->sk_state != TCP_CLOSE)
 		tcp_init_congestion_control(sk);
-	}
 }
 
 /* Manage refcounts on socket close. */

--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -29,6 +29,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	u16 mac_len = skb->mac_len;
 	int udp_offset, outer_hlen;
 	__wsum partial;
+	bool need_ipsec;
 
 	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
 		goto out;
@@ -62,8 +63,10 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 
 	ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
 
+	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
 	/* Try to offload checksum if possible */
 	offload_csum = !!(need_csum &&
+			  !need_ipsec &&
 			  (skb->dev->features &
 			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
 				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3271,14 +3271,24 @@ static void addrconf_gre_config(struct net_device *dev)
 static int fixup_permanent_addr(struct inet6_dev *idev,
 				struct inet6_ifaddr *ifp)
 {
-	if (!ifp->rt) {
-		struct rt6_info *rt;
+	/* rt6i_ref == 0 means the host route was removed from the
+	 * FIB, for example, if 'lo' device is taken down. In that
+	 * case regenerate the host route.
+	 */
+	if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) {
+		struct rt6_info *rt, *prev;
 
 		rt = addrconf_dst_alloc(idev, &ifp->addr, false);
 		if (unlikely(IS_ERR(rt)))
 			return PTR_ERR(rt);
 
+		/* ifp->rt can be accessed outside of rtnl */
+		spin_lock(&ifp->lock);
+		prev = ifp->rt;
 		ifp->rt = rt;
+		spin_unlock(&ifp->lock);
+
+		ip6_rt_put(prev);
 	}
 
 	if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {

--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -933,8 +933,6 @@ static int __init inet6_init(void)
 	if (err)
 		goto igmp_fail;
 
-	ipv6_stub = &ipv6_stub_impl;
-
 	err = ipv6_netfilter_init();
 	if (err)
 		goto netfilter_fail;
@@ -1010,6 +1008,10 @@ static int __init inet6_init(void)
 	if (err)
 		goto sysctl_fail;
 #endif
+
+	/* ensure that ipv6 stubs are visible only after ipv6 is ready */
+	wmb();
+	ipv6_stub = &ipv6_stub_impl;
 out:
 	return err;

--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -909,6 +909,8 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
 {
 	switch (opt->type) {
 	case IPV6_SRCRT_TYPE_0:
+	case IPV6_SRCRT_STRICT:
+	case IPV6_SRCRT_TYPE_2:
 		ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
 		break;
 	case IPV6_SRCRT_TYPE_4:
@@ -1163,6 +1165,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
 
 	switch (opt->srcrt->type) {
 	case IPV6_SRCRT_TYPE_0:
+	case IPV6_SRCRT_STRICT:
+	case IPV6_SRCRT_TYPE_2:
 		fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
 		break;
 	case IPV6_SRCRT_TYPE_4:

--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1037,7 +1037,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net *net = t->net;
 	struct net_device_stats *stats = &t->dev->stats;
-	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct ipv6hdr *ipv6h;
 	struct ipv6_tel_txoption opt;
 	struct dst_entry *dst = NULL, *ndst = NULL;
 	struct net_device *tdev;
@@ -1057,26 +1057,28 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
 	/* NBMA tunnel */
 	if (ipv6_addr_any(&t->parms.raddr)) {
-		struct in6_addr *addr6;
-		struct neighbour *neigh;
-		int addr_type;
-
-		if (!skb_dst(skb))
-			goto tx_err_link_failure;
-
-		neigh = dst_neigh_lookup(skb_dst(skb),
-					 &ipv6_hdr(skb)->daddr);
-		if (!neigh)
-			goto tx_err_link_failure;
+		if (skb->protocol == htons(ETH_P_IPV6)) {
+			struct in6_addr *addr6;
+			struct neighbour *neigh;
+			int addr_type;
+
+			if (!skb_dst(skb))
+				goto tx_err_link_failure;
+
+			neigh = dst_neigh_lookup(skb_dst(skb),
+						 &ipv6_hdr(skb)->daddr);
+			if (!neigh)
+				goto tx_err_link_failure;
 
-		addr6 = (struct in6_addr *)&neigh->primary_key;
-		addr_type = ipv6_addr_type(addr6);
+			addr6 = (struct in6_addr *)&neigh->primary_key;
+			addr_type = ipv6_addr_type(addr6);
 
-		if (addr_type == IPV6_ADDR_ANY)
-			addr6 = &ipv6_hdr(skb)->daddr;
+			if (addr_type == IPV6_ADDR_ANY)
+				addr6 = &ipv6_hdr(skb)->daddr;
 
-		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
-		neigh_release(neigh);
+			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+			neigh_release(neigh);
+		}
 	} else if (!(t->parms.flags &
 		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
 		/* enable the cache only only if the routing decision does

--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1749,7 +1749,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
 		idev = in6_dev_get(dev);
 		if (!idev)
 			break;
-		if (idev->cnf.ndisc_notify)
+		if (idev->cnf.ndisc_notify ||
+		    net->ipv6.devconf_all->ndisc_notify)
 			ndisc_send_unsol_na(dev);
 		in6_dev_put(idev);
 		break;

--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1178,8 +1178,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb)
-			amount = skb_tail_pointer(skb) -
-				skb_transport_header(skb);
+			amount = skb->len;
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		return put_user(amount, (int __user *)arg);
 	}

--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3836,6 +3836,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 	case PACKET_HDRLEN:
 		if (len > sizeof(int))
 			len = sizeof(int);
+		if (len < sizeof(int))
+			return -EINVAL;
 		if (copy_from_user(&val, optval, len))
 			return -EFAULT;
 		switch (val) {

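The PACKET_HDRLEN fix closes a classic getsockopt hole: len was clamped downward, but a caller-supplied len shorter than sizeof(int) left val partly uninitialized before it was interpreted, and stack garbage could flow back to userspace. The check logic in isolation, runnable:

#include <stdio.h>

/* Toy model of the fixed path: only interpret val if a full int's worth
 * of option data was actually available to copy.
 */
static int check_optlen(unsigned long len)
{
	if (len > sizeof(int))
		len = sizeof(int);	/* clamp oversized buffers */
	if (len < sizeof(int))
		return -1;		/* the added check: reject short reads */
	return 0;
}

int main(void)
{
	printf("len=2: %s\n", check_optlen(2) ? "rejected" : "accepted");
	printf("len=4: %s\n", check_optlen(4) ? "rejected" : "accepted");
	printf("len=8: %s\n", check_optlen(8) ? "rejected" : "accepted");
	return 0;
}
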
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1083,7 +1083,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
 		}
 	} while (sent < dlen && !rc);
 
-	return rc ? rc : sent;
+	return sent ? sent : rc;
 }
 
 /**
@@ -1484,7 +1484,7 @@ restart:
 	if (unlikely(flags & MSG_PEEK))
 		goto exit;
 
-	tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
+	tsk->rcv_unacked += tsk_inc(tsk, hlen + msg_data_sz(msg));
 	if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
 		tipc_sk_send_ack(tsk);
 	tsk_advance_rx_queue(sk);