Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) In ip_gre tunnel, handle the conflict between TUNNEL_{SEQ,CSUM} and GSO/LLTX properly. From Sabrina Dubroca.

 2) Stop properly on error in lan78xx_read_otp(), from Phil Elwell.

 3) Don't uncompress in slip before rstate is initialized, from Tejaswi Tanikella.

 4) When using 1.x firmware on aquantia, issue a deinit before we hardware reset the chip, otherwise we break dirty wake WOL. From Igor Russkikh.

 5) Correct log check in vhost_vq_access_ok(), from Stefan Hajnoczi.

 6) Fix ethtool -x crashes in bnxt_en, from Michael Chan.

 7) Fix races in l2tp tunnel creation and duplicate tunnel detection, from Guillaume Nault.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (22 commits)
  l2tp: fix race in duplicate tunnel detection
  l2tp: fix races in tunnel creation
  tun: send netlink notification when the device is modified
  tun: set the flags before registering the netdevice
  lan78xx: Don't reset the interface on open
  bnxt_en: Fix NULL pointer dereference at bnxt_free_irq().
  bnxt_en: Need to include RDMA rings in bnxt_check_rings().
  bnxt_en: Support max-mtu with VF-reps
  bnxt_en: Ignore src port field in decap filter nodes
  bnxt_en: do not allow wildcard matches for L2 flows
  bnxt_en: Fix ethtool -x crash when device is down.
  vhost: return bool from *_access_ok() functions
  vhost: fix vhost_vq_access_ok() log check
  vhost: Fix vhost_copy_to_user()
  net: aquantia: oops when shutdown on already stopped device
  net: aquantia: Regression on reset with 1.x firmware
  cdc_ether: flag the Cinterion AHS8 modem by gemalto as WWAN
  slip: Check if rstate is initialized before uncompressing
  lan78xx: Avoid spurious kevent 4 "error"
  lan78xx: Correctly indicate invalid OTP
  ...
commit 5d1365940a
@@ -951,9 +951,11 @@ void aq_nic_shutdown(struct aq_nic_s *self)
 
     netif_device_detach(self->ndev);
 
-    err = aq_nic_stop(self);
-    if (err < 0)
-        goto err_exit;
+    if (netif_running(self->ndev)) {
+        err = aq_nic_stop(self);
+        if (err < 0)
+            goto err_exit;
+    }
     aq_nic_deinit(self);
 
 err_exit:
@@ -48,6 +48,8 @@
 #define FORCE_FLASHLESS 0
 
 static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+                                      enum hal_atl_utils_fw_state_e state);
 
 int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
 {
@@ -247,6 +249,20 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
     self->rbl_enabled = (boot_exit_code != 0);
 
+    /* FW 1.x may bootup in an invalid POWER state (WOL feature).
+     * We should work around this by forcing its state back to DEINIT
+     */
+    if (!hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
+                                aq_hw_read_reg(self,
+                                               HW_ATL_MPI_FW_VERSION))) {
+        int err = 0;
+
+        hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
+        AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
+                        HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
+                        10, 1000U);
+    }
+
     if (self->rbl_enabled)
         return hw_atl_utils_soft_reset_rbl(self);
     else
@@ -6090,7 +6090,7 @@ static void bnxt_free_irq(struct bnxt *bp)
     free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
     bp->dev->rx_cpu_rmap = NULL;
 #endif
-    if (!bp->irq_tbl)
+    if (!bp->irq_tbl || !bp->bnapi)
         return;
 
     for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -7686,6 +7686,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
     if (bp->flags & BNXT_FLAG_AGG_RINGS)
         rx_rings <<= 1;
     cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+    if (bp->flags & BNXT_FLAG_NEW_RM)
+        cp += bnxt_get_ulp_msix_num(bp);
     return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
                                  vnics);
 }
@@ -870,17 +870,22 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
                          u8 *hfunc)
 {
     struct bnxt *bp = netdev_priv(dev);
-    struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+    struct bnxt_vnic_info *vnic;
     int i = 0;
 
     if (hfunc)
         *hfunc = ETH_RSS_HASH_TOP;
 
-    if (indir)
+    if (!bp->vnic_info)
+        return 0;
+
+    vnic = &bp->vnic_info[0];
+    if (indir && vnic->rss_table) {
         for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
             indir[i] = le16_to_cpu(vnic->rss_table[i]);
+    }
 
-    if (key)
+    if (key && vnic->rss_hash_key)
         memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
 
     return 0;
@@ -377,6 +377,30 @@ static bool is_wildcard(void *mask, int len)
     return true;
 }
 
+static bool is_exactmatch(void *mask, int len)
+{
+    const u8 *p = mask;
+    int i;
+
+    for (i = 0; i < len; i++)
+        if (p[i] != 0xff)
+            return false;
+
+    return true;
+}
+
+static bool bits_set(void *key, int len)
+{
+    const u8 *p = key;
+    int i;
+
+    for (i = 0; i < len; i++)
+        if (p[i] != 0)
+            return true;
+
+    return false;
+}
+
 static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                     __le16 ref_flow_handle,
                                     __le32 tunnel_handle, __le16 *flow_handle)
@@ -764,6 +788,41 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
         return false;
     }
 
+    /* Currently source/dest MAC cannot be partial wildcard */
+    if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
+        !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
+        netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
+        return false;
+    }
+    if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
+        !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
+        netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
+        return false;
+    }
+
+    /* Currently VLAN fields cannot be partial wildcard */
+    if (bits_set(&flow->l2_key.inner_vlan_tci,
+                 sizeof(flow->l2_key.inner_vlan_tci)) &&
+        !is_exactmatch(&flow->l2_mask.inner_vlan_tci,
+                       sizeof(flow->l2_mask.inner_vlan_tci))) {
+        netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n");
+        return false;
+    }
+    if (bits_set(&flow->l2_key.inner_vlan_tpid,
+                 sizeof(flow->l2_key.inner_vlan_tpid)) &&
+        !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
+                       sizeof(flow->l2_mask.inner_vlan_tpid))) {
+        netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
+        return false;
+    }
+
+    /* Currently Ethertype must be set */
+    if (!is_exactmatch(&flow->l2_mask.ether_type,
+                       sizeof(flow->l2_mask.ether_type))) {
+        netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
+        return false;
+    }
+
     return true;
 }
 
@@ -992,8 +1051,10 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
 
     /* Check if there's another flow using the same tunnel decap.
      * If not, add this tunnel to the table and resolve the other
-     * tunnel header fileds
+     * tunnel header fileds. Ignore src_port in the tunnel_key,
+     * since it is not required for decap filters.
      */
+    decap_key->tp_src = 0;
     decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
                                          &tc_info->decap_ht_params,
                                          decap_key);
@@ -64,6 +64,31 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
     return rc;
 }
 
+static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
+                              u16 *max_mtu)
+{
+    struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+    struct hwrm_func_qcfg_input req = {0};
+    u16 mtu;
+    int rc;
+
+    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+    req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
+
+    mutex_lock(&bp->hwrm_cmd_lock);
+
+    rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+    if (!rc) {
+        mtu = le16_to_cpu(resp->max_mtu_configured);
+        if (!mtu)
+            *max_mtu = BNXT_MAX_MTU;
+        else
+            *max_mtu = mtu;
+    }
+    mutex_unlock(&bp->hwrm_cmd_lock);
+    return rc;
+}
+
 static int bnxt_vf_rep_open(struct net_device *dev)
 {
     struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
@@ -365,6 +390,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
                                     struct net_device *dev)
 {
     struct net_device *pf_dev = bp->dev;
+    u16 max_mtu;
 
     dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
     dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
@@ -380,6 +406,10 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
     bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
                              dev->perm_addr);
     ether_addr_copy(dev->dev_addr, dev->perm_addr);
+    /* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
+    if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
+        dev->max_mtu = max_mtu;
+    dev->min_mtu = ETH_ZLEN;
 }
 
 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
@@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
         if(x < 0 || x > comp->rslot_limit)
             goto bad;
 
+        /* Check if the cstate is initialized */
+        if (!comp->rstate[x].initialized)
+            goto bad;
+
         comp->flags &=~ SLF_TOSS;
         comp->recv_current = x;
     } else {
@@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
     if (cs->cs_tcp.doff > 5)
         memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
     cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
+    cs->initialized = true;
     /* Put headers back on packet
      * Neither header checksum is recalculated
      */
@@ -743,8 +743,15 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 
 static void tun_detach(struct tun_file *tfile, bool clean)
 {
+    struct tun_struct *tun;
+    struct net_device *dev;
+
     rtnl_lock();
+    tun = rtnl_dereference(tfile->tun);
+    dev = tun ? tun->dev : NULL;
     __tun_detach(tfile, clean);
+    if (dev)
+        netdev_state_change(dev);
     rtnl_unlock();
 }
 
@@ -2562,10 +2569,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
             /* One or more queue has already been attached, no need
              * to initialize the device again.
              */
+            netdev_state_change(dev);
             return 0;
         }
-    }
-    else {
+
+        tun->flags = (tun->flags & ~TUN_FEATURES) |
+                     (ifr->ifr_flags & TUN_FEATURES);
+
+        netdev_state_change(dev);
+    } else {
         char *name;
         unsigned long flags = 0;
         int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
@@ -2642,6 +2654,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                            ~(NETIF_F_HW_VLAN_CTAG_TX |
                              NETIF_F_HW_VLAN_STAG_TX);
 
+        tun->flags = (tun->flags & ~TUN_FEATURES) |
+                     (ifr->ifr_flags & TUN_FEATURES);
+
         INIT_LIST_HEAD(&tun->disabled);
         err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
         if (err < 0)
@@ -2656,9 +2671,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
     tun_debug(KERN_INFO, tun, "tun_set_iff\n");
 
-    tun->flags = (tun->flags & ~TUN_FEATURES) |
-        (ifr->ifr_flags & TUN_FEATURES);
-
     /* Make sure persistent devices do not get stuck in
      * xoff state.
      */
@@ -2805,6 +2817,9 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
     } else
         ret = -EINVAL;
 
+    if (ret >= 0)
+        netdev_state_change(tun->dev);
+
 unlock:
     rtnl_unlock();
     return ret;
@@ -2845,6 +2860,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
     unsigned int ifindex;
     int le;
     int ret;
+    bool do_notify = false;
 
     if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
         (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
@@ -2941,10 +2957,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
         if (arg && !(tun->flags & IFF_PERSIST)) {
             tun->flags |= IFF_PERSIST;
             __module_get(THIS_MODULE);
+            do_notify = true;
         }
         if (!arg && (tun->flags & IFF_PERSIST)) {
             tun->flags &= ~IFF_PERSIST;
             module_put(THIS_MODULE);
+            do_notify = true;
         }
 
         tun_debug(KERN_INFO, tun, "persist %s\n",
@@ -2959,6 +2977,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
             break;
         }
         tun->owner = owner;
+        do_notify = true;
         tun_debug(KERN_INFO, tun, "owner set to %u\n",
                   from_kuid(&init_user_ns, tun->owner));
         break;
@@ -2971,6 +2990,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
             break;
         }
         tun->group = group;
+        do_notify = true;
         tun_debug(KERN_INFO, tun, "group set to %u\n",
                   from_kgid(&init_user_ns, tun->group));
         break;
@@ -3130,6 +3150,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
         break;
     }
 
+    if (do_notify)
+        netdev_state_change(tun->dev);
+
 unlock:
     rtnl_unlock();
     if (tun)
@@ -901,6 +901,12 @@ static const struct usb_device_id products[] = {
                                   USB_CDC_SUBCLASS_ETHERNET,
                                   USB_CDC_PROTO_NONE),
     .driver_info = (unsigned long)&wwan_info,
+}, {
+    /* Cinterion AHS3 modem by GEMALTO */
+    USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
+                                  USB_CDC_SUBCLASS_ETHERNET,
+                                  USB_CDC_PROTO_NONE),
+    .driver_info = (unsigned long)&wwan_info,
 }, {
     USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
                        USB_CDC_PROTO_NONE),
@@ -928,7 +928,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
             offset += 0x100;
         else
             ret = -EINVAL;
-        ret = lan78xx_read_raw_otp(dev, offset, length, data);
+        if (!ret)
+            ret = lan78xx_read_raw_otp(dev, offset, length, data);
     }
 
     return ret;
@@ -2502,7 +2503,7 @@ static void lan78xx_init_stats(struct lan78xx_net *dev)
     dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
     dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
 
-    lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
+    set_bit(EVENT_STAT_UPDATE, &dev->flags);
 }
 
 static int lan78xx_open(struct net_device *net)
@@ -2514,10 +2515,6 @@ static int lan78xx_open(struct net_device *net)
     if (ret < 0)
         goto out;
 
-    ret = lan78xx_reset(dev);
-    if (ret < 0)
-        goto done;
-
     phy_start(net->phydev);
 
     netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
@@ -641,14 +641,14 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 }
 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 
-static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
+static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 {
     u64 a = addr / VHOST_PAGE_SIZE / 8;
 
     /* Make sure 64 bit math will not overflow. */
     if (a > ULONG_MAX - (unsigned long)log_base ||
         a + (unsigned long)log_base > ULONG_MAX)
-        return 0;
+        return false;
 
     return access_ok(VERIFY_WRITE, log_base + a,
                      (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
@@ -661,30 +661,30 @@ static bool vhost_overflow(u64 uaddr, u64 size)
 }
 
 /* Caller should have vq mutex and device mutex. */
-static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
-                               int log_all)
+static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
+                                int log_all)
 {
     struct vhost_umem_node *node;
 
     if (!umem)
-        return 0;
+        return false;
 
     list_for_each_entry(node, &umem->umem_list, link) {
         unsigned long a = node->userspace_addr;
 
         if (vhost_overflow(node->userspace_addr, node->size))
-            return 0;
+            return false;
 
 
         if (!access_ok(VERIFY_WRITE, (void __user *)a,
                        node->size))
-            return 0;
+            return false;
         else if (log_all && !log_access_ok(log_base,
                                            node->start,
                                            node->size))
-            return 0;
+            return false;
     }
-    return 1;
+    return true;
 }
 
 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
@@ -701,13 +701,13 @@ static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
 
 /* Can we switch to this memory table? */
 /* Caller should have device mutex but not vq mutex */
-static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
-                            int log_all)
+static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
+                             int log_all)
 {
     int i;
 
     for (i = 0; i < d->nvqs; ++i) {
-        int ok;
+        bool ok;
         bool log;
 
         mutex_lock(&d->vqs[i]->mutex);
@@ -717,12 +717,12 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
             ok = vq_memory_access_ok(d->vqs[i]->log_base,
                                      umem, log);
         else
-            ok = 1;
+            ok = true;
         mutex_unlock(&d->vqs[i]->mutex);
         if (!ok)
-            return 0;
+            return false;
     }
-    return 1;
+    return true;
 }
 
 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
|
@ -744,7 +744,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
|
|||
struct iov_iter t;
|
||||
void __user *uaddr = vhost_vq_meta_fetch(vq,
|
||||
(u64)(uintptr_t)to, size,
|
||||
VHOST_ADDR_DESC);
|
||||
VHOST_ADDR_USED);
|
||||
|
||||
if (uaddr)
|
||||
return __copy_to_user(uaddr, from, size);
|
||||
|
@@ -959,21 +959,21 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
     spin_unlock(&d->iotlb_lock);
 }
 
-static int umem_access_ok(u64 uaddr, u64 size, int access)
+static bool umem_access_ok(u64 uaddr, u64 size, int access)
 {
     unsigned long a = uaddr;
 
     /* Make sure 64 bit math will not overflow. */
     if (vhost_overflow(uaddr, size))
-        return -EFAULT;
+        return false;
 
     if ((access & VHOST_ACCESS_RO) &&
         !access_ok(VERIFY_READ, (void __user *)a, size))
-        return -EFAULT;
+        return false;
     if ((access & VHOST_ACCESS_WO) &&
         !access_ok(VERIFY_WRITE, (void __user *)a, size))
-        return -EFAULT;
-    return 0;
+        return false;
+    return true;
 }
 
 static int vhost_process_iotlb_msg(struct vhost_dev *dev,
@@ -988,7 +988,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
             ret = -EFAULT;
             break;
         }
-        if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
+        if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
             ret = -EFAULT;
             break;
         }
@@ -1135,10 +1135,10 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
     return 0;
 }
 
-static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
-                        struct vring_desc __user *desc,
-                        struct vring_avail __user *avail,
-                        struct vring_used __user *used)
+static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
+                         struct vring_desc __user *desc,
+                         struct vring_avail __user *avail,
+                         struct vring_used __user *used)
 
 {
     size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
@@ -1161,8 +1161,8 @@ static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
         vq->meta_iotlb[type] = node;
 }
 
-static int iotlb_access_ok(struct vhost_virtqueue *vq,
-                           int access, u64 addr, u64 len, int type)
+static bool iotlb_access_ok(struct vhost_virtqueue *vq,
+                            int access, u64 addr, u64 len, int type)
 {
     const struct vhost_umem_node *node;
     struct vhost_umem *umem = vq->iotlb;
@@ -1220,7 +1220,7 @@ EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
 
 /* Can we log writes? */
 /* Caller should have device mutex but not vq mutex */
-int vhost_log_access_ok(struct vhost_dev *dev)
+bool vhost_log_access_ok(struct vhost_dev *dev)
 {
     return memory_access_ok(dev, dev->umem, 1);
 }
@@ -1228,8 +1228,8 @@ EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_virtqueue *vq,
-                            void __user *log_base)
+static bool vq_log_access_ok(struct vhost_virtqueue *vq,
+                             void __user *log_base)
 {
     size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 
@@ -1242,12 +1242,14 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
 
 /* Can we start vq? */
 /* Caller should have vq mutex and device mutex */
-int vhost_vq_access_ok(struct vhost_virtqueue *vq)
+bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
 {
-    int ret = vq_log_access_ok(vq, vq->log_base);
+    if (!vq_log_access_ok(vq, vq->log_base))
+        return false;
 
-    if (ret || vq->iotlb)
-        return ret;
+    /* Access validation occurs at prefetch time with IOTLB */
+    if (vq->iotlb)
+        return true;
 
     return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
 }
@@ -178,8 +178,8 @@ void vhost_dev_cleanup(struct vhost_dev *);
 void vhost_dev_stop(struct vhost_dev *);
 long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
 long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
-int vhost_vq_access_ok(struct vhost_virtqueue *vq);
-int vhost_log_access_ok(struct vhost_dev *);
+bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
+bool vhost_log_access_ok(struct vhost_dev *);
 
 int vhost_get_vq_desc(struct vhost_virtqueue *,
                       struct iovec iov[], unsigned int iov_count,
@@ -127,6 +127,7 @@ typedef __u32 int32;
  */
 struct cstate {
     byte_t cs_this;             /* connection id number (xmit) */
+    bool initialized;           /* true if initialized */
     struct cstate *next;        /* next in ring (xmit) */
     struct iphdr cs_ip;         /* ip/tcp hdr from most recent packet */
     struct tcphdr cs_tcp;
@@ -781,8 +781,14 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
             tunnel->encap.type == TUNNEL_ENCAP_NONE) {
             dev->features |= NETIF_F_GSO_SOFTWARE;
             dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+        } else {
+            dev->features &= ~NETIF_F_GSO_SOFTWARE;
+            dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
         }
         dev->features |= NETIF_F_LLTX;
-    }
+    } else {
+        dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
+        dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
+    }
 }
 
@@ -335,26 +335,6 @@ err_tlock:
 }
 EXPORT_SYMBOL_GPL(l2tp_session_register);
 
-/* Lookup a tunnel by id
- */
-struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
-{
-    struct l2tp_tunnel *tunnel;
-    struct l2tp_net *pn = l2tp_pernet(net);
-
-    rcu_read_lock_bh();
-    list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-        if (tunnel->tunnel_id == tunnel_id) {
-            rcu_read_unlock_bh();
-            return tunnel;
-        }
-    }
-    rcu_read_unlock_bh();
-
-    return NULL;
-}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
-
 struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
 {
     struct l2tp_net *pn = l2tp_pernet(net);
@@ -1436,74 +1416,11 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 {
     struct l2tp_tunnel *tunnel = NULL;
     int err;
-    struct socket *sock = NULL;
-    struct sock *sk = NULL;
-    struct l2tp_net *pn;
     enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
 
-    /* Get the tunnel socket from the fd, which was opened by
-     * the userspace L2TP daemon. If not specified, create a
-     * kernel socket.
-     */
-    if (fd < 0) {
-        err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
-                                      cfg, &sock);
-        if (err < 0)
-            goto err;
-    } else {
-        sock = sockfd_lookup(fd, &err);
-        if (!sock) {
-            pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
-                   tunnel_id, fd, err);
-            err = -EBADF;
-            goto err;
-        }
-
-        /* Reject namespace mismatches */
-        if (!net_eq(sock_net(sock->sk), net)) {
-            pr_err("tunl %u: netns mismatch\n", tunnel_id);
-            err = -EINVAL;
-            goto err;
-        }
-    }
-
-    sk = sock->sk;
-
     if (cfg != NULL)
         encap = cfg->encap;
 
-    /* Quick sanity checks */
-    err = -EPROTONOSUPPORT;
-    if (sk->sk_type != SOCK_DGRAM) {
-        pr_debug("tunl %hu: fd %d wrong socket type\n",
-                 tunnel_id, fd);
-        goto err;
-    }
-    switch (encap) {
-    case L2TP_ENCAPTYPE_UDP:
-        if (sk->sk_protocol != IPPROTO_UDP) {
-            pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
-                   tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
-            goto err;
-        }
-        break;
-    case L2TP_ENCAPTYPE_IP:
-        if (sk->sk_protocol != IPPROTO_L2TP) {
-            pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
-                   tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
-            goto err;
-        }
-        break;
-    }
-
-    /* Check if this socket has already been prepped */
-    tunnel = l2tp_tunnel(sk);
-    if (tunnel != NULL) {
-        /* This socket has already been prepped */
-        err = -EBUSY;
-        goto err;
-    }
-
     tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
     if (tunnel == NULL) {
         err = -ENOMEM;
@@ -1520,72 +1437,126 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
     rwlock_init(&tunnel->hlist_lock);
     tunnel->acpt_newsess = true;
 
-    /* The net we belong to */
-    tunnel->l2tp_net = net;
-    pn = l2tp_pernet(net);
-
     if (cfg != NULL)
         tunnel->debug = cfg->debug;
 
-    /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
     tunnel->encap = encap;
-    if (encap == L2TP_ENCAPTYPE_UDP) {
-        struct udp_tunnel_sock_cfg udp_cfg = { };
-
-        udp_cfg.sk_user_data = tunnel;
-        udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
-        udp_cfg.encap_rcv = l2tp_udp_encap_recv;
-        udp_cfg.encap_destroy = l2tp_udp_encap_destroy;
-
-        setup_udp_tunnel_sock(net, sock, &udp_cfg);
-    } else {
-        sk->sk_user_data = tunnel;
-    }
-
-    /* Bump the reference count. The tunnel context is deleted
-     * only when this drops to zero. A reference is also held on
-     * the tunnel socket to ensure that it is not released while
-     * the tunnel is extant. Must be done before sk_destruct is
-     * set.
-     */
+
     refcount_set(&tunnel->ref_count, 1);
-    sock_hold(sk);
-    tunnel->sock = sk;
     tunnel->fd = fd;
 
-    /* Hook on the tunnel socket destructor so that we can cleanup
-     * if the tunnel socket goes away.
-     */
-    tunnel->old_sk_destruct = sk->sk_destruct;
-    sk->sk_destruct = &l2tp_tunnel_destruct;
-    lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
-
-    sk->sk_allocation = GFP_ATOMIC;
-
     /* Init delete workqueue struct */
     INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
 
-    /* Add tunnel to our list */
     INIT_LIST_HEAD(&tunnel->list);
-    spin_lock_bh(&pn->l2tp_tunnel_list_lock);
-    list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
-    spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
 
     err = 0;
 err:
     if (tunnelp)
         *tunnelp = tunnel;
 
-    /* If tunnel's socket was created by the kernel, it doesn't
-     * have a file.
-     */
-    if (sock && sock->file)
-        sockfd_put(sock);
-
     return err;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
 
+static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
+                                enum l2tp_encap_type encap)
+{
+    if (!net_eq(sock_net(sk), net))
+        return -EINVAL;
+
+    if (sk->sk_type != SOCK_DGRAM)
+        return -EPROTONOSUPPORT;
+
+    if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
+        (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
+        return -EPROTONOSUPPORT;
+
+    if (sk->sk_user_data)
+        return -EBUSY;
+
+    return 0;
+}
+
+int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+                         struct l2tp_tunnel_cfg *cfg)
+{
+    struct l2tp_tunnel *tunnel_walk;
+    struct l2tp_net *pn;
+    struct socket *sock;
+    struct sock *sk;
+    int ret;
+
+    if (tunnel->fd < 0) {
+        ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
+                                      tunnel->peer_tunnel_id, cfg,
+                                      &sock);
+        if (ret < 0)
+            goto err;
+    } else {
+        sock = sockfd_lookup(tunnel->fd, &ret);
+        if (!sock)
+            goto err;
+
+        ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
+        if (ret < 0)
+            goto err_sock;
+    }
+
+    sk = sock->sk;
+
+    sock_hold(sk);
+    tunnel->sock = sk;
+    tunnel->l2tp_net = net;
+
+    pn = l2tp_pernet(net);
+
+    spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+    list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
+        if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
+            spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+
+            ret = -EEXIST;
+            goto err_sock;
+        }
+    }
+    list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+    spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+
+    if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+        struct udp_tunnel_sock_cfg udp_cfg = {
+            .sk_user_data = tunnel,
+            .encap_type = UDP_ENCAP_L2TPINUDP,
+            .encap_rcv = l2tp_udp_encap_recv,
+            .encap_destroy = l2tp_udp_encap_destroy,
+        };
+
+        setup_udp_tunnel_sock(net, sock, &udp_cfg);
+    } else {
+        sk->sk_user_data = tunnel;
+    }
+
+    tunnel->old_sk_destruct = sk->sk_destruct;
+    sk->sk_destruct = &l2tp_tunnel_destruct;
+    lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
+                               "l2tp_sock");
+    sk->sk_allocation = GFP_ATOMIC;
+
+    if (tunnel->fd >= 0)
+        sockfd_put(sock);
+
+    return 0;
+
+err_sock:
+    if (tunnel->fd < 0)
+        sock_release(sock);
+    else
+        sockfd_put(sock);
+err:
+    return ret;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
+
 /* This function is used by the netlink TUNNEL_DELETE command.
  */
 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
@@ -220,12 +220,14 @@ struct l2tp_session *l2tp_session_get(const struct net *net,
 struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
 struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
                                                 const char *ifname);
-struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id);
 struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
 
 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
                        u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
                        struct l2tp_tunnel **tunnelp);
+int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+                         struct l2tp_tunnel_cfg *cfg);
+
 void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
 struct l2tp_session *l2tp_session_create(int priv_size,
@@ -236,12 +236,6 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
     if (info->attrs[L2TP_ATTR_DEBUG])
         cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
 
-    tunnel = l2tp_tunnel_find(net, tunnel_id);
-    if (tunnel != NULL) {
-        ret = -EEXIST;
-        goto out;
-    }
-
     ret = -EINVAL;
     switch (cfg.encap) {
     case L2TP_ENCAPTYPE_UDP:
@@ -251,9 +245,19 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
         break;
     }
 
-    if (ret >= 0)
-        ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
-                                 tunnel, L2TP_CMD_TUNNEL_CREATE);
+    if (ret < 0)
+        goto out;
+
+    l2tp_tunnel_inc_refcount(tunnel);
+    ret = l2tp_tunnel_register(tunnel, net, &cfg);
+    if (ret < 0) {
+        kfree(tunnel);
+        goto out;
+    }
+    ret = l2tp_tunnel_notify(&l2tp_nl_family, info, tunnel,
+                             L2TP_CMD_TUNNEL_CREATE);
+    l2tp_tunnel_dec_refcount(tunnel);
+
 out:
     return ret;
 }
@@ -698,6 +698,15 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
             error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
             if (error < 0)
                 goto end;
+
+            l2tp_tunnel_inc_refcount(tunnel);
+            error = l2tp_tunnel_register(tunnel, sock_net(sk),
+                                         &tcfg);
+            if (error < 0) {
+                kfree(tunnel);
+                goto end;
+            }
+            drop_tunnel = true;
         }
     } else {
         /* Error if we can't find the tunnel */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -1017,10 +1017,15 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
     if (conn->c_npaths == 0 && hash != 0) {
         rds_send_ping(conn, 0);
 
-        if (conn->c_npaths == 0) {
-            wait_event_interruptible(conn->c_hs_waitq,
-                                     (conn->c_npaths != 0));
-        }
+        /* The underlying connection is not up yet. Need to wait
+         * until it is up to be sure that the non-zero c_path can be
+         * used. But if we are interrupted, we have to use the zero
+         * c_path in case the connection ends up being non-MP capable.
+         */
+        if (conn->c_npaths == 0)
+            if (wait_event_interruptible(conn->c_hs_waitq,
+                                         conn->c_npaths != 0))
+                hash = 0;
         if (conn->c_npaths == 1)
             hash = 0;
     }