Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Don't save TIPC header values before the header has been validated,
    from Jon Paul Maloy.

 2) Fix memory leak in RDS, from Zhu Yanjun.

 3) We fail to initialize the UID in the flow key in some paths, from
    Julian Anastasov.

 4) Fix latent TOS masking bug in the routing cache removal from years
    ago, also from Julian.

 5) We forget to set the sockaddr port in sctp_copy_local_addr_list(),
    fix from Xin Long.

 6) Missing module ref count drop in packet scheduler actions, from
    Roman Mashak.

 7) Fix RCU annotations in rht_bucket_nested, from Herbert Xu.

 8) Fix use-after-free which happens because L2TP's IPv4 support returns
    non-zero values from its backlog_rcv function, which IPv4 interprets
    as protocol values. Fix from Paul Hüber.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (35 commits)
  qed: Don't use attention PTT for configuring BW
  qed: Fix race with multiple VFs
  l2tp: avoid use-after-free caused by l2tp_ip_backlog_recv
  xfrm: provide correct dst in xfrm_neigh_lookup
  rhashtable: Fix RCU dereference annotation in rht_bucket_nested
  rhashtable: Fix use before NULL check in bucket_table_free
  net sched actions: do not overwrite status of action creation.
  rxrpc: Kernel calls get stuck in recvmsg
  net sched actions: decrement module reference count after table flush.
  lib: Allow compile-testing of parman
  ipv6: check sk sk_type and protocol early in ip_mroute_set/getsockopt
  sctp: set sin_port for addr param when checking duplicate address
  net/mlx4_en: fix overflow in mlx4_en_init_timestamp()
  netfilter: nft_set_bitmap: incorrect bitmap size
  net: s2io: fix typo argumnet argument
  net: vxge: fix typo argumnet argument
  netfilter: nf_ct_expect: Change __nf_ct_expect_check() return value.
  ipv4: mask tos for input route
  ipv4: add missing initialization for flowi4_uid
  lib: fix spelling mistake: "actualy" -> "actually"
  ...
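To make fix 8 concrete, here is a minimal userspace C sketch (not the kernel's code; deliver(), broken_handler() and fixed_handler() are hypothetical names) of the dispatch contract involved: IPv4's local-delivery path reads a negative return from a protocol handler as "resubmit this packet as protocol -ret", so a backlog_rcv implementation that frees the skb and then returns -1 hands the already-freed buffer to another protocol handler.

#include <stdio.h>

/* A handler returns 0 once it has consumed (and possibly freed) the
 * packet; the dispatcher reads a negative return as "resubmit this
 * packet as protocol number -ret".
 */
static int broken_handler(void *pkt)
{
	(void)pkt;	/* pretend the packet was freed here */
	return -1;	/* bug: -(-1) == 1 selects another protocol */
}

static int fixed_handler(void *pkt)
{
	(void)pkt;
	return 0;	/* packet fully consumed, no resubmission */
}

static void deliver(int (*handler)(void *), void *pkt)
{
	int ret = handler(pkt);

	if (ret < 0)
		/* in the kernel this re-dispatches the freed skb */
		printf("resubmit as protocol %d -> use-after-free\n", -ret);
	else
		printf("packet consumed, done\n");
}

int main(void)
{
	char pkt[64] = { 0 };

	deliver(broken_handler, pkt);	/* the pre-fix behaviour */
	deliver(fixed_handler, pkt);	/* the fixed contract: return 0 */
	return 0;
}

The actual fix, visible in the l2tp_ip_backlog_recv hunk below, is exactly the second variant: return 0 on the drop path.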
commit c2eca00fec
@@ -293,36 +293,29 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
 static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
-	bool mss_index_found = false;
-	int mss_index;
+	int mss_index = -EBUSY;
 	int i;
 
 	spin_lock(&pdata->mss_lock);
 
 	/* Reuse the slot if MSS matches */
-	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
+	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
 		if (pdata->mss[i] == mss) {
 			pdata->mss_refcnt[i]++;
 			mss_index = i;
-			mss_index_found = true;
 		}
 	}
 
 	/* Overwrite the slot with ref_count = 0 */
-	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
+	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
 		if (!pdata->mss_refcnt[i]) {
 			pdata->mss_refcnt[i]++;
 			pdata->mac_ops->set_mss(pdata, mss, i);
 			pdata->mss[i] = mss;
 			mss_index = i;
-			mss_index_found = true;
 		}
 	}
 
-	/* No slots with ref_count = 0 available, return busy */
-	if (!mss_index_found)
-		mss_index = -EBUSY;
-
 	spin_unlock(&pdata->mss_lock);
 
 	return mss_index;
@@ -89,10 +89,17 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
 	}
 }
 
+#define MLX4_EN_WRAP_AROUND_SEC	10UL
+/* By scheduling the overflow check every 5 seconds, we have a reasonably
+ * good chance we wont miss a wrap around.
+ * TOTO: Use a timer instead of a work queue to increase the guarantee.
+ */
+#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
+
 void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
 {
 	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
-					      mdev->overflow_period);
+					      MLX4_EN_OVERFLOW_PERIOD);
 	unsigned long flags;
 
 	if (timeout) {
@@ -237,7 +244,6 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
 	.enable		= mlx4_en_phc_enable,
 };
 
-#define MLX4_EN_WRAP_AROUND_SEC	10ULL
 
 /* This function calculates the max shift that enables the user range
  * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
@@ -258,7 +264,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
 {
 	struct mlx4_dev *dev = mdev->dev;
 	unsigned long flags;
-	u64 ns, zero = 0;
 
 	/* mlx4_en_init_timestamp is called for each netdev.
 	 * mdev->ptp_clock is common for all ports, skip initialization if
@@ -282,13 +287,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
 			 ktime_to_ns(ktime_get_real()));
 	write_sequnlock_irqrestore(&mdev->clock_lock, flags);
 
-	/* Calculate period in seconds to call the overflow watchdog - to make
-	 * sure counter is checked at least once every wrap around.
-	 */
-	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
-	do_div(ns, NSEC_PER_SEC / 2 / HZ);
-	mdev->overflow_period = ns;
-
 	/* Configure the PHC */
 	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
 	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
@@ -430,7 +430,6 @@ struct mlx4_en_dev {
 	seqlock_t		clock_lock;
 	struct timecounter	clock;
 	unsigned long		last_overflow_check;
-	unsigned long		overflow_period;
 	struct ptp_clock	*ptp_clock;
 	struct ptp_clock_info	ptp_clock_info;
 	struct notifier_block	nb;
@@ -5397,7 +5397,7 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
 *  s2io_nic structure.
 *  @regs : pointer to the structure with parameters given by ethtool for
 *  dumping the registers.
-*  @reg_space: The input argumnet into which all the registers are dumped.
+*  @reg_space: The input argument into which all the registers are dumped.
 *  Description:
 *  Dumps the entire register space of xFrame NIC into the user given
 *  buffer area.
@@ -119,7 +119,7 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
 * @dev: device pointer.
 * @regs: pointer to the structure with parameters given by ethtool for
 *	  dumping the registers.
-* @reg_space: The input argumnet into which all the registers are dumped.
+* @reg_space: The input argument into which all the registers are dumped.
 *
 * Dumps the vpath register space of Titan NIC into the user given
 * buffer area.
@@ -688,7 +688,9 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 #define OOO_LB_TC 9
 
 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+					 struct qed_ptt *p_ptt,
+					 u32 min_pf_rate);
 
 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
@@ -3198,7 +3198,8 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
 }
 
 /* API to configure WFQ from mcp link change */
-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+					 struct qed_ptt *p_ptt, u32 min_pf_rate)
 {
 	int i;
 
@@ -3212,8 +3213,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
-		__qed_configure_vp_wfq_on_link_change(p_hwfn,
-						      p_hwfn->p_dpc_ptt,
+		__qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
 						      min_pf_rate);
 	}
 }
@@ -679,7 +679,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
 
 	/* Min bandwidth configuration */
 	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
-	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
+	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
+					    p_link->min_pf_rate);
 
 	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
 	p_link->an_complete = !!(status &
@@ -3014,8 +3014,7 @@ cleanup:
 		ack_vfs[vfid / 32] |= BIT((vfid % 32));
 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
 		    ~(1ULL << (rel_vf_id % 64));
-		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
-		    ~(1ULL << (rel_vf_id % 64));
+		p_vf->vf_mbx.b_pending_msg = false;
 	}
 
 	return rc;
@@ -3128,11 +3127,20 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
 	mbx = &p_vf->vf_mbx;
 
-	/* qed_iov_process_mbx_request */
-	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-		   "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+	if (!mbx->b_pending_msg) {
+		DP_NOTICE(p_hwfn,
+			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
+			  p_vf->abs_vf_id);
+		return;
+	}
+	mbx->b_pending_msg = false;
 
 	mbx->first_tlv = mbx->req_virt->first_tlv;
 
+	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+		   "VF[%02x]: Processing mailbox message [type %04x]\n",
+		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
 	/* check if tlv type is known */
 	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
 	    !p_vf->b_malicious) {
@@ -3219,20 +3227,19 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
 	}
 }
 
-static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
 {
-	u64 add_bit = 1ULL << (vfid % 64);
+	int i;
 
-	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
-}
+	memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
 
-static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
-						    u64 *events)
-{
-	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+	qed_for_each_vf(p_hwfn, i) {
+		struct qed_vf_info *p_vf;
 
-	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
-	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+		if (p_vf->vf_mbx.b_pending_msg)
+			events[i / 64] |= 1ULL << (i % 64);
+	}
 }
 
 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
@@ -3266,7 +3273,7 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
 	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
 
 	/* Mark the event and schedule the workqueue */
-	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+	p_vf->vf_mbx.b_pending_msg = true;
 	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
 
 	return 0;
@@ -4030,7 +4037,7 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
 		return;
 	}
 
-	qed_iov_pf_get_and_clear_pending_events(hwfn, events);
+	qed_iov_pf_get_pending_events(hwfn, events);
 
 	DP_VERBOSE(hwfn, QED_MSG_IOV,
 		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
@@ -140,6 +140,9 @@ struct qed_iov_vf_mbx {
 	/* Address in VF where a pending message is located */
 	dma_addr_t pending_req;
 
+	/* Message from VF awaits handling */
+	bool b_pending_msg;
+
 	u8 *offset;
 
 	/* saved VF request header */
@@ -232,7 +235,6 @@ struct qed_vf_info {
  */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];
-	u64 pending_events[QED_VF_ARRAY_LENGTH];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Allocate message address continuosuly and split to each VF */
@@ -416,7 +416,7 @@ struct stmmac_dma_ops {
 	/* Configure the AXI Bus Mode Register */
 	void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
 	/* Dump DMA registers */
-	void (*dump_regs) (void __iomem *ioaddr);
+	void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space);
 	/* Set tx/rx threshold in the csr6 register
 	 * An invalid value enables the store-and-forward mode */
 	void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
@@ -456,7 +456,7 @@ struct stmmac_ops {
 	/* Enable RX Queues */
 	void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
 	/* Dump MAC registers */
-	void (*dump_regs)(struct mac_device_info *hw);
+	void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
 	/* Handle extra events on specific interrupts hw dependent */
 	int (*host_irq_status)(struct mac_device_info *hw,
 			       struct stmmac_extra_stats *x);
@@ -92,17 +92,13 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
 	return !!(value & GMAC_CONTROL_IPC);
 }
 
-static void dwmac1000_dump_regs(struct mac_device_info *hw)
+static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space)
 {
 	void __iomem *ioaddr = hw->pcsr;
 	int i;
-	pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr);
 
-	for (i = 0; i < 55; i++) {
-		int offset = i * 4;
-		pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
-			offset, readl(ioaddr + offset));
-	}
+	for (i = 0; i < 55; i++)
+		reg_space[i] = readl(ioaddr + i * 4);
 }
 
 static void dwmac1000_set_umac_addr(struct mac_device_info *hw,
@@ -201,18 +201,14 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
 	writel(csr6, ioaddr + DMA_CONTROL);
 }
 
-static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
+static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
 {
 	int i;
-	pr_info(" DMA registers\n");
-	for (i = 0; i < 22; i++) {
-		if ((i < 9) || (i > 17)) {
-			int offset = i * 4;
-			pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
-			       (DMA_BUS_MODE + offset),
-			       readl(ioaddr + DMA_BUS_MODE + offset));
-		}
-	}
+
+	for (i = 0; i < 22; i++)
+		if ((i < 9) || (i > 17))
+			reg_space[DMA_BUS_MODE / 4 + i] =
+				readl(ioaddr + DMA_BUS_MODE + i * 4);
 }
 
 static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
@@ -40,28 +40,18 @@ static void dwmac100_core_init(struct mac_device_info *hw, int mtu)
 #endif
 }
 
-static void dwmac100_dump_mac_regs(struct mac_device_info *hw)
+static void dwmac100_dump_mac_regs(struct mac_device_info *hw, u32 *reg_space)
 {
 	void __iomem *ioaddr = hw->pcsr;
-	pr_info("\t----------------------------------------------\n"
-		"\t  DWMAC 100 CSR (base addr = 0x%p)\n"
-		"\t----------------------------------------------\n", ioaddr);
-	pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
-		readl(ioaddr + MAC_CONTROL));
-	pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
-		readl(ioaddr + MAC_ADDR_HIGH));
-	pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
-		readl(ioaddr + MAC_ADDR_LOW));
-	pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
-		MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
-	pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
-		MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
-	pr_info("\tflow control (offset 0x%x): 0x%08x\n",
-		MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
-	pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
-		readl(ioaddr + MAC_VLAN1));
-	pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
-		readl(ioaddr + MAC_VLAN2));
+
+	reg_space[MAC_CONTROL / 4] = readl(ioaddr + MAC_CONTROL);
+	reg_space[MAC_ADDR_HIGH / 4] = readl(ioaddr + MAC_ADDR_HIGH);
+	reg_space[MAC_ADDR_LOW / 4] = readl(ioaddr + MAC_ADDR_LOW);
+	reg_space[MAC_HASH_HIGH / 4] = readl(ioaddr + MAC_HASH_HIGH);
+	reg_space[MAC_HASH_LOW / 4] = readl(ioaddr + MAC_HASH_LOW);
+	reg_space[MAC_FLOW_CTRL / 4] = readl(ioaddr + MAC_FLOW_CTRL);
+	reg_space[MAC_VLAN1 / 4] = readl(ioaddr + MAC_VLAN1);
+	reg_space[MAC_VLAN2 / 4] = readl(ioaddr + MAC_VLAN2);
 }
 
 static int dwmac100_rx_ipc_enable(struct mac_device_info *hw)
@@ -66,19 +66,18 @@ static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
 	writel(csr6, ioaddr + DMA_CONTROL);
 }
 
-static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
+static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
 {
 	int i;
 
-	pr_debug("DWMAC 100 DMA CSR\n");
 	for (i = 0; i < 9; i++)
-		pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
-			 (DMA_BUS_MODE + i * 4),
-			 readl(ioaddr + DMA_BUS_MODE + i * 4));
+		reg_space[DMA_BUS_MODE / 4 + i] =
+			readl(ioaddr + DMA_BUS_MODE + i * 4);
 
-	pr_debug("\tCSR20 (0x%x): 0x%08x, CSR21 (0x%x): 0x%08x\n",
-		 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR),
-		 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+	reg_space[DMA_CUR_TX_BUF_ADDR / 4] =
+		readl(ioaddr + DMA_CUR_TX_BUF_ADDR);
+	reg_space[DMA_CUR_RX_BUF_ADDR / 4] =
+		readl(ioaddr + DMA_CUR_RX_BUF_ADDR);
 }
 
 /* DMA controller has two counters to track the number of the missed frames. */
@@ -70,19 +70,13 @@ static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
 	writel(value, ioaddr + GMAC_RXQ_CTRL0);
 }
 
-static void dwmac4_dump_regs(struct mac_device_info *hw)
+static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
 {
 	void __iomem *ioaddr = hw->pcsr;
 	int i;
 
-	pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr);
-
-	for (i = 0; i < GMAC_REG_NUM; i++) {
-		int offset = i * 4;
-
-		pr_debug("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
-			 offset, readl(ioaddr + offset));
-	}
+	for (i = 0; i < GMAC_REG_NUM; i++)
+		reg_space[i] = readl(ioaddr + i * 4);
 }
 
 static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
@@ -127,53 +127,51 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
 		dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
 }
 
-static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
+static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
+				  u32 *reg_space)
 {
-	pr_debug(" Channel %d\n", channel);
-	pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0,
-		 readl(ioaddr + DMA_CHAN_CONTROL(channel)));
-	pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4,
-		 readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)));
-	pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8,
-		 readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)));
-	pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14,
-		 readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)));
-	pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c,
-		 readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)));
-	pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20,
-		 readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)));
-	pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28,
-		 readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)));
-	pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c,
-		 readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)));
-	pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30,
-		 readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)));
-	pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34,
-		 readl(ioaddr + DMA_CHAN_INTR_ENA(channel)));
-	pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38,
-		 readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)));
-	pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c,
-		 readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)));
-	pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44,
-		 readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)));
-	pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c,
-		 readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)));
-	pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54,
-		 readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)));
-	pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c,
-		 readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)));
-	pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60,
-		 readl(ioaddr + DMA_CHAN_STATUS(channel)));
+	reg_space[DMA_CHAN_CONTROL(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CONTROL(channel));
+	reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
+	reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
+	reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
+	reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+	reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel));
+	reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel));
+	reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel));
+	reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel));
+	reg_space[DMA_CHAN_INTR_ENA(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_INTR_ENA(channel));
+	reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel));
+	reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel));
+	reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel));
+	reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel));
+	reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel));
+	reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel));
+	reg_space[DMA_CHAN_STATUS(channel) / 4] =
+		readl(ioaddr + DMA_CHAN_STATUS(channel));
 }
 
-static void dwmac4_dump_dma_regs(void __iomem *ioaddr)
+static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
 {
 	int i;
 
-	pr_debug(" GMAC4 DMA registers\n");
-
 	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
-		_dwmac4_dump_dma_regs(ioaddr, i);
+		_dwmac4_dump_dma_regs(ioaddr, i, reg_space);
 }
 
 static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
@@ -435,32 +435,14 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev)
 static void stmmac_ethtool_gregs(struct net_device *dev,
 			  struct ethtool_regs *regs, void *space)
 {
-	int i;
 	u32 *reg_space = (u32 *) space;
 
 	struct stmmac_priv *priv = netdev_priv(dev);
 
 	memset(reg_space, 0x0, REG_SPACE_SIZE);
 
-	if (priv->plat->has_gmac || priv->plat->has_gmac4) {
-		/* MAC registers */
-		for (i = 0; i < 55; i++)
-			reg_space[i] = readl(priv->ioaddr + (i * 4));
-		/* DMA registers */
-		for (i = 0; i < 22; i++)
-			reg_space[i + 55] =
-				readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
-	} else {
-		/* MAC registers */
-		for (i = 0; i < 12; i++)
-			reg_space[i] = readl(priv->ioaddr + (i * 4));
-		/* DMA registers */
-		for (i = 0; i < 9; i++)
-			reg_space[i + 12] =
-				readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
-		reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
-		reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
-	}
+	priv->hw->mac->dump_regs(priv->hw, reg_space);
+	priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
 }
 
 static void
@@ -1729,11 +1729,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 	priv->hw->dma->start_tx(priv->ioaddr);
 	priv->hw->dma->start_rx(priv->ioaddr);
 
-	/* Dump DMA/MAC registers */
-	if (netif_msg_hw(priv)) {
-		priv->hw->mac->dump_regs(priv->hw);
-		priv->hw->dma->dump_regs(priv->ioaddr);
-	}
 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
@@ -146,7 +146,7 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
 */
int phy_aneg_done(struct phy_device *phydev)
{
-	if (phydev->drv->aneg_done)
+	if (phydev->drv && phydev->drv->aneg_done)
		return phydev->drv->aneg_done(phydev);

	return genphy_aneg_done(phydev);
@@ -2035,7 +2035,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 	const struct iphdr *old_iph = ip_hdr(skb);
 	union vxlan_addr *dst;
 	union vxlan_addr remote_ip, local_ip;
-	union vxlan_addr *src;
 	struct vxlan_metadata _md;
 	struct vxlan_metadata *md = &_md;
 	__be16 src_port = 0, dst_port;
@@ -2062,7 +2061,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
 		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
 		vni = (rdst->remote_vni) ? : default_vni;
-		src = &vxlan->cfg.saddr;
+		local_ip = vxlan->cfg.saddr;
 		dst_cache = &rdst->dst_cache;
 		md->gbp = skb->mark;
 		ttl = vxlan->cfg.ttl;
@@ -2095,7 +2094,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		dst = &remote_ip;
 		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
 		vni = tunnel_id_to_key32(info->key.tun_id);
-		src = &local_ip;
 		dst_cache = &info->dst_cache;
 		if (info->options_len)
 			md = ip_tunnel_info_opts(info);
@@ -2115,7 +2113,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		rt = vxlan_get_route(vxlan, dev, sock4, skb,
 				     rdst ? rdst->remote_ifindex : 0, tos,
 				     dst->sin.sin_addr.s_addr,
-				     &src->sin.sin_addr.s_addr,
+				     &local_ip.sin.sin_addr.s_addr,
 				     dst_port, src_port,
 				     dst_cache, info);
 		if (IS_ERR(rt)) {
@@ -2142,7 +2140,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		if (err < 0)
 			goto tx_error;
 
-		udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, src->sin.sin_addr.s_addr,
+		udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
 				    dst->sin.sin_addr.s_addr, tos, ttl, df,
 				    src_port, dst_port, xnet, !udp_sum);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -2152,7 +2150,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		ndst = vxlan6_get_route(vxlan, dev, sock6, skb,
 					rdst ? rdst->remote_ifindex : 0, tos,
 					label, &dst->sin6.sin6_addr,
-					&src->sin6.sin6_addr,
+					&local_ip.sin6.sin6_addr,
 					dst_port, src_port,
 					dst_cache, info);
 		if (IS_ERR(ndst)) {
@@ -2180,7 +2178,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 			goto tx_error;
 
 		udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
-				     &src->sin6.sin6_addr,
+				     &local_ip.sin6.sin6_addr,
 				     &dst->sin6.sin6_addr, tos, ttl,
 				     label, src_port, dst_port, !udp_sum);
 #endif
@@ -2675,7 +2673,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
 
 	if (data[IFLA_VXLAN_ID]) {
 		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
-		if (id >= VXLAN_VID_MASK)
+		if (id >= VXLAN_N_VID)
 			return -ERANGE;
 	}
 
@@ -3,7 +3,6 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
-#include <linux/sysctl.h>
 #include <linux/in.h>
 #include <linux/in6.h>
 
@@ -2,6 +2,7 @@
 #define _UAPI_XT_HASHLIMIT_H
 
 #include <linux/types.h>
+#include <linux/limits.h>
 #include <linux/if.h>
 
 /* timings are in milliseconds. */
@@ -559,7 +559,7 @@ config SBITMAP
 	bool
 
 config PARMAN
-	tristate
+	tristate "parman" if COMPILE_TEST
 
 config PRIME_NUMBERS
 	tristate
@@ -146,9 +146,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	if (tbl->nest)
 		nested_bucket_table_free(tbl);
 
-	if (tbl)
-		kvfree(tbl->locks);
-
+	kvfree(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -1123,12 +1121,13 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 	union nested_table *ntbl;
 
 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
-	ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
+	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
 	subhash >>= tbl->nest;
 
 	while (ntbl && size > (1 << shift)) {
 		index = subhash & ((1 << shift) - 1);
-		ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
+		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
+						  tbl, hash);
 		size >>= shift;
 		subhash >>= shift;
 	}
@@ -334,7 +334,7 @@ static int test_parman_check_array(struct test_parman *test_parman,
 		last_priority = item->prio->priority;
 
 		if (item->parman_item.index != i) {
-			pr_err("Item has different index in compare to where it actualy is (%lu != %d)\n",
+			pr_err("Item has different index in compare to where it actually is (%lu != %d)\n",
 			       item->parman_item.index, i);
 			return -EINVAL;
 		}
@@ -319,7 +319,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 	int ret, no_addr;
 	struct fib_result res;
 	struct flowi4 fl4;
-	struct net *net;
+	struct net *net = dev_net(dev);
 	bool dev_match;
 
 	fl4.flowi4_oif = 0;
@@ -332,6 +332,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
 	fl4.flowi4_tun_key.tun_id = 0;
 	fl4.flowi4_flags = 0;
+	fl4.flowi4_uid = sock_net_uid(net, NULL);
 
 	no_addr = idev->ifa_list == NULL;
 
@@ -339,13 +340,12 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 
 	trace_fib_validate_source(dev, &fl4);
 
-	net = dev_net(dev);
 	if (fib_lookup(net, &fl4, &res, 0))
 		goto last_resort;
 	if (res.type != RTN_UNICAST &&
 	    (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
 		goto e_inval;
-	if (!rpf && !fib_num_tclassid_users(dev_net(dev)) &&
+	if (!rpf && !fib_num_tclassid_users(net) &&
 	    (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev)))
 		goto last_resort;
 	fib_combine_itag(itag, &res);
@@ -1876,6 +1876,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	fl4.flowi4_flags = 0;
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
+	fl4.flowi4_uid = sock_net_uid(net, NULL);
 	err = fib_lookup(net, &fl4, &res, 0);
 	if (err != 0) {
 		if (!IN_DEV_FORWARD(in_dev))
@@ -2008,6 +2009,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 {
 	int res;
 
+	tos &= IPTOS_RT_MASK;
 	rcu_read_lock();
 
 	/* Multicast recognition logic is moved from route cache to here.
@@ -693,6 +693,10 @@ vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
 	u->link = p->link;
 	u->i_key = p->i_key;
 	u->o_key = p->o_key;
+	if (u->i_key)
+		u->i_flags |= GRE_KEY;
+	if (u->o_key)
+		u->o_flags |= GRE_KEY;
 	u->proto = p->proto;
 
 	memcpy(u->name, p->name, sizeof(u->name));
@@ -1666,6 +1666,10 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 	struct net *net = sock_net(sk);
 	struct mr6_table *mrt;
 
+	if (sk->sk_type != SOCK_RAW ||
+	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
+		return -EOPNOTSUPP;
+
 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
 	if (!mrt)
 		return -ENOENT;
@@ -1677,9 +1681,6 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 
 	switch (optname) {
 	case MRT6_INIT:
-		if (sk->sk_type != SOCK_RAW ||
-		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
-			return -EOPNOTSUPP;
 		if (optlen < sizeof(int))
 			return -EINVAL;
 
@@ -1815,6 +1816,10 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
 	struct net *net = sock_net(sk);
 	struct mr6_table *mrt;
 
+	if (sk->sk_type != SOCK_RAW ||
+	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
+		return -EOPNOTSUPP;
+
 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
 	if (!mrt)
 		return -ENOENT;
@@ -388,7 +388,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
 drop:
 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
 	kfree_skb(skb);
-	return -1;
+	return 0;
 }
 
 /* Userspace will call sendmsg() on the tunnel socket to send L2TP
@@ -410,7 +410,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
 	struct net *net = nf_ct_exp_net(expect);
 	struct hlist_node *next;
 	unsigned int h;
-	int ret = 1;
+	int ret = 0;
 
 	if (!master_help) {
 		ret = -ESHUTDOWN;
@@ -460,14 +460,14 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
 
 	spin_lock_bh(&nf_conntrack_expect_lock);
 	ret = __nf_ct_expect_check(expect);
-	if (ret <= 0)
+	if (ret < 0)
 		goto out;
 
 	nf_ct_expect_insert(expect);
 
 	spin_unlock_bh(&nf_conntrack_expect_lock);
 	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
-	return ret;
+	return 0;
 out:
 	spin_unlock_bh(&nf_conntrack_expect_lock);
 	return ret;
@@ -528,6 +528,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
 		if (!nft_ct_tmpl_alloc_pcpu())
 			return -ENOMEM;
 		nft_ct_pcpu_template_refcnt++;
+		len = sizeof(u16);
 		break;
 #endif
 	default:
@@ -258,7 +258,7 @@ static int nft_bitmap_init(const struct nft_set *set,
 {
 	struct nft_bitmap *priv = nft_set_priv(set);
 
-	priv->bitmap_size = nft_bitmap_total_size(set->klen);
+	priv->bitmap_size = nft_bitmap_size(set->klen);
 
 	return 0;
 }
@@ -111,8 +111,7 @@ static void rds_ib_dev_free(struct work_struct *work)
 		kfree(i_ipaddr);
 	}
 
-	if (rds_ibdev->vector_load)
-		kfree(rds_ibdev->vector_load);
+	kfree(rds_ibdev->vector_load);
 
 	kfree(rds_ibdev);
 }
@@ -641,12 +641,12 @@ static int rds_tcp_init(void)
 	ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
 	if (ret) {
 		pr_warn("could not register rds_tcp_dev_notifier\n");
-		goto out;
+		goto out_slab;
 	}
 
 	ret = register_pernet_subsys(&rds_tcp_net_ops);
 	if (ret)
-		goto out_slab;
+		goto out_notifier;
 
 	ret = rds_tcp_recv_init();
 	if (ret)
@@ -664,9 +664,10 @@ out_recv:
 	rds_tcp_recv_exit();
 out_pernet:
 	unregister_pernet_subsys(&rds_tcp_net_ops);
-out_slab:
+out_notifier:
 	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
 		pr_warn("could not unregister rds_tcp_dev_notifier\n");
+out_slab:
 	kmem_cache_destroy(rds_tcp_conn_slab);
 out:
 	return ret;
@@ -1065,7 +1065,7 @@ static long rxrpc_read(const struct key *key,
 
 		switch (token->security_index) {
 		case RXRPC_SECURITY_RXKAD:
-			toksize += 8 * 4;	/* viceid, kvno, key*2, begin,
+			toksize += 9 * 4;	/* viceid, kvno, key*2 + len, begin,
 						 * end, primary, tktlen */
 			toksize += RND(token->kad->ticket_len);
 			break;
@@ -320,8 +320,10 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 
 	/* Barriers against rxrpc_input_data(). */
 	hard_ack = call->rx_hard_ack;
-	top = smp_load_acquire(&call->rx_top);
-	for (seq = hard_ack + 1; before_eq(seq, top); seq++) {
+	seq = hard_ack + 1;
+	while (top = smp_load_acquire(&call->rx_top),
+	       before_eq(seq, top)
+	       ) {
 		ix = seq & RXRPC_RXTX_BUFF_MASK;
 		skb = call->rxtx_buffer[ix];
 		if (!skb) {
@@ -394,6 +396,8 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 			ret = 1;
 			goto out;
 		}
+
+		seq++;
 	}
 
 out:
@@ -613,8 +613,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
 		goto err_mod;
 	}
 
-	err = nla_memdup_cookie(a, tb);
-	if (err < 0) {
+	if (nla_memdup_cookie(a, tb) < 0) {
+		err = -ENOMEM;
 		tcf_hash_release(a, bind);
 		goto err_mod;
 	}
@@ -859,10 +859,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
 		goto out_module_put;
 
 	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops);
-	if (err < 0)
+	if (err <= 0)
 		goto out_module_put;
-	if (err == 0)
-		goto noflush_out;
 
 	nla_nest_end(skb, nest);
 
@@ -879,7 +877,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
 out_module_put:
 	module_put(ops->owner);
 err_out:
-noflush_out:
 	kfree_skb(skb);
 	return err;
 }
@@ -199,6 +199,7 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
 			      sctp_scope_t scope, gfp_t gfp, int copy_flags)
 {
 	struct sctp_sockaddr_entry *addr;
+	union sctp_addr laddr;
 	int error = 0;
 
 	rcu_read_lock();
@@ -220,7 +221,10 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
 		    !(copy_flags & SCTP_ADDR6_PEERSUPP)))
 			continue;
 
-		if (sctp_bind_addr_state(bp, &addr->a) != -1)
+		laddr = addr->a;
+		/* also works for setting ipv6 address port */
+		laddr.v4.sin_port = htons(bp->port);
+		if (sctp_bind_addr_state(bp, &laddr) != -1)
 			continue;
 
 		error = sctp_add_bind_addr(bp, &addr->a, sizeof(addr->a),
@@ -4862,6 +4862,12 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
 	if (!asoc)
 		return -EINVAL;
 
+	/* If there is a thread waiting on more sndbuf space for
+	 * sending on this asoc, it cannot be peeled.
+	 */
+	if (waitqueue_active(&asoc->wait))
+		return -EBUSY;
+
 	/* An association cannot be branched off from an already peeled-off
 	 * socket, nor is this supported for tcp style sockets.
 	 */
@@ -7599,8 +7605,6 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 		 */
 		release_sock(sk);
 		current_timeo = schedule_timeout(current_timeo);
-		if (sk != asoc->base.sk)
-			goto do_error;
 		lock_sock(sk);
 
 		*timeo_p = current_timeo;
@@ -1505,19 +1505,21 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
 {
 	struct sk_buff_head xmitq;
 	struct tipc_node *n;
-	struct tipc_msg *hdr = buf_msg(skb);
-	int usr = msg_user(hdr);
+	struct tipc_msg *hdr;
 	int bearer_id = b->identity;
 	struct tipc_link_entry *le;
-	u16 bc_ack = msg_bcast_ack(hdr);
 	u32 self = tipc_own_addr(net);
-	int rc = 0;
+	int usr, rc = 0;
+	u16 bc_ack;
 
 	__skb_queue_head_init(&xmitq);
 
-	/* Ensure message is well-formed */
+	/* Ensure message is well-formed before touching the header */
 	if (unlikely(!tipc_msg_validate(skb)))
 		goto discard;
+	hdr = buf_msg(skb);
+	usr = msg_user(hdr);
+	bc_ack = msg_bcast_ack(hdr);
 
 	/* Handle arrival of discovery or broadcast packet */
 	if (unlikely(msg_non_seq(hdr))) {
@@ -2836,14 +2836,8 @@ static unsigned int xfrm_mtu(const struct dst_entry *dst)
 	return mtu ? : dst_mtu(dst->path);
 }
 
-static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
-					   struct sk_buff *skb,
-					   const void *daddr)
-{
-	return dst->path->ops->neigh_lookup(dst, skb, daddr);
-}
-
-static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
+static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
+					const void *daddr)
 {
 	const struct dst_entry *path = dst->path;
 
@@ -2857,6 +2851,25 @@ static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
 			daddr = &xfrm->id.daddr;
 	}
+	return daddr;
+}
+
+static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
+					   struct sk_buff *skb,
+					   const void *daddr)
+{
+	const struct dst_entry *path = dst->path;
+
+	if (!skb)
+		daddr = xfrm_get_dst_nexthop(dst, daddr);
+	return path->ops->neigh_lookup(path, skb, daddr);
+}
+
+static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
+{
+	const struct dst_entry *path = dst->path;
+
+	daddr = xfrm_get_dst_nexthop(dst, daddr);
 	path->ops->confirm_neigh(path, daddr);
 }
 