Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Make allocations less aggressive in x_tables, from Michal Hocko.
 2) Fix netfilter flowtable Kconfig deps, from Pablo Neira Ayuso.
 3) Fix connection loss problems in rtlwifi, from Larry Finger.
 4) Correct DRAM dump length for some chips in ath10k driver, from Yu Wang.
 5) Fix ABORT handling in rxrpc, from David Howells.
 6) Add SPDX tags to Sun networking drivers, from Shannon Nelson.
 7) Some ipv6 onlink handling fixes, from David Ahern.
 8) Netem packet scheduler interval calculation fix from Md. Islam.
 9) Don't put crypto buffers on-stack in rxrpc, from David Howells.
10) Fix handling of error non-delivery status in netlink multicast delivery over multiple namespaces, from Nicolas Dichtel.
11) Missing xdp flush in tuntap driver, from Jason Wang.
12) Synchronize RDS protocol netns/module teardown with rds object management, from Sowmini Varadhan.
13) Add nospec annotations to mpls, from Dan Williams.
14) Fix SKB truesize handling in TIPC, from Hoang Le.
15) Interrupt masking fixes in stmmac, from Niklas Cassel.
16) Don't allow ptr_ring objects to be sized outside of kmalloc's limits, from Jason Wang.
17) Don't allow SCTP chunks to be built which will have a length exceeding the chunk header's 16-bit length field, from Alexey Kodanev.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (82 commits)
  ibmvnic: Remove skb->protocol checks in ibmvnic_xmit
  bpf: fix rlimit in reuseport net selftest
  sctp: verify size of a new chunk in _sctp_make_chunk()
  s390/qeth: fix SETIP command handling
  s390/qeth: fix underestimated count of buffer elements
  ptr_ring: try vmalloc() when kmalloc() fails
  ptr_ring: fail early if queue occupies more than KMALLOC_MAX_SIZE
  net: stmmac: remove redundant enable of PMT irq
  net: stmmac: rename GMAC_INT_DEFAULT_MASK for dwmac4
  net: stmmac: discard disabled flags in interrupt status register
  ibmvnic: Reset long term map ID counter
  tools/libbpf: handle issues with bpf ELF objects containing .eh_frames
  selftests/bpf: add selftest that use test_libbpf_open
  selftests/bpf: add test program for loading BPF ELF files
  tools/libbpf: improve the pr_debug statements to contain section numbers
  bpf: Sync kernel ABI header with tooling header for bpf_common.h
  net: phy: fix phy_start to consider PHY_IGNORE_INTERRUPT
  net: thunder: change q_len's type to handle max ring size
  tipc: fix skb truesize/datasize ratio control
  net/sched: cls_u32: fix cls_u32 on filter replace
  ...
commit c839682c71
@@ -738,13 +738,13 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
 #else
	/* this is pretty, but avoids _divdu3 and is mostly correct */
	mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
-	if (rate_cps > (272 * mult))
+	if (rate_cps > (272ULL * mult))
		buf = 4;
-	else if (rate_cps > (204 * mult))
+	else if (rate_cps > (204ULL * mult))
		buf = 3;
-	else if (rate_cps > (136 * mult))
+	else if (rate_cps > (136ULL * mult))
		buf = 2;
-	else if (rate_cps > (68 * mult))
+	else if (rate_cps > (68ULL * mult))
		buf = 1;
	else
		buf = 0;
@@ -213,7 +213,7 @@ struct rx_tx_queue_stats {
 struct q_desc_mem {
	dma_addr_t	dma;
	u64		size;
-	u16		q_len;
+	u32		q_len;
	dma_addr_t	phys_base;
	void		*base;
	void		*unalign_base;
@@ -5166,7 +5166,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	adapter->regs = regs;
	err = t4_wait_dev_ready(regs);
	if (err < 0)
-		goto out_unmap_bar0;
+		goto out_free_adapter;

	/* We control everything through one PF */
	whoami = readl(regs + PL_WHOAMI_A);
@@ -354,6 +354,8 @@ static void release_stats_buffers(struct ibmvnic_adapter *adapter)
 {
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
+	adapter->tx_stats_buffers = NULL;
+	adapter->rx_stats_buffers = NULL;
 }

 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
@@ -599,6 +601,8 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);
+
+	adapter->vpd = NULL;
 }

 static void release_tx_pools(struct ibmvnic_adapter *adapter)
@@ -909,6 +913,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
+		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

@@ -1414,10 +1419,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
-	if ((*hdrs >> 7) & 1 &&
-	    (skb->protocol == htons(ETH_P_IP) ||
-	     skb->protocol == htons(ETH_P_IPV6) ||
-	     skb->protocol == htons(ETH_P_ARP))) {
+	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
@@ -1639,6 +1641,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
			return rc;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues) {
+			adapter->map_id = 1;
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			init_rx_pools(netdev);
@@ -1831,7 +1834,8 @@ restart_poll:
		u16 offset;
		u8 flags = 0;

-		if (unlikely(adapter->resetting)) {
+		if (unlikely(adapter->resetting &&
+			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
@@ -2908,8 +2912,12 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			cpu_to_be64(u64_crq[1]));

	if (rc) {
-		if (rc == H_CLOSED)
+		if (rc == H_CLOSED) {
			dev_warn(dev, "CRQ Queue closed\n");
+			if (adapter->resetting)
+				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+		}
+
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

@@ -1785,7 +1785,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
-	u16 numtc = 0;
+	u16 numtc = 1;
	u16 qcount;
	u8 offset;
	u16 qmap;
@@ -1795,9 +1795,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

+	/* Number of queues per enabled TC */
+	num_tc_qps = vsi->alloc_queue_pairs;
	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
-		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
@@ -1805,18 +1807,13 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
-	} else {
-		/* At least TC0 is enabled in non-DCB, non-MQPRIO case */
-		numtc = 1;
+		num_tc_qps = num_tc_qps / numtc;
+		num_tc_qps = min_t(int, num_tc_qps,
+				   i40e_pf_get_max_q_per_tc(pf));
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
-	/* Number of queues per enabled TC */
-	qcount = vsi->alloc_queue_pairs;
-
-	num_tc_qps = qcount / numtc;
-	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Do not allow use more TC queue pairs than MSI-X vectors exist */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -1831,9 +1828,13 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,

		switch (vsi->type) {
		case I40E_VSI_MAIN:
-			qcount = min_t(int, pf->alloc_rss_size,
-				       num_tc_qps);
-			break;
+			if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
+					   I40E_FLAG_FD_ATR_ENABLED)) ||
+			    vsi->tc_config.enabled_tc != 1) {
+				qcount = min_t(int, pf->alloc_rss_size,
+					       num_tc_qps);
+				break;
+			}
		case I40E_VSI_FDIR:
		case I40E_VSI_SRIOV:
		case I40E_VSI_VMDQ2:
@ -35,6 +35,7 @@
|
|||
|
||||
#include "../nfpcore/nfp_cpp.h"
|
||||
#include "../nfpcore/nfp_nffw.h"
|
||||
#include "../nfpcore/nfp_nsp.h"
|
||||
#include "../nfp_app.h"
|
||||
#include "../nfp_main.h"
|
||||
#include "../nfp_net.h"
|
||||
|
@ -87,9 +88,20 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
|
|||
static int
|
||||
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
|
||||
{
|
||||
struct nfp_pf *pf = app->pf;
|
||||
struct nfp_bpf_vnic *bv;
|
||||
int err;
|
||||
|
||||
if (!pf->eth_tbl) {
|
||||
nfp_err(pf->cpp, "No ETH table\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (pf->max_data_vnics != pf->eth_tbl->count) {
|
||||
nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
|
||||
pf->max_data_vnics, pf->eth_tbl->count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
bv = kzalloc(sizeof(*bv), GFP_KERNEL);
|
||||
if (!bv)
|
||||
return -ENOMEM;
|
||||
|
@ -170,6 +182,7 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
|
|||
return err;
|
||||
|
||||
bv->tc_prog = cls_bpf->prog;
|
||||
nn->port->tc_offload_cnt = !!bv->tc_prog;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -207,13 +220,6 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
|
|||
}
|
||||
}
|
||||
|
||||
static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
|
||||
{
|
||||
struct nfp_bpf_vnic *bv = nn->app_priv;
|
||||
|
||||
return !!bv->tc_prog;
|
||||
}
|
||||
|
||||
static int
|
||||
nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
|
||||
{
|
||||
|
@ -417,7 +423,6 @@ const struct nfp_app_type app_bpf = {
|
|||
.ctrl_msg_rx = nfp_bpf_ctrl_msg_rx,
|
||||
|
||||
.setup_tc = nfp_bpf_setup_tc,
|
||||
.tc_busy = nfp_bpf_tc_busy,
|
||||
.bpf = nfp_ndo_bpf,
|
||||
.xdp_offload = nfp_bpf_xdp_offload,
|
||||
};
|
||||
|
|
|
@ -349,6 +349,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
|
|||
struct tc_cls_flower_offload *flow, bool egress)
|
||||
{
|
||||
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
|
||||
struct nfp_port *port = nfp_port_from_netdev(netdev);
|
||||
struct nfp_flower_priv *priv = app->priv;
|
||||
struct nfp_fl_payload *flow_pay;
|
||||
struct nfp_fl_key_ls *key_layer;
|
||||
|
@ -390,6 +391,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
|
|||
INIT_HLIST_NODE(&flow_pay->link);
|
||||
flow_pay->tc_flower_cookie = flow->cookie;
|
||||
hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
|
||||
port->tc_offload_cnt++;
|
||||
|
||||
/* Deallocate flow payload when flower rule has been destroyed. */
|
||||
kfree(key_layer);
|
||||
|
@ -421,6 +423,7 @@ static int
|
|||
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
{
|
||||
struct nfp_port *port = nfp_port_from_netdev(netdev);
|
||||
struct nfp_fl_payload *nfp_flow;
|
||||
int err;
|
||||
|
||||
|
@ -442,6 +445,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
|
|||
|
||||
err_free_flow:
|
||||
hash_del_rcu(&nfp_flow->link);
|
||||
port->tc_offload_cnt--;
|
||||
kfree(nfp_flow->action_data);
|
||||
kfree(nfp_flow->mask_data);
|
||||
kfree(nfp_flow->unmasked_data);
|
||||
|
|
|
@ -92,7 +92,6 @@ extern const struct nfp_app_type app_flower;
|
|||
* @stop: stop application logic
|
||||
* @ctrl_msg_rx: control message handler
|
||||
* @setup_tc: setup TC ndo
|
||||
* @tc_busy: TC HW offload busy (rules loaded)
|
||||
* @bpf: BPF ndo offload-related calls
|
||||
* @xdp_offload: offload an XDP program
|
||||
* @eswitch_mode_get: get SR-IOV eswitch mode
|
||||
|
@ -135,7 +134,6 @@ struct nfp_app_type {
|
|||
|
||||
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
|
||||
enum tc_setup_type type, void *type_data);
|
||||
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
|
||||
int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
|
||||
struct netdev_bpf *xdp);
|
||||
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
|
||||
|
@ -301,13 +299,6 @@ static inline bool nfp_app_has_tc(struct nfp_app *app)
|
|||
return app && app->type->setup_tc;
|
||||
}
|
||||
|
||||
static inline bool nfp_app_tc_busy(struct nfp_app *app, struct nfp_net *nn)
|
||||
{
|
||||
if (!app || !app->type->tc_busy)
|
||||
return false;
|
||||
return app->type->tc_busy(app, nn);
|
||||
}
|
||||
|
||||
static inline int nfp_app_setup_tc(struct nfp_app *app,
|
||||
struct net_device *netdev,
|
||||
enum tc_setup_type type, void *type_data)
|
||||
|
|
|
@ -107,7 +107,7 @@ u16 immed_get_value(u64 instr)
|
|||
if (!unreg_is_imm(reg))
|
||||
reg = FIELD_GET(OP_IMMED_B_SRC, instr);
|
||||
|
||||
return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr);
|
||||
return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr) << 8;
|
||||
}
|
||||
|
||||
void immed_set_value(u64 *instr, u16 immed)
|
||||
|
|
|
@ -649,3 +649,4 @@ MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw");
|
|||
MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver.");
|
||||
MODULE_VERSION(UTS_RELEASE);
|
||||
|
|
|
@ -3210,10 +3210,9 @@ static int nfp_net_set_features(struct net_device *netdev,
|
|||
new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
|
||||
}
|
||||
|
||||
if (changed & NETIF_F_HW_TC && nfp_app_tc_busy(nn->app, nn)) {
|
||||
nn_err(nn, "Cannot disable HW TC offload while in use\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
err = nfp_port_set_features(netdev, features);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
|
||||
netdev->features, features, changed);
|
||||
|
@ -3734,7 +3733,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
|
|||
|
||||
netdev->features = netdev->hw_features;
|
||||
|
||||
if (nfp_app_has_tc(nn->app))
|
||||
if (nfp_app_has_tc(nn->app) && nn->port)
|
||||
netdev->hw_features |= NETIF_F_HW_TC;
|
||||
|
||||
/* Advertise but disable TSO by default. */
|
||||
|
@ -3751,6 +3750,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
|
|||
netdev->min_mtu = ETH_MIN_MTU;
|
||||
netdev->max_mtu = nn->max_mtu;
|
||||
|
||||
netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
|
||||
|
||||
netif_carrier_off(netdev);
|
||||
|
||||
nfp_net_set_ethtool_ops(netdev);
|
||||
|
|
|
@ -59,9 +59,12 @@
|
|||
#define NFP_NET_RX_OFFSET 32
|
||||
|
||||
/**
|
||||
* Maximum header size supported for LSO frames
|
||||
* LSO parameters
|
||||
* %NFP_NET_LSO_MAX_HDR_SZ: Maximum header size supported for LSO frames
|
||||
* %NFP_NET_LSO_MAX_SEGS: Maximum number of segments LSO frame can produce
|
||||
*/
|
||||
#define NFP_NET_LSO_MAX_HDR_SZ 255
|
||||
#define NFP_NET_LSO_MAX_SEGS 64
|
||||
|
||||
/**
|
||||
* Prepend field types
|
||||
|
|
|
@ -265,6 +265,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
|
|||
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
|
||||
.ndo_get_vf_config = nfp_app_get_vf_config,
|
||||
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
|
||||
.ndo_set_features = nfp_port_set_features,
|
||||
};
|
||||
|
||||
static void nfp_repr_clean(struct nfp_repr *repr)
|
||||
|
|
|
@ -32,6 +32,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/lockdep.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <net/switchdev.h>
|
||||
|
||||
#include "nfpcore/nfp_cpp.h"
|
||||
|
@ -100,6 +101,23 @@ int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
|
|||
return nfp_app_setup_tc(port->app, netdev, type, type_data);
|
||||
}
|
||||
|
||||
int nfp_port_set_features(struct net_device *netdev, netdev_features_t features)
|
||||
{
|
||||
struct nfp_port *port;
|
||||
|
||||
port = nfp_port_from_netdev(netdev);
|
||||
if (!port)
|
||||
return 0;
|
||||
|
||||
if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
|
||||
port->tc_offload_cnt) {
|
||||
netdev_err(netdev, "Cannot disable HW TC offload while offloads active\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nfp_port *
|
||||
nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id)
|
||||
{
|
||||
|
|
|
@ -72,6 +72,8 @@ enum nfp_port_flags {
|
|||
* @netdev: backpointer to associated netdev
|
||||
* @type: what port type does the entity represent
|
||||
* @flags: port flags
|
||||
* @tc_offload_cnt: number of active TC offloads, how offloads are counted
|
||||
* is not defined, use as a boolean
|
||||
* @app: backpointer to the app structure
|
||||
* @dl_port: devlink port structure
|
||||
* @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
|
||||
|
@ -87,6 +89,7 @@ struct nfp_port {
|
|||
enum nfp_port_type type;
|
||||
|
||||
unsigned long flags;
|
||||
unsigned long tc_offload_cnt;
|
||||
|
||||
struct nfp_app *app;
|
||||
|
||||
|
@ -121,6 +124,9 @@ static inline bool nfp_port_is_vnic(const struct nfp_port *port)
|
|||
return port->type == NFP_PORT_PF_PORT || port->type == NFP_PORT_VF_PORT;
|
||||
}
|
||||
|
||||
int
|
||||
nfp_port_set_features(struct net_device *netdev, netdev_features_t features);
|
||||
|
||||
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
|
||||
struct nfp_port *
|
||||
nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
|
||||
|
|
|
@ -74,8 +74,6 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
|
|||
/* Mask GMAC interrupts */
|
||||
value = GMAC_INT_DEFAULT_MASK;
|
||||
|
||||
if (hw->pmt)
|
||||
value &= ~GMAC_INT_DISABLE_PMT;
|
||||
if (hw->pcs)
|
||||
value &= ~GMAC_INT_DISABLE_PCS;
|
||||
|
||||
|
|
|
@ -98,7 +98,7 @@
|
|||
#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
|
||||
GMAC_INT_PCS_ANE)
|
||||
|
||||
#define GMAC_INT_DEFAULT_MASK (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN)
|
||||
#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN)
|
||||
|
||||
enum dwmac4_irq_status {
|
||||
time_stamp_irq = 0x00001000,
|
||||
|
|
|
@ -61,10 +61,9 @@ static void dwmac4_core_init(struct mac_device_info *hw,
|
|||
|
||||
writel(value, ioaddr + GMAC_CONFIG);
|
||||
|
||||
/* Mask GMAC interrupts */
|
||||
value = GMAC_INT_DEFAULT_MASK;
|
||||
if (hw->pmt)
|
||||
value |= GMAC_INT_PMT_EN;
|
||||
/* Enable GMAC interrupts */
|
||||
value = GMAC_INT_DEFAULT_ENABLE;
|
||||
|
||||
if (hw->pcs)
|
||||
value |= GMAC_PCS_IRQ_DEFAULT;
|
||||
|
||||
|
@ -572,10 +571,12 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
|
|||
struct stmmac_extra_stats *x)
|
||||
{
|
||||
void __iomem *ioaddr = hw->pcsr;
|
||||
u32 intr_status;
|
||||
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
|
||||
u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
|
||||
int ret = 0;
|
||||
|
||||
intr_status = readl(ioaddr + GMAC_INT_STATUS);
|
||||
/* Discard disabled bits */
|
||||
intr_status &= intr_enable;
|
||||
|
||||
/* Not used events (e.g. MMC interrupts) are not handled. */
|
||||
if ((intr_status & mmc_tx_irq))
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
#
|
||||
# Sun network device configuration
|
||||
#
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
|
||||
*
|
||||
* Copyright (C) 2004 Sun Microsystems Inc.
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
|
||||
* cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
|
||||
*
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* ldmvsw.c: Sun4v LDOM Virtual Switch Driver.
|
||||
*
|
||||
* Copyright (C) 2016-2017 Oracle. All rights reserved.
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* niu.c: Neptune ethernet driver.
|
||||
*
|
||||
* Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
|
||||
*
|
||||
* Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net)
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
|
||||
* sungem.c: Sun GEM ethernet driver.
|
||||
*
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
|
||||
* auto carrier detecting ethernet driver. Also known as the
|
||||
* "Happy Meal Ethernet" found on SunSwift SBUS cards.
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
|
||||
* Once again I am out to prove that every ethernet
|
||||
* controller out there can be most efficiently programmed
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* sunvnet.c: Sun LDOM Virtual Network Driver.
|
||||
*
|
||||
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* sunvnet.c: Sun LDOM Virtual Network Driver.
|
||||
*
|
||||
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
|
||||
|
|
|
@ -1636,6 +1636,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
|
|||
q_idx = q_idx % cpsw->tx_ch_num;
|
||||
|
||||
txch = cpsw->txv[q_idx].ch;
|
||||
txq = netdev_get_tx_queue(ndev, q_idx);
|
||||
ret = cpsw_tx_packet_submit(priv, skb, txch);
|
||||
if (unlikely(ret != 0)) {
|
||||
cpsw_err(priv, tx_err, "desc submit failed\n");
|
||||
|
@ -1646,15 +1647,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
|
|||
* tell the kernel to stop sending us tx frames.
|
||||
*/
|
||||
if (unlikely(!cpdma_check_free_tx_desc(txch))) {
|
||||
txq = netdev_get_tx_queue(ndev, q_idx);
|
||||
netif_tx_stop_queue(txq);
|
||||
|
||||
/* Barrier, so that stop_queue visible to other cpus */
|
||||
smp_mb__after_atomic();
|
||||
|
||||
if (cpdma_check_free_tx_desc(txch))
|
||||
netif_tx_wake_queue(txq);
|
||||
}
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
fail:
|
||||
ndev->stats.tx_dropped++;
|
||||
txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
|
||||
netif_tx_stop_queue(txq);
|
||||
|
||||
/* Barrier, so that stop_queue visible to other cpus */
|
||||
smp_mb__after_atomic();
|
||||
|
||||
if (cpdma_check_free_tx_desc(txch))
|
||||
netif_tx_wake_queue(txq);
|
||||
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
|
|
|
@ -822,7 +822,7 @@ void phy_start(struct phy_device *phydev)
|
|||
phy_resume(phydev);
|
||||
|
||||
/* make sure interrupts are re-enabled for the PHY */
|
||||
if (phydev->irq != PHY_POLL) {
|
||||
if (phy_interrupt_is_valid(phydev)) {
|
||||
err = phy_enable_interrupts(phydev);
|
||||
if (err < 0)
|
||||
break;
|
||||
|
|
|
@ -181,6 +181,7 @@ struct tun_file {
|
|||
struct tun_struct *detached;
|
||||
struct ptr_ring tx_ring;
|
||||
struct xdp_rxq_info xdp_rxq;
|
||||
int xdp_pending_pkts;
|
||||
};
|
||||
|
||||
struct tun_flow_entry {
|
||||
|
@ -1665,6 +1666,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
|
|||
case XDP_REDIRECT:
|
||||
get_page(alloc_frag->page);
|
||||
alloc_frag->offset += buflen;
|
||||
++tfile->xdp_pending_pkts;
|
||||
err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
|
||||
if (err)
|
||||
goto err_redirect;
|
||||
|
@ -1986,6 +1988,11 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
result = tun_get_user(tun, tfile, NULL, from,
|
||||
file->f_flags & O_NONBLOCK, false);
|
||||
|
||||
if (tfile->xdp_pending_pkts) {
|
||||
tfile->xdp_pending_pkts = 0;
|
||||
xdp_do_flush_map();
|
||||
}
|
||||
|
||||
tun_put(tun);
|
||||
return result;
|
||||
}
|
||||
|
@ -2322,6 +2329,13 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
|
|||
ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
|
||||
m->msg_flags & MSG_DONTWAIT,
|
||||
m->msg_flags & MSG_MORE);
|
||||
|
||||
if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT ||
|
||||
!(m->msg_flags & MSG_MORE)) {
|
||||
tfile->xdp_pending_pkts = 0;
|
||||
xdp_do_flush_map();
|
||||
}
|
||||
|
||||
tun_put(tun);
|
||||
return ret;
|
||||
}
|
||||
|
@ -3153,6 +3167,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
|
|||
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
|
||||
|
||||
memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
|
||||
tfile->xdp_pending_pkts = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -90,6 +90,35 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
|||
.target_64bit = false,
|
||||
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
|
||||
},
|
||||
{
|
||||
.id = QCA988X_HW_2_0_VERSION,
|
||||
.dev_id = QCA988X_2_0_DEVICE_ID_UBNT,
|
||||
.name = "qca988x hw2.0 ubiquiti",
|
||||
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
|
||||
.uart_pin = 7,
|
||||
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
|
||||
.otp_exe_param = 0,
|
||||
.channel_counters_freq_hz = 88000,
|
||||
.max_probe_resp_desc_thres = 0,
|
||||
.cal_data_len = 2116,
|
||||
.fw = {
|
||||
.dir = QCA988X_HW_2_0_FW_DIR,
|
||||
.board = QCA988X_HW_2_0_BOARD_DATA_FILE,
|
||||
.board_size = QCA988X_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.hw_ops = &qca988x_ops,
|
||||
.decap_align_bytes = 4,
|
||||
.spectral_bin_discard = 0,
|
||||
.vht160_mcs_rx_highest = 0,
|
||||
.vht160_mcs_tx_highest = 0,
|
||||
.n_cipher_suites = 8,
|
||||
.num_peers = TARGET_TLV_NUM_PEERS,
|
||||
.ast_skid_limit = 0x10,
|
||||
.num_wds_entries = 0x20,
|
||||
.target_64bit = false,
|
||||
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
|
||||
},
|
||||
{
|
||||
.id = QCA9887_HW_1_0_VERSION,
|
||||
.dev_id = QCA9887_1_0_DEVICE_ID,
|
||||
|
@ -1276,10 +1305,7 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
|
|||
len -= sizeof(*hdr);
|
||||
data = hdr->data;
|
||||
|
||||
/* jump over the padding */
|
||||
ie_len = ALIGN(ie_len, 4);
|
||||
|
||||
if (len < ie_len) {
|
||||
if (len < ALIGN(ie_len, 4)) {
|
||||
ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
|
||||
ie_id, ie_len, len);
|
||||
ret = -EINVAL;
|
||||
|
@ -1318,6 +1344,9 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
|
|||
goto out;
|
||||
}
|
||||
|
||||
/* jump over the padding */
|
||||
ie_len = ALIGN(ie_len, 4);
|
||||
|
||||
len -= ie_len;
|
||||
data += ie_len;
|
||||
}
|
||||
|
@ -1448,9 +1477,6 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
|
|||
len -= sizeof(*hdr);
|
||||
data += sizeof(*hdr);
|
||||
|
||||
/* jump over the padding */
|
||||
ie_len = ALIGN(ie_len, 4);
|
||||
|
||||
if (len < ie_len) {
|
||||
ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
|
||||
ie_id, len, ie_len);
|
||||
|
@ -1556,6 +1582,9 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
|
|||
break;
|
||||
}
|
||||
|
||||
/* jump over the padding */
|
||||
ie_len = ALIGN(ie_len, 4);
|
||||
|
||||
len -= ie_len;
|
||||
data += ie_len;
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
/*
|
||||
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@ -616,7 +617,7 @@ static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
|
|||
{
|
||||
.type = ATH10K_MEM_REGION_TYPE_DRAM,
|
||||
.start = 0x400000,
|
||||
.len = 0x90000,
|
||||
.len = 0xa8000,
|
||||
.name = "DRAM",
|
||||
.section_table = {
|
||||
.sections = NULL,
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
/*
|
||||
* Copyright (c) 2005-2011 Atheros Communications Inc.
|
||||
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@ -81,6 +82,8 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
|
|||
void ath10k_debug_print_board_info(struct ath10k *ar)
|
||||
{
|
||||
char boardinfo[100];
|
||||
const struct firmware *board;
|
||||
u32 crc;
|
||||
|
||||
if (ar->id.bmi_ids_valid)
|
||||
scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
|
||||
|
@ -88,11 +91,16 @@ void ath10k_debug_print_board_info(struct ath10k *ar)
|
|||
else
|
||||
scnprintf(boardinfo, sizeof(boardinfo), "N/A");
|
||||
|
||||
board = ar->normal_mode_fw.board;
|
||||
if (!IS_ERR_OR_NULL(board))
|
||||
crc = crc32_le(0, board->data, board->size);
|
||||
else
|
||||
crc = 0;
|
||||
|
||||
ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
|
||||
ar->bd_api,
|
||||
boardinfo,
|
||||
crc32_le(0, ar->normal_mode_fw.board->data,
|
||||
ar->normal_mode_fw.board->size));
|
||||
crc);
|
||||
}
|
||||
|
||||
void ath10k_debug_print_boot_info(struct ath10k *ar)
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
|
||||
#define ATH10K_FW_DIR "ath10k"
|
||||
|
||||
#define QCA988X_2_0_DEVICE_ID_UBNT (0x11ac)
|
||||
#define QCA988X_2_0_DEVICE_ID (0x003c)
|
||||
#define QCA6164_2_1_DEVICE_ID (0x0041)
|
||||
#define QCA6174_2_1_DEVICE_ID (0x003e)
|
||||
|
|
|
@ -58,6 +58,9 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
|
|||
#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
|
||||
|
||||
static const struct pci_device_id ath10k_pci_id_table[] = {
|
||||
/* PCI-E QCA988X V2 (Ubiquiti branded) */
|
||||
{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },
|
||||
|
||||
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
|
||||
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
|
||||
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
|
||||
|
@ -74,6 +77,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
|
|||
* hacks. ath10k doesn't have them and these devices crash horribly
|
||||
* because of that.
|
||||
*/
|
||||
{ QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
|
||||
{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
|
||||
|
||||
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
|
||||
|
@ -2193,6 +2197,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
|
|||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
|
||||
switch (ar_pci->pdev->device) {
|
||||
case QCA988X_2_0_DEVICE_ID_UBNT:
|
||||
case QCA988X_2_0_DEVICE_ID:
|
||||
case QCA99X0_2_0_DEVICE_ID:
|
||||
case QCA9888_2_0_DEVICE_ID:
|
||||
|
@ -3424,6 +3429,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
|
|||
u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
|
||||
|
||||
switch (pci_dev->device) {
|
||||
case QCA988X_2_0_DEVICE_ID_UBNT:
|
||||
case QCA988X_2_0_DEVICE_ID:
|
||||
hw_rev = ATH10K_HW_QCA988X;
|
||||
pci_ps = false;
|
||||
|
|
|
@ -72,7 +72,7 @@ static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
|
|||
s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
|
||||
s16 nf)
|
||||
{
|
||||
s8 noise = ath9k_hw_get_default_nf(ah, chan, 0);
|
||||
s8 noise = ATH_DEFAULT_NOISE_FLOOR;
|
||||
|
||||
if (nf) {
|
||||
s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
|
||||
|
|
|
@ -24,6 +24,7 @@ static const struct usb_device_id ath9k_hif_usb_ids[] = {
|
|||
{ USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
|
||||
{ USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
|
||||
{ USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
|
||||
{ USB_DEVICE(0x07b8, 0x9271) }, /* Altai WA1011N-GU */
|
||||
{ USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
|
||||
{ USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
|
||||
{ USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
|
||||
|
|
|
@ -98,6 +98,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
|
|||
reorder_work.work);
|
||||
struct mt76_dev *dev = tid->dev;
|
||||
struct sk_buff_head frames;
|
||||
int nframes;
|
||||
|
||||
__skb_queue_head_init(&frames);
|
||||
|
||||
|
@ -105,14 +106,44 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
|
|||
|
||||
spin_lock(&tid->lock);
|
||||
mt76_rx_aggr_check_release(tid, &frames);
|
||||
nframes = tid->nframes;
|
||||
spin_unlock(&tid->lock);
|
||||
|
||||
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, REORDER_TIMEOUT);
|
||||
if (nframes)
|
||||
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
|
||||
REORDER_TIMEOUT);
|
||||
mt76_rx_complete(dev, &frames, -1);
|
||||
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
static void
|
||||
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
|
||||
{
|
||||
struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
|
||||
struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data;
|
||||
struct mt76_wcid *wcid = status->wcid;
|
||||
struct mt76_rx_tid *tid;
|
||||
u16 seqno;
|
||||
|
||||
if (!ieee80211_is_ctl(bar->frame_control))
|
||||
return;
|
||||
|
||||
if (!ieee80211_is_back_req(bar->frame_control))
|
||||
return;
|
||||
|
||||
status->tid = le16_to_cpu(bar->control) >> 12;
|
||||
seqno = le16_to_cpu(bar->start_seq_num) >> 4;
|
||||
tid = rcu_dereference(wcid->aggr[status->tid]);
|
||||
if (!tid)
|
||||
return;
|
||||
|
||||
spin_lock_bh(&tid->lock);
|
||||
mt76_rx_aggr_release_frames(tid, frames, seqno);
|
||||
mt76_rx_aggr_release_head(tid, frames);
|
||||
spin_unlock_bh(&tid->lock);
|
||||
}
|
||||
|
||||
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
|
||||
{
|
||||
struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
|
||||
|
@ -126,9 +157,14 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
|
|||
__skb_queue_tail(frames, skb);
|
||||
|
||||
sta = wcid_to_sta(wcid);
|
||||
if (!sta || !status->aggr)
|
||||
if (!sta)
|
||||
return;
|
||||
|
||||
if (!status->aggr) {
|
||||
mt76_rx_aggr_check_ctl(skb, frames);
|
||||
return;
|
||||
}
|
||||
|
||||
tid = rcu_dereference(wcid->aggr[status->tid]);
|
||||
if (!tid)
|
||||
return;
|
||||
|
|
|
@ -276,6 +276,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
|
|||
ieee80211_hw_set(hw, TX_AMSDU);
|
||||
ieee80211_hw_set(hw, TX_FRAG_LIST);
|
||||
ieee80211_hw_set(hw, MFP_CAPABLE);
|
||||
ieee80211_hw_set(hw, AP_LINK_PS);
|
||||
|
||||
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
|
||||
|
||||
|
@ -470,6 +471,53 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
|
||||
{
|
||||
struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
|
||||
struct ieee80211_sta *sta;
|
||||
struct mt76_wcid *wcid = status->wcid;
|
||||
bool ps;
|
||||
|
||||
if (!wcid || !wcid->sta)
|
||||
return;
|
||||
|
||||
sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv);
|
||||
|
||||
if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
|
||||
return;
|
||||
|
||||
if (ieee80211_is_pspoll(hdr->frame_control)) {
|
||||
ieee80211_sta_pspoll(sta);
|
||||
return;
|
||||
}
|
||||
|
||||
if (ieee80211_has_morefrags(hdr->frame_control) ||
|
||||
!(ieee80211_is_mgmt(hdr->frame_control) ||
|
||||
ieee80211_is_data(hdr->frame_control)))
|
||||
return;
|
||||
|
||||
ps = ieee80211_has_pm(hdr->frame_control);
|
||||
|
||||
if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
|
||||
ieee80211_is_qos_nullfunc(hdr->frame_control)))
|
||||
ieee80211_sta_uapsd_trigger(sta, status->tid);
|
||||
|
||||
if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
|
||||
return;
|
||||
|
||||
if (ps) {
|
||||
set_bit(MT_WCID_FLAG_PS, &wcid->flags);
|
||||
mt76_stop_tx_queues(dev, sta, true);
|
||||
} else {
|
||||
clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
|
||||
}
|
||||
|
||||
ieee80211_sta_ps_transition(sta, ps);
|
||||
dev->drv->sta_ps(dev, sta, ps);
|
||||
}
|
||||
|
||||
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
|
||||
int queue)
|
||||
{
|
||||
|
@ -498,8 +546,10 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
|
|||
|
||||
__skb_queue_head_init(&frames);
|
||||
|
||||
while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL)
|
||||
while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
|
||||
mt76_check_ps(dev, skb);
|
||||
mt76_rx_aggr_reorder(skb, &frames);
|
||||
}
|
||||
|
||||
mt76_rx_complete(dev, &frames, q);
|
||||
}
|
||||
|
|
|
@ -121,11 +121,18 @@ struct mt76_queue_ops {
|
|||
void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
|
||||
};
|
||||
|
||||
enum mt76_wcid_flags {
|
||||
MT_WCID_FLAG_CHECK_PS,
|
||||
MT_WCID_FLAG_PS,
|
||||
};
|
||||
|
||||
struct mt76_wcid {
|
||||
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
|
||||
|
||||
struct work_struct aggr_work;
|
||||
|
||||
unsigned long flags;
|
||||
|
||||
u8 idx;
|
||||
u8 hw_key_idx;
|
||||
|
||||
|
@ -206,6 +213,9 @@ struct mt76_driver_ops {
|
|||
struct sk_buff *skb);
|
||||
|
||||
void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
|
||||
|
||||
void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
|
||||
bool ps);
|
||||
};
|
||||
|
||||
struct mt76_channel_state {
|
||||
|
|
|
@ -218,6 +218,8 @@ void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
|
|||
void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
|
||||
struct sk_buff *skb);
|
||||
|
||||
void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
|
||||
|
||||
void mt76x2_update_channel(struct mt76_dev *mdev);
|
||||
|
||||
s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
|
||||
|
|
|
@ -630,6 +630,7 @@ struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
|
|||
.tx_complete_skb = mt76x2_tx_complete_skb,
|
||||
.rx_skb = mt76x2_queue_rx_skb,
|
||||
.rx_poll_complete = mt76x2_rx_poll_complete,
|
||||
.sta_ps = mt76x2_sta_ps,
|
||||
};
|
||||
struct ieee80211_hw *hw;
|
||||
struct mt76x2_dev *dev;
|
||||
|
|
|
@ -341,7 +341,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
|
|||
|
||||
mt76x2_remove_hdr_pad(skb, pad_len);
|
||||
|
||||
if (rxinfo & MT_RXINFO_BA)
|
||||
if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
|
||||
status->aggr = true;
|
||||
|
||||
if (WARN_ON_ONCE(len > skb->len))
|
||||
|
|
|
@ -282,6 +282,9 @@ mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|||
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
|
||||
mt76x2_txq_init(dev, sta->txq[i]);
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_AP)
|
||||
set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
|
||||
|
||||
rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
|
||||
|
||||
out:
|
||||
|
@ -311,23 +314,14 @@ mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
mt76x2_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
|
||||
void
|
||||
mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
|
||||
{
|
||||
struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
|
||||
struct mt76x2_dev *dev = hw->priv;
|
||||
struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
|
||||
int idx = msta->wcid.idx;
|
||||
|
||||
switch (cmd) {
|
||||
case STA_NOTIFY_SLEEP:
|
||||
mt76x2_mac_wcid_set_drop(dev, idx, true);
|
||||
mt76_stop_tx_queues(&dev->mt76, sta, true);
|
||||
break;
|
||||
case STA_NOTIFY_AWAKE:
|
||||
mt76x2_mac_wcid_set_drop(dev, idx, false);
|
||||
break;
|
||||
}
|
||||
mt76x2_mac_wcid_set_drop(dev, idx, ps);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -549,6 +543,12 @@ static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
|
|||
mutex_unlock(&dev->mutex);
|
||||
}
|
||||
|
||||
static int
|
||||
mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct ieee80211_ops mt76x2_ops = {
|
||||
.tx = mt76x2_tx,
|
||||
.start = mt76x2_start,
|
||||
|
@ -560,7 +560,6 @@ const struct ieee80211_ops mt76x2_ops = {
|
|||
.bss_info_changed = mt76x2_bss_info_changed,
|
||||
.sta_add = mt76x2_sta_add,
|
||||
.sta_remove = mt76x2_sta_remove,
|
||||
.sta_notify = mt76x2_sta_notify,
|
||||
.set_key = mt76x2_set_key,
|
||||
.conf_tx = mt76x2_conf_tx,
|
||||
.sw_scan_start = mt76x2_sw_scan,
|
||||
|
@ -573,5 +572,6 @@ const struct ieee80211_ops mt76x2_ops = {
|
|||
.release_buffered_frames = mt76_release_buffered_frames,
|
||||
.set_coverage_class = mt76x2_set_coverage_class,
|
||||
.get_survey = mt76_get_survey,
|
||||
.set_tim = mt76x2_set_tim,
|
||||
};
|
||||
|
||||
|
|
|
@ -1123,7 +1123,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
|
|||
}
|
||||
if (0 == tmp) {
|
||||
read_addr = REG_DBI_RDATA + addr % 4;
|
||||
ret = rtl_read_word(rtlpriv, read_addr);
|
||||
ret = rtl_read_byte(rtlpriv, read_addr);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
@ -1165,7 +1165,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
|
|||
}
|
||||
|
||||
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
|
||||
_rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7));
|
||||
_rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) |
|
||||
ASPM_L1_LATENCY << 3);
|
||||
|
||||
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
|
||||
_rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));
|
||||
|
|
|
@ -99,6 +99,7 @@
|
|||
#define RTL_USB_MAX_RX_COUNT 100
|
||||
#define QBSS_LOAD_SIZE 5
|
||||
#define MAX_WMMELE_LENGTH 64
|
||||
#define ASPM_L1_LATENCY 7
|
||||
|
||||
#define TOTAL_CAM_ENTRY 32
|
||||
|
||||
|
|
|
@ -591,6 +591,11 @@ struct qeth_cmd_buffer {
|
|||
void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
|
||||
};
|
||||
|
||||
static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
|
||||
{
|
||||
return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
|
||||
}
|
||||
|
||||
/**
|
||||
* definition of a qeth channel, used for read and write
|
||||
*/
|
||||
|
@ -846,7 +851,7 @@ struct qeth_trap_id {
|
|||
*/
|
||||
static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
|
||||
{
|
||||
return PFN_UP(end - 1) - PFN_DOWN(start);
|
||||
return PFN_UP(end) - PFN_DOWN(start);
|
||||
}
|
||||
|
||||
static inline int qeth_get_micros(void)
|
||||
|
|
|
@ -2120,7 +2120,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
|
|||
unsigned long flags;
|
||||
struct qeth_reply *reply = NULL;
|
||||
unsigned long timeout, event_timeout;
|
||||
struct qeth_ipa_cmd *cmd;
|
||||
struct qeth_ipa_cmd *cmd = NULL;
|
||||
|
||||
QETH_CARD_TEXT(card, 2, "sendctl");
|
||||
|
||||
|
@ -2146,10 +2146,13 @@ int qeth_send_control_data(struct qeth_card *card, int len,
|
|||
while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
|
||||
qeth_prepare_control_data(card, len, iob);
|
||||
|
||||
if (IS_IPA(iob->data))
|
||||
if (IS_IPA(iob->data)) {
|
||||
cmd = __ipa_cmd(iob);
|
||||
event_timeout = QETH_IPA_TIMEOUT;
|
||||
else
|
||||
} else {
|
||||
event_timeout = QETH_TIMEOUT;
|
||||
}
|
||||
|
||||
timeout = jiffies + event_timeout;
|
||||
|
||||
QETH_CARD_TEXT(card, 6, "noirqpnd");
|
||||
|
@ -2174,9 +2177,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
|
|||
|
||||
/* we have only one long running ipassist, since we can ensure
|
||||
process context of this command we can sleep */
|
||||
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
|
||||
if ((cmd->hdr.command == IPA_CMD_SETIP) &&
|
||||
(cmd->hdr.prot_version == QETH_PROT_IPV4)) {
|
||||
if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
|
||||
cmd->hdr.prot_version == QETH_PROT_IPV4) {
|
||||
if (!wait_event_timeout(reply->wait_q,
|
||||
atomic_read(&reply->received), event_timeout))
|
||||
goto time_err;
|
||||
|
|
|
@ -31,7 +31,7 @@ config SSB_BLOCKIO
|
|||
|
||||
config SSB_PCIHOST_POSSIBLE
|
||||
bool
|
||||
depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY
|
||||
depends on SSB && (PCI = y || PCI = SSB) && (PCI_DRIVERS_LEGACY || !MIPS)
|
||||
default y
|
||||
|
||||
config SSB_PCIHOST
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
#ifdef __KERNEL__
|
||||
struct device;
|
||||
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
|
||||
unsigned char *arch_get_platform_get_mac_address(void);
|
||||
unsigned char *arch_get_platform_mac_address(void);
|
||||
u32 eth_get_headlen(void *data, unsigned int max_len);
|
||||
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
|
||||
extern const struct header_ops eth_header_ops;
|
||||
|
|
|
@ -149,6 +149,8 @@
|
|||
#define PCI_VENDOR_ID_DYNALINK 0x0675
|
||||
#define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702
|
||||
|
||||
#define PCI_VENDOR_ID_UBIQUITI 0x0777
|
||||
|
||||
#define PCI_VENDOR_ID_BERKOM 0x0871
|
||||
#define PCI_DEVICE_ID_BERKOM_A1T 0xffa1
|
||||
#define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2
|
||||
|
|
|
@ -464,9 +464,14 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
|
|||
__PTR_RING_PEEK_CALL_v; \
|
||||
})
|
||||
|
||||
/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See
|
||||
* documentation for vmalloc for which of them are legal.
|
||||
*/
|
||||
static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
|
||||
{
|
||||
return kcalloc(size, sizeof(void *), gfp);
|
||||
if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
|
||||
return NULL;
|
||||
return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
|
||||
}
|
||||
|
||||
static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
|
||||
|
@ -601,7 +606,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
|
|||
spin_unlock(&(r)->producer_lock);
|
||||
spin_unlock_irqrestore(&(r)->consumer_lock, flags);
|
||||
|
||||
kfree(old);
|
||||
kvfree(old);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -641,7 +646,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
|
|||
}
|
||||
|
||||
for (i = 0; i < nrings; ++i)
|
||||
kfree(queues[i]);
|
||||
kvfree(queues[i]);
|
||||
|
||||
kfree(queues);
|
||||
|
||||
|
@ -649,7 +654,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
|
|||
|
||||
nomem:
|
||||
while (--i >= 0)
|
||||
kfree(queues[i]);
|
||||
kvfree(queues[i]);
|
||||
|
||||
kfree(queues);
|
||||
|
||||
|
@ -664,7 +669,7 @@ static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
|
|||
if (destroy)
|
||||
while ((ptr = ptr_ring_consume(r)))
|
||||
destroy(ptr);
|
||||
kfree(r->queue);
|
||||
kvfree(r->queue);
|
||||
}
|
||||
|
||||
#endif /* _LINUX_PTR_RING_H */
|
||||
|
|
|
@ -213,11 +213,6 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
|
|||
return nf_ct_delete(ct, 0, 0);
|
||||
}
|
||||
|
||||
/* These are for NAT. Icky. */
|
||||
extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
|
||||
enum ip_conntrack_dir dir,
|
||||
u32 seq);
|
||||
|
||||
/* Set all unconfirmed conntrack as dying */
|
||||
void nf_ct_unconfirmed_destroy(struct net *);
|
||||
|
||||
|
|
|
@ -14,6 +14,7 @@ struct nf_flowtable_type {
|
|||
struct list_head list;
|
||||
int family;
|
||||
void (*gc)(struct work_struct *work);
|
||||
void (*free)(struct nf_flowtable *ft);
|
||||
const struct rhashtable_params *params;
|
||||
nf_hookfn *hook;
|
||||
struct module *owner;
|
||||
|
@ -89,12 +90,15 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
|
|||
void flow_offload_free(struct flow_offload *flow);
|
||||
|
||||
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
|
||||
void flow_offload_del(struct nf_flowtable *flow_table, struct flow_offload *flow);
|
||||
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
|
||||
struct flow_offload_tuple *tuple);
|
||||
int nf_flow_table_iterate(struct nf_flowtable *flow_table,
|
||||
void (*iter)(struct flow_offload *flow, void *data),
|
||||
void *data);
|
||||
|
||||
void nf_flow_table_cleanup(struct net *net, struct net_device *dev);
|
||||
|
||||
void nf_flow_table_free(struct nf_flowtable *flow_table);
|
||||
void nf_flow_offload_work_gc(struct work_struct *work);
|
||||
extern const struct rhashtable_params nf_flow_offload_rhash_params;
|
||||
|
||||
|
|
|
@ -1983,6 +1983,11 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
|
|||
#define TCP_ULP_MAX 128
|
||||
#define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
|
||||
|
||||
enum {
|
||||
TCP_ULP_TLS,
|
||||
TCP_ULP_BPF,
|
||||
};
|
||||
|
||||
struct tcp_ulp_ops {
|
||||
struct list_head list;
|
||||
|
||||
|
@ -1991,12 +1996,15 @@ struct tcp_ulp_ops {
|
|||
/* cleanup ulp */
|
||||
void (*release)(struct sock *sk);
|
||||
|
||||
int uid;
|
||||
char name[TCP_ULP_NAME_MAX];
|
||||
bool user_visible;
|
||||
struct module *owner;
|
||||
};
|
||||
int tcp_register_ulp(struct tcp_ulp_ops *type);
|
||||
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
|
||||
int tcp_set_ulp(struct sock *sk, const char *name);
|
||||
int tcp_set_ulp_id(struct sock *sk, const int ulp);
|
||||
void tcp_get_available_ulp(char *buf, size_t len);
|
||||
void tcp_cleanup_ulp(struct sock *sk);
|
||||
|
||||
|
|
|
@ -86,9 +86,10 @@ struct smap_psock {
|
|||
struct work_struct tx_work;
|
||||
struct work_struct gc_work;
|
||||
|
||||
struct proto *sk_proto;
|
||||
void (*save_close)(struct sock *sk, long timeout);
|
||||
void (*save_data_ready)(struct sock *sk);
|
||||
void (*save_write_space)(struct sock *sk);
|
||||
void (*save_state_change)(struct sock *sk);
|
||||
};
|
||||
|
||||
static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
|
||||
|
@ -96,12 +97,102 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
|
|||
return rcu_dereference_sk_user_data(sk);
|
||||
}
|
||||
|
||||
static struct proto tcp_bpf_proto;
|
||||
static int bpf_tcp_init(struct sock *sk)
|
||||
{
|
||||
struct smap_psock *psock;
|
||||
|
||||
rcu_read_lock();
|
||||
psock = smap_psock_sk(sk);
|
||||
if (unlikely(!psock)) {
|
||||
rcu_read_unlock();
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (unlikely(psock->sk_proto)) {
|
||||
rcu_read_unlock();
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
psock->save_close = sk->sk_prot->close;
|
||||
psock->sk_proto = sk->sk_prot;
|
||||
sk->sk_prot = &tcp_bpf_proto;
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bpf_tcp_release(struct sock *sk)
|
||||
{
|
||||
struct smap_psock *psock;
|
||||
|
||||
rcu_read_lock();
|
||||
psock = smap_psock_sk(sk);
|
||||
|
||||
if (likely(psock)) {
|
||||
sk->sk_prot = psock->sk_proto;
|
||||
psock->sk_proto = NULL;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
|
||||
|
||||
static void bpf_tcp_close(struct sock *sk, long timeout)
|
||||
{
|
||||
void (*close_fun)(struct sock *sk, long timeout);
|
||||
struct smap_psock_map_entry *e, *tmp;
|
||||
struct smap_psock *psock;
|
||||
struct sock *osk;
|
||||
|
||||
rcu_read_lock();
|
||||
psock = smap_psock_sk(sk);
|
||||
if (unlikely(!psock)) {
|
||||
rcu_read_unlock();
|
||||
return sk->sk_prot->close(sk, timeout);
|
||||
}
|
||||
|
||||
/* The psock may be destroyed anytime after exiting the RCU critial
|
||||
* section so by the time we use close_fun the psock may no longer
|
||||
* be valid. However, bpf_tcp_close is called with the sock lock
|
||||
* held so the close hook and sk are still valid.
|
||||
*/
|
||||
close_fun = psock->save_close;
|
||||
|
||||
write_lock_bh(&sk->sk_callback_lock);
|
||||
list_for_each_entry_safe(e, tmp, &psock->maps, list) {
|
||||
osk = cmpxchg(e->entry, sk, NULL);
|
||||
if (osk == sk) {
|
||||
list_del(&e->list);
|
||||
smap_release_sock(psock, sk);
|
||||
}
|
||||
}
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
rcu_read_unlock();
|
||||
close_fun(sk, timeout);
|
||||
}
|
||||
|
||||
enum __sk_action {
|
||||
__SK_DROP = 0,
|
||||
__SK_PASS,
|
||||
__SK_REDIRECT,
|
||||
};
|
||||
|
||||
static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
|
||||
.name = "bpf_tcp",
|
||||
.uid = TCP_ULP_BPF,
|
||||
.user_visible = false,
|
||||
.owner = NULL,
|
||||
.init = bpf_tcp_init,
|
||||
.release = bpf_tcp_release,
|
||||
};
|
||||
|
||||
static int bpf_tcp_ulp_register(void)
|
||||
{
|
||||
tcp_bpf_proto = tcp_prot;
|
||||
tcp_bpf_proto.close = bpf_tcp_close;
|
||||
return tcp_register_ulp(&bpf_tcp_ulp_ops);
|
||||
}
|
||||
|
||||
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
|
||||
{
|
||||
struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
|
||||
|
@ -166,68 +257,6 @@ static void smap_report_sk_error(struct smap_psock *psock, int err)
|
|||
sk->sk_error_report(sk);
|
||||
}
|
||||
|
||||
static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
|
||||
|
||||
/* Called with lock_sock(sk) held */
|
||||
static void smap_state_change(struct sock *sk)
|
||||
{
|
||||
struct smap_psock_map_entry *e, *tmp;
|
||||
struct smap_psock *psock;
|
||||
struct socket_wq *wq;
|
||||
struct sock *osk;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
/* Allowing transitions into an established syn_recv states allows
|
||||
* for early binding sockets to a smap object before the connection
|
||||
* is established.
|
||||
*/
|
||||
switch (sk->sk_state) {
|
||||
case TCP_SYN_SENT:
|
||||
case TCP_SYN_RECV:
|
||||
case TCP_ESTABLISHED:
|
||||
break;
|
||||
case TCP_CLOSE_WAIT:
|
||||
case TCP_CLOSING:
|
||||
case TCP_LAST_ACK:
|
||||
case TCP_FIN_WAIT1:
|
||||
case TCP_FIN_WAIT2:
|
||||
case TCP_LISTEN:
|
||||
break;
|
||||
case TCP_CLOSE:
|
||||
/* Only release if the map entry is in fact the sock in
|
||||
* question. There is a case where the operator deletes
|
||||
* the sock from the map, but the TCP sock is closed before
|
||||
* the psock is detached. Use cmpxchg to verify correct
|
||||
* sock is removed.
|
||||
*/
|
||||
psock = smap_psock_sk(sk);
|
||||
if (unlikely(!psock))
|
||||
break;
|
||||
write_lock_bh(&sk->sk_callback_lock);
|
||||
list_for_each_entry_safe(e, tmp, &psock->maps, list) {
|
||||
osk = cmpxchg(e->entry, sk, NULL);
|
||||
if (osk == sk) {
|
||||
list_del(&e->list);
|
||||
smap_release_sock(psock, sk);
|
||||
}
|
||||
}
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
break;
|
||||
default:
|
||||
psock = smap_psock_sk(sk);
|
||||
if (unlikely(!psock))
|
||||
break;
|
||||
smap_report_sk_error(psock, EPIPE);
|
||||
break;
|
||||
}
|
||||
|
||||
wq = rcu_dereference(sk->sk_wq);
|
||||
if (skwq_has_sleeper(wq))
|
||||
wake_up_interruptible_all(&wq->wait);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static void smap_read_sock_strparser(struct strparser *strp,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
|
@ -322,10 +351,8 @@ static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
|
|||
return;
|
||||
sk->sk_data_ready = psock->save_data_ready;
|
||||
sk->sk_write_space = psock->save_write_space;
|
||||
sk->sk_state_change = psock->save_state_change;
|
||||
psock->save_data_ready = NULL;
|
||||
psock->save_write_space = NULL;
|
||||
psock->save_state_change = NULL;
|
||||
strp_stop(&psock->strp);
|
||||
psock->strp_enabled = false;
|
||||
}
|
||||
|
@ -350,6 +377,7 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
|
|||
if (psock->refcnt)
|
||||
return;
|
||||
|
||||
tcp_cleanup_ulp(sock);
|
||||
smap_stop_sock(psock, sock);
|
||||
clear_bit(SMAP_TX_RUNNING, &psock->state);
|
||||
rcu_assign_sk_user_data(sock, NULL);
|
||||
|
@ -427,10 +455,8 @@ static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
|
|||
return;
|
||||
psock->save_data_ready = sk->sk_data_ready;
|
||||
psock->save_write_space = sk->sk_write_space;
|
||||
psock->save_state_change = sk->sk_state_change;
|
||||
sk->sk_data_ready = smap_data_ready;
|
||||
sk->sk_write_space = smap_write_space;
|
||||
sk->sk_state_change = smap_state_change;
|
||||
psock->strp_enabled = true;
|
||||
}
|
||||
|
||||
|
@ -509,6 +535,10 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
|
|||
if (attr->value_size > KMALLOC_MAX_SIZE)
|
||||
return ERR_PTR(-E2BIG);
|
||||
|
||||
err = bpf_tcp_ulp_register();
|
||||
if (err && err != -EEXIST)
|
||||
return ERR_PTR(err);
|
||||
|
||||
stab = kzalloc(sizeof(*stab), GFP_USER);
|
||||
if (!stab)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
@ -590,11 +620,6 @@ static void sock_map_free(struct bpf_map *map)
|
|||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (stab->bpf_verdict)
|
||||
bpf_prog_put(stab->bpf_verdict);
|
||||
if (stab->bpf_parse)
|
||||
bpf_prog_put(stab->bpf_parse);
|
||||
|
||||
sock_map_remove_complete(stab);
|
||||
}
|
||||
|
||||
|
@ -754,6 +779,10 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
|
|||
goto out_progs;
|
||||
}
|
||||
|
||||
err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
|
||||
if (err)
|
||||
goto out_progs;
|
||||
|
||||
set_bit(SMAP_TX_RUNNING, &psock->state);
|
||||
}
|
||||
|
||||
|
@ -866,6 +895,19 @@ static int sock_map_update_elem(struct bpf_map *map,
|
|||
return err;
|
||||
}
|
||||
|
||||
static void sock_map_release(struct bpf_map *map, struct file *map_file)
|
||||
{
|
||||
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
|
||||
struct bpf_prog *orig;
|
||||
|
||||
orig = xchg(&stab->bpf_parse, NULL);
|
||||
if (orig)
|
||||
bpf_prog_put(orig);
|
||||
orig = xchg(&stab->bpf_verdict, NULL);
|
||||
if (orig)
|
||||
bpf_prog_put(orig);
|
||||
}
|
||||
|
||||
const struct bpf_map_ops sock_map_ops = {
|
||||
.map_alloc = sock_map_alloc,
|
||||
.map_free = sock_map_free,
|
||||
|
@ -873,6 +915,7 @@ const struct bpf_map_ops sock_map_ops = {
|
|||
.map_get_next_key = sock_map_get_next_key,
|
||||
.map_update_elem = sock_map_update_elem,
|
||||
.map_delete_elem = sock_map_delete_elem,
|
||||
.map_release = sock_map_release,
|
||||
};
|
||||
|
||||
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
|
||||
|
|
|
@ -83,6 +83,7 @@ struct bpf_test {
|
|||
__u32 result;
|
||||
} test[MAX_SUBTESTS];
|
||||
int (*fill_helper)(struct bpf_test *self);
|
||||
int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
|
||||
__u8 frag_data[MAX_DATA];
|
||||
int stack_depth; /* for eBPF only, since tests don't call verifier */
|
||||
};
|
||||
|
@ -2026,7 +2027,9 @@ static struct bpf_test tests[] = {
|
|||
},
|
||||
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
|
||||
{ },
|
||||
{ }
|
||||
{ },
|
||||
.fill_helper = NULL,
|
||||
.expected_errcode = -EINVAL,
|
||||
},
|
||||
{
|
||||
"check: div_k_0",
|
||||
|
@ -2036,7 +2039,9 @@ static struct bpf_test tests[] = {
|
|||
},
|
||||
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
|
||||
{ },
|
||||
{ }
|
||||
{ },
|
||||
.fill_helper = NULL,
|
||||
.expected_errcode = -EINVAL,
|
||||
},
|
||||
{
|
||||
"check: unknown insn",
|
||||
|
@ -2047,7 +2052,9 @@ static struct bpf_test tests[] = {
|
|||
},
|
||||
CLASSIC | FLAG_EXPECTED_FAIL,
|
||||
{ },
|
||||
{ }
|
||||
{ },
|
||||
.fill_helper = NULL,
|
||||
.expected_errcode = -EINVAL,
|
||||
},
|
||||
{
|
||||
"check: out of range spill/fill",
|
||||
|
@ -2057,7 +2064,9 @@ static struct bpf_test tests[] = {
|
|||
},
|
||||
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
|
||||
{ },
|
||||
{ }
|
||||
{ },
|
||||
.fill_helper = NULL,
|
||||
.expected_errcode = -EINVAL,
|
||||
},
|
||||
{
|
||||
"JUMPS + HOLES",
|
||||
|
@ -2149,6 +2158,8 @@ static struct bpf_test tests[] = {
|
|||
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
|
||||
{ },
|
||||
{ },
|
||||
.fill_helper = NULL,
|
||||
.expected_errcode = -EINVAL,
|
||||
},
|
||||
{
|
||||
"check: LDX + RET X",
|
||||
|
@ -2159,6 +2170,8 @@ static struct bpf_test tests[] = {
|
|||
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
|
||||
{ },
|
||||
{ },
|
||||
.fill_helper = NULL,
|
||||
.expected_errcode = -EINVAL,
|
||||
},
|
||||
{ /* Mainly checking JIT here. */
|
||||
"M[]: alt STX + LDX",
|
||||
|
@ -2333,6 +2346,8 @@ static struct bpf_test tests[] = {
|
|||
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
|
||||
{ },
|
||||
{ },
|
||||
.fill_helper = NULL,
|
||||
.expected_errcode = -EINVAL,
|
||||
},
|
||||
{ /* Passes checker but fails during runtime. */
|
||||
"LD [SKF_AD_OFF-1]",
|
||||
|
@ -5395,6 +5410,7 @@ static struct bpf_test tests[] = {
|
|||
{ },
|
||||
{ },
|
||||
.fill_helper = bpf_fill_maxinsns4,
|
||||
.expected_errcode = -EINVAL,
|
||||
},
|
||||
{ /* Mainly checking JIT here. */
|
||||
"BPF_MAXINSNS: Very long jump",
|
||||
|
@ -5450,10 +5466,15 @@ static struct bpf_test tests[] = {
|
|||
{
|
||||
"BPF_MAXINSNS: Jump, gap, jump, ...",
|
||||
{ },
|
||||
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
|
||||
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
|
||||
#else
|
||||
CLASSIC | FLAG_NO_DATA,
|
||||
#endif
|
||||
{ },
|
||||
{ { 0, 0xababcbac } },
|
||||
.fill_helper = bpf_fill_maxinsns11,
|
||||
.expected_errcode = -ENOTSUPP,
|
||||
},
|
||||
{
|
||||
"BPF_MAXINSNS: ld_abs+get_processor_id",
|
||||
|
@ -6344,7 +6365,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
|
|||
|
||||
*err = bpf_prog_create(&fp, &fprog);
|
||||
if (tests[which].aux & FLAG_EXPECTED_FAIL) {
|
||||
if (*err == -EINVAL) {
|
||||
if (*err == tests[which].expected_errcode) {
|
||||
pr_cont("PASS\n");
|
||||
/* Verifier rejected filter as expected. */
|
||||
*err = 0;
|
||||
|
|
|
@@ -1951,6 +1951,38 @@ static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
return net;
}

/* Verify that rtnetlink requests do not pass additional properties
* potentially referring to different network namespaces.
*/
static int rtnl_ensure_unique_netns(struct nlattr *tb[],
struct netlink_ext_ack *extack,
bool netns_id_only)
{
if (netns_id_only) {
if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
return 0;

NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
return -EOPNOTSUPP;
}

if (tb[IFLA_IF_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
goto invalid_attr;

if (tb[IFLA_NET_NS_PID] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_FD]))
goto invalid_attr;

if (tb[IFLA_NET_NS_FD] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_PID]))
goto invalid_attr;

return 0;

invalid_attr:
NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
return -EINVAL;
}

static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
if (dev) {
@ -2553,6 +2585,10 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|||
if (err < 0)
|
||||
goto errout;
|
||||
|
||||
err = rtnl_ensure_unique_netns(tb, extack, false);
|
||||
if (err < 0)
|
||||
goto errout;
|
||||
|
||||
if (tb[IFLA_IFNAME])
|
||||
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
|
||||
else
|
||||
|
@ -2649,6 +2685,10 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
err = rtnl_ensure_unique_netns(tb, extack, true);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (tb[IFLA_IFNAME])
|
||||
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
|
||||
|
||||
|
@ -2802,6 +2842,10 @@ replay:
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
err = rtnl_ensure_unique_netns(tb, extack, false);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (tb[IFLA_IFNAME])
|
||||
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
|
||||
else
|
||||
|
@ -3045,6 +3089,10 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
err = rtnl_ensure_unique_netns(tb, extack, true);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (tb[IFLA_IF_NETNSID]) {
|
||||
netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
|
||||
tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
|
||||
|
|
|
@@ -3894,10 +3894,12 @@ EXPORT_SYMBOL_GPL(skb_gro_receive);

void __init skb_init(void)
{
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
offsetof(struct sk_buff, cb),
sizeof_field(struct sk_buff, cb),
NULL);
skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
sizeof(struct sk_buff_fclones),
@@ -80,8 +80,7 @@ endif # NF_TABLES

config NF_FLOW_TABLE_IPV4
tristate "Netfilter flow table IPv4 module"
depends on NF_CONNTRACK && NF_TABLES
select NF_FLOW_TABLE
depends on NF_FLOW_TABLE
help
This option adds the flow table IPv4 support.

@ -260,6 +260,7 @@ static struct nf_flowtable_type flowtable_ipv4 = {
|
|||
.family = NFPROTO_IPV4,
|
||||
.params = &nf_flow_offload_rhash_params,
|
||||
.gc = nf_flow_offload_work_gc,
|
||||
.free = nf_flow_table_free,
|
||||
.hook = nf_flow_offload_ip_hook,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
|
|
@@ -705,7 +705,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
*/
if (sk) {
arg.bound_dev_if = sk->sk_bound_dev_if;
trace_tcp_send_reset(sk, skb);
if (sk_fullsock(sk))
trace_tcp_send_reset(sk, skb);
}

BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
@ -29,6 +29,18 @@ static struct tcp_ulp_ops *tcp_ulp_find(const char *name)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static struct tcp_ulp_ops *tcp_ulp_find_id(const int ulp)
|
||||
{
|
||||
struct tcp_ulp_ops *e;
|
||||
|
||||
list_for_each_entry_rcu(e, &tcp_ulp_list, list) {
|
||||
if (e->uid == ulp)
|
||||
return e;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
|
||||
{
|
||||
const struct tcp_ulp_ops *ulp = NULL;
|
||||
|
@ -51,6 +63,18 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
|
|||
return ulp;
|
||||
}
|
||||
|
||||
static const struct tcp_ulp_ops *__tcp_ulp_lookup(const int uid)
|
||||
{
|
||||
const struct tcp_ulp_ops *ulp;
|
||||
|
||||
rcu_read_lock();
|
||||
ulp = tcp_ulp_find_id(uid);
|
||||
if (!ulp || !try_module_get(ulp->owner))
|
||||
ulp = NULL;
|
||||
rcu_read_unlock();
|
||||
return ulp;
|
||||
}
|
||||
|
||||
/* Attach new upper layer protocol to the list
|
||||
* of available protocols.
|
||||
*/
|
||||
|
@ -59,13 +83,10 @@ int tcp_register_ulp(struct tcp_ulp_ops *ulp)
|
|||
int ret = 0;
|
||||
|
||||
spin_lock(&tcp_ulp_list_lock);
|
||||
if (tcp_ulp_find(ulp->name)) {
|
||||
pr_notice("%s already registered or non-unique name\n",
|
||||
ulp->name);
|
||||
if (tcp_ulp_find(ulp->name))
|
||||
ret = -EEXIST;
|
||||
} else {
|
||||
else
|
||||
list_add_tail_rcu(&ulp->list, &tcp_ulp_list);
|
||||
}
|
||||
spin_unlock(&tcp_ulp_list_lock);
|
||||
|
||||
return ret;
|
||||
|
@ -124,6 +145,34 @@ int tcp_set_ulp(struct sock *sk, const char *name)
|
|||
if (!ulp_ops)
|
||||
return -ENOENT;
|
||||
|
||||
if (!ulp_ops->user_visible) {
|
||||
module_put(ulp_ops->owner);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
err = ulp_ops->init(sk);
|
||||
if (err) {
|
||||
module_put(ulp_ops->owner);
|
||||
return err;
|
||||
}
|
||||
|
||||
icsk->icsk_ulp_ops = ulp_ops;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tcp_set_ulp_id(struct sock *sk, int ulp)
|
||||
{
|
||||
struct inet_connection_sock *icsk = inet_csk(sk);
|
||||
const struct tcp_ulp_ops *ulp_ops;
|
||||
int err;
|
||||
|
||||
if (icsk->icsk_ulp_ops)
|
||||
return -EEXIST;
|
||||
|
||||
ulp_ops = __tcp_ulp_lookup(ulp);
|
||||
if (!ulp_ops)
|
||||
return -ENOENT;
|
||||
|
||||
err = ulp_ops->init(sk);
|
||||
if (err) {
|
||||
module_put(ulp_ops->owner);
|
||||
|
|
|
@ -73,8 +73,7 @@ endif # NF_TABLES
|
|||
|
||||
config NF_FLOW_TABLE_IPV6
|
||||
tristate "Netfilter flow table IPv6 module"
|
||||
depends on NF_CONNTRACK && NF_TABLES
|
||||
select NF_FLOW_TABLE
|
||||
depends on NF_FLOW_TABLE
|
||||
help
|
||||
This option adds the flow table IPv6 support.
|
||||
|
||||
|
|
|
@ -264,6 +264,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
|
|||
* this case. -DaveM
|
||||
*/
|
||||
pr_debug("end of fragment not rounded to 8 bytes.\n");
|
||||
inet_frag_kill(&fq->q, &nf_frags);
|
||||
return -EPROTO;
|
||||
}
|
||||
if (end > fq->q.len) {
|
||||
|
|
|
@ -253,6 +253,7 @@ static struct nf_flowtable_type flowtable_ipv6 = {
|
|||
.family = NFPROTO_IPV6,
|
||||
.params = &nf_flow_offload_rhash_params,
|
||||
.gc = nf_flow_offload_work_gc,
|
||||
.free = nf_flow_table_free,
|
||||
.hook = nf_flow_offload_ipv6_hook,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
|
|
@@ -2479,7 +2479,7 @@ static int ip6_route_check_nh_onlink(struct net *net,
struct net_device *dev,
struct netlink_ext_ack *extack)
{
u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
const struct in6_addr *gw_addr = &cfg->fc_gateway;
u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
struct rt6_info *grt;
@@ -2488,8 +2488,10 @@ static int ip6_route_check_nh_onlink(struct net *net,
err = 0;
grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
if (grt) {
if (grt->rt6i_flags & flags || dev != grt->dst.dev) {
NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
if (!grt->dst.error &&
(grt->rt6i_flags & flags || dev != grt->dst.dev)) {
NL_SET_ERR_MSG(extack,
"Nexthop has invalid gateway or device mismatch");
err = -EINVAL;
}

@@ -942,7 +942,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)

if (sk) {
oif = sk->sk_bound_dev_if;
trace_tcp_send_reset(sk, skb);
if (sk_fullsock(sk))
trace_tcp_send_reset(sk, skb);
}

tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
@@ -8,6 +8,7 @@
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/netconf.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/percpu.h>
#include <net/ip.h>
@@ -935,24 +936,27 @@ errout:
return err;
}

static bool mpls_label_ok(struct net *net, unsigned int index,
static bool mpls_label_ok(struct net *net, unsigned int *index,
struct netlink_ext_ack *extack)
{
bool is_ok = true;

/* Reserved labels may not be set */
if (index < MPLS_LABEL_FIRST_UNRESERVED) {
if (*index < MPLS_LABEL_FIRST_UNRESERVED) {
NL_SET_ERR_MSG(extack,
"Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher");
return false;
is_ok = false;
}

/* The full 20 bit range may not be supported. */
if (index >= net->mpls.platform_labels) {
if (is_ok && *index >= net->mpls.platform_labels) {
NL_SET_ERR_MSG(extack,
"Label >= configured maximum in platform_labels");
return false;
is_ok = false;
}

return true;
*index = array_index_nospec(*index, net->mpls.platform_labels);
return is_ok;
}

static int mpls_route_add(struct mpls_route_config *cfg,
@ -975,7 +979,7 @@ static int mpls_route_add(struct mpls_route_config *cfg,
|
|||
index = find_free_label(net);
|
||||
}
|
||||
|
||||
if (!mpls_label_ok(net, index, extack))
|
||||
if (!mpls_label_ok(net, &index, extack))
|
||||
goto errout;
|
||||
|
||||
/* Append makes no sense with mpls */
|
||||
|
@ -1052,7 +1056,7 @@ static int mpls_route_del(struct mpls_route_config *cfg,
|
|||
|
||||
index = cfg->rc_label;
|
||||
|
||||
if (!mpls_label_ok(net, index, extack))
|
||||
if (!mpls_label_ok(net, &index, extack))
|
||||
goto errout;
|
||||
|
||||
mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
|
||||
|
@ -1810,7 +1814,7 @@ static int rtm_to_route_config(struct sk_buff *skb,
|
|||
goto errout;
|
||||
|
||||
if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
|
||||
cfg->rc_label, extack))
|
||||
&cfg->rc_label, extack))
|
||||
goto errout;
|
||||
break;
|
||||
}
|
||||
|
@ -2137,7 +2141,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
|
|||
goto errout;
|
||||
}
|
||||
|
||||
if (!mpls_label_ok(net, in_label, extack)) {
|
||||
if (!mpls_label_ok(net, &in_label, extack)) {
|
||||
err = -EINVAL;
|
||||
goto errout;
|
||||
}
|
||||
|
|
|
@@ -666,8 +666,8 @@ endif # NF_TABLES

config NF_FLOW_TABLE_INET
tristate "Netfilter flow table mixed IPv4/IPv6 module"
depends on NF_FLOW_TABLE_IPV4 && NF_FLOW_TABLE_IPV6
select NF_FLOW_TABLE
depends on NF_FLOW_TABLE_IPV4
depends on NF_FLOW_TABLE_IPV6
help
This option adds the flow table mixed IPv4/IPv6 support.

@@ -675,7 +675,9 @@ config NF_FLOW_TABLE_INET

config NF_FLOW_TABLE
tristate "Netfilter flow table module"
depends on NF_CONNTRACK && NF_TABLES
depends on NETFILTER_INGRESS
depends on NF_CONNTRACK
depends on NF_TABLES
help
This option adds the flow table core infrastructure.
@ -4,6 +4,7 @@
|
|||
#include <linux/netfilter.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <net/netfilter/nf_tables.h>
|
||||
#include <net/netfilter/nf_flow_table.h>
|
||||
#include <net/netfilter/nf_conntrack.h>
|
||||
#include <net/netfilter/nf_conntrack_core.h>
|
||||
|
@ -124,7 +125,9 @@ void flow_offload_free(struct flow_offload *flow)
|
|||
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
|
||||
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
|
||||
e = container_of(flow, struct flow_offload_entry, flow);
|
||||
kfree(e);
|
||||
nf_ct_delete(e->ct, 0, 0);
|
||||
nf_ct_put(e->ct);
|
||||
kfree_rcu(e, rcu_head);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(flow_offload_free);
|
||||
|
||||
|
@ -148,11 +151,9 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(flow_offload_add);
|
||||
|
||||
void flow_offload_del(struct nf_flowtable *flow_table,
|
||||
struct flow_offload *flow)
|
||||
static void flow_offload_del(struct nf_flowtable *flow_table,
|
||||
struct flow_offload *flow)
|
||||
{
|
||||
struct flow_offload_entry *e;
|
||||
|
||||
rhashtable_remove_fast(&flow_table->rhashtable,
|
||||
&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
|
||||
*flow_table->type->params);
|
||||
|
@ -160,10 +161,8 @@ void flow_offload_del(struct nf_flowtable *flow_table,
|
|||
&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
|
||||
*flow_table->type->params);
|
||||
|
||||
e = container_of(flow, struct flow_offload_entry, flow);
|
||||
kfree_rcu(e, rcu_head);
|
||||
flow_offload_free(flow);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(flow_offload_del);
|
||||
|
||||
struct flow_offload_tuple_rhash *
|
||||
flow_offload_lookup(struct nf_flowtable *flow_table,
|
||||
|
@ -174,15 +173,6 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(flow_offload_lookup);
|
||||
|
||||
static void nf_flow_release_ct(const struct flow_offload *flow)
|
||||
{
|
||||
struct flow_offload_entry *e;
|
||||
|
||||
e = container_of(flow, struct flow_offload_entry, flow);
|
||||
nf_ct_delete(e->ct, 0, 0);
|
||||
nf_ct_put(e->ct);
|
||||
}
|
||||
|
||||
int nf_flow_table_iterate(struct nf_flowtable *flow_table,
|
||||
void (*iter)(struct flow_offload *flow, void *data),
|
||||
void *data)
|
||||
|
@ -231,19 +221,16 @@ static inline bool nf_flow_is_dying(const struct flow_offload *flow)
|
|||
return flow->flags & FLOW_OFFLOAD_DYING;
|
||||
}
|
||||
|
||||
void nf_flow_offload_work_gc(struct work_struct *work)
|
||||
static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
|
||||
{
|
||||
struct flow_offload_tuple_rhash *tuplehash;
|
||||
struct nf_flowtable *flow_table;
|
||||
struct rhashtable_iter hti;
|
||||
struct flow_offload *flow;
|
||||
int err;
|
||||
|
||||
flow_table = container_of(work, struct nf_flowtable, gc_work.work);
|
||||
|
||||
err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
|
||||
if (err)
|
||||
goto schedule;
|
||||
return 0;
|
||||
|
||||
rhashtable_walk_start(&hti);
|
||||
|
||||
|
@ -261,15 +248,22 @@ void nf_flow_offload_work_gc(struct work_struct *work)
|
|||
flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
|
||||
|
||||
if (nf_flow_has_expired(flow) ||
|
||||
nf_flow_is_dying(flow)) {
|
||||
nf_flow_is_dying(flow))
|
||||
flow_offload_del(flow_table, flow);
|
||||
nf_flow_release_ct(flow);
|
||||
}
|
||||
}
|
||||
out:
|
||||
rhashtable_walk_stop(&hti);
|
||||
rhashtable_walk_exit(&hti);
|
||||
schedule:
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
void nf_flow_offload_work_gc(struct work_struct *work)
|
||||
{
|
||||
struct nf_flowtable *flow_table;
|
||||
|
||||
flow_table = container_of(work, struct nf_flowtable, gc_work.work);
|
||||
nf_flow_offload_gc_step(flow_table);
|
||||
queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nf_flow_offload_work_gc);
|
||||
|
@ -425,5 +419,35 @@ int nf_flow_dnat_port(const struct flow_offload *flow,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
|
||||
|
||||
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
|
||||
{
|
||||
struct net_device *dev = data;
|
||||
|
||||
if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex)
|
||||
return;
|
||||
|
||||
flow_offload_dead(flow);
|
||||
}
|
||||
|
||||
static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
|
||||
void *data)
|
||||
{
|
||||
nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, data);
|
||||
flush_delayed_work(&flowtable->gc_work);
|
||||
}
|
||||
|
||||
void nf_flow_table_cleanup(struct net *net, struct net_device *dev)
|
||||
{
|
||||
nft_flow_table_iterate(net, nf_flow_table_iterate_cleanup, dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
|
||||
|
||||
void nf_flow_table_free(struct nf_flowtable *flow_table)
|
||||
{
|
||||
nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
|
||||
WARN_ON(!nf_flow_offload_gc_step(flow_table));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nf_flow_table_free);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
|
||||
|
|
|
@ -24,6 +24,7 @@ static struct nf_flowtable_type flowtable_inet = {
|
|||
.family = NFPROTO_INET,
|
||||
.params = &nf_flow_offload_rhash_params,
|
||||
.gc = nf_flow_offload_work_gc,
|
||||
.free = nf_flow_table_free,
|
||||
.hook = nf_flow_offload_inet_hook,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
|
|
@ -5006,13 +5006,13 @@ void nft_flow_table_iterate(struct net *net,
|
|||
struct nft_flowtable *flowtable;
|
||||
const struct nft_table *table;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(table, &net->nft.tables, list) {
|
||||
list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
|
||||
nfnl_lock(NFNL_SUBSYS_NFTABLES);
|
||||
list_for_each_entry(table, &net->nft.tables, list) {
|
||||
list_for_each_entry(flowtable, &table->flowtables, list) {
|
||||
iter(&flowtable->data, data);
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nft_flow_table_iterate);
|
||||
|
||||
|
@ -5399,17 +5399,12 @@ err:
|
|||
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
|
||||
}
|
||||
|
||||
static void nft_flowtable_destroy(void *ptr, void *arg)
|
||||
{
|
||||
kfree(ptr);
|
||||
}
|
||||
|
||||
static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
|
||||
{
|
||||
cancel_delayed_work_sync(&flowtable->data.gc_work);
|
||||
kfree(flowtable->name);
|
||||
rhashtable_free_and_destroy(&flowtable->data.rhashtable,
|
||||
nft_flowtable_destroy, NULL);
|
||||
flowtable->data.type->free(&flowtable->data);
|
||||
rhashtable_destroy(&flowtable->data.rhashtable);
|
||||
module_put(flowtable->data.type->owner);
|
||||
}
|
||||
|
||||
|
|
|
@ -194,22 +194,6 @@ static struct nft_expr_type nft_flow_offload_type __read_mostly = {
|
|||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static void flow_offload_iterate_cleanup(struct flow_offload *flow, void *data)
|
||||
{
|
||||
struct net_device *dev = data;
|
||||
|
||||
if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex)
|
||||
return;
|
||||
|
||||
flow_offload_dead(flow);
|
||||
}
|
||||
|
||||
static void nft_flow_offload_iterate_cleanup(struct nf_flowtable *flowtable,
|
||||
void *data)
|
||||
{
|
||||
nf_flow_table_iterate(flowtable, flow_offload_iterate_cleanup, data);
|
||||
}
|
||||
|
||||
static int flow_offload_netdev_event(struct notifier_block *this,
|
||||
unsigned long event, void *ptr)
|
||||
{
|
||||
|
@ -218,7 +202,7 @@ static int flow_offload_netdev_event(struct notifier_block *this,
|
|||
if (event != NETDEV_DOWN)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
nft_flow_table_iterate(dev_net(dev), nft_flow_offload_iterate_cleanup, dev);
|
||||
nf_flow_table_cleanup(dev_net(dev), dev);
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
@ -246,14 +230,8 @@ register_expr:
|
|||
|
||||
static void __exit nft_flow_offload_module_exit(void)
|
||||
{
|
||||
struct net *net;
|
||||
|
||||
nft_unregister_expr(&nft_flow_offload_type);
|
||||
unregister_netdevice_notifier(&flow_offload_netdev_notifier);
|
||||
rtnl_lock();
|
||||
for_each_net(net)
|
||||
nft_flow_table_iterate(net, nft_flow_offload_iterate_cleanup, NULL);
|
||||
rtnl_unlock();
|
||||
}
|
||||
|
||||
module_init(nft_flow_offload_module_init);
|
||||
|
|
|
@@ -1008,7 +1008,12 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
return NULL;

info = kvmalloc(sz, GFP_KERNEL);
/* __GFP_NORETRY is not fully supported by kvmalloc but it should
* work reasonably well if sz is too large and bail out rather
* than shoot all processes down before realizing there is nothing
* more to reclaim.
*/
info = kvmalloc(sz, GFP_KERNEL | __GFP_NORETRY);
if (!info)
return NULL;

@ -39,23 +39,31 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
|
|||
hlist_add_head(&est->list, &rateest_hash[h]);
|
||||
}
|
||||
|
||||
struct xt_rateest *xt_rateest_lookup(const char *name)
|
||||
static struct xt_rateest *__xt_rateest_lookup(const char *name)
|
||||
{
|
||||
struct xt_rateest *est;
|
||||
unsigned int h;
|
||||
|
||||
h = xt_rateest_hash(name);
|
||||
mutex_lock(&xt_rateest_mutex);
|
||||
hlist_for_each_entry(est, &rateest_hash[h], list) {
|
||||
if (strcmp(est->name, name) == 0) {
|
||||
est->refcnt++;
|
||||
mutex_unlock(&xt_rateest_mutex);
|
||||
return est;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&xt_rateest_mutex);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct xt_rateest *xt_rateest_lookup(const char *name)
|
||||
{
|
||||
struct xt_rateest *est;
|
||||
|
||||
mutex_lock(&xt_rateest_mutex);
|
||||
est = __xt_rateest_lookup(name);
|
||||
mutex_unlock(&xt_rateest_mutex);
|
||||
return est;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xt_rateest_lookup);
|
||||
|
||||
void xt_rateest_put(struct xt_rateest *est)
|
||||
|
@ -100,8 +108,10 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
|
|||
|
||||
net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
|
||||
|
||||
est = xt_rateest_lookup(info->name);
|
||||
mutex_lock(&xt_rateest_mutex);
|
||||
est = __xt_rateest_lookup(info->name);
|
||||
if (est) {
|
||||
mutex_unlock(&xt_rateest_mutex);
|
||||
/*
|
||||
* If estimator parameters are specified, they must match the
|
||||
* existing estimator.
|
||||
|
@ -139,11 +149,13 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
|
|||
|
||||
info->est = est;
|
||||
xt_rateest_hash_insert(est);
|
||||
mutex_unlock(&xt_rateest_mutex);
|
||||
return 0;
|
||||
|
||||
err2:
|
||||
kfree(est);
|
||||
err1:
|
||||
mutex_unlock(&xt_rateest_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -52,6 +52,7 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
info->priv = NULL;
|
||||
if (info->has_path) {
|
||||
cgrp = cgroup_get_from_path(info->path);
|
||||
if (IS_ERR(cgrp)) {
|
||||
|
|
|
@@ -1081,6 +1081,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
{
struct sk_buff *tmp;
struct net *net, *prev = NULL;
bool delivered = false;
int err;

for_each_net_rcu(net) {
@@ -1092,14 +1093,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
}
err = nlmsg_multicast(prev->genl_sock, tmp,
portid, group, flags);
if (err)
if (!err)
delivered = true;
else if (err != -ESRCH)
goto error;
}

prev = net;
}

return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
if (!err)
delivered = true;
else if (err != -ESRCH)
goto error;
return delivered ? 0 : -ESRCH;
error:
kfree_skb(skb);
return err;
@ -223,7 +223,7 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
|
|||
|
||||
rcu_read_lock();
|
||||
if (!test_and_set_bit(0, &conn->c_map_queued) &&
|
||||
!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
|
||||
!rds_destroy_pending(cp->cp_conn)) {
|
||||
rds_stats_inc(s_cong_update_queued);
|
||||
/* We cannot inline the call to rds_send_xmit() here
|
||||
* for two reasons (both pertaining to a TCP transport):
|
||||
|
|
|
@ -220,8 +220,13 @@ static struct rds_connection *__rds_conn_create(struct net *net,
|
|||
is_outgoing);
|
||||
conn->c_path[i].cp_index = i;
|
||||
}
|
||||
ret = trans->conn_alloc(conn, gfp);
|
||||
rcu_read_lock();
|
||||
if (rds_destroy_pending(conn))
|
||||
ret = -ENETDOWN;
|
||||
else
|
||||
ret = trans->conn_alloc(conn, gfp);
|
||||
if (ret) {
|
||||
rcu_read_unlock();
|
||||
kfree(conn->c_path);
|
||||
kmem_cache_free(rds_conn_slab, conn);
|
||||
conn = ERR_PTR(ret);
|
||||
|
@ -283,6 +288,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
|
|||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&rds_conn_lock, flags);
|
||||
rcu_read_unlock();
|
||||
|
||||
out:
|
||||
return conn;
|
||||
|
@ -382,13 +388,10 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
|
|||
{
|
||||
struct rds_message *rm, *rtmp;
|
||||
|
||||
set_bit(RDS_DESTROY_PENDING, &cp->cp_flags);
|
||||
|
||||
if (!cp->cp_transport_data)
|
||||
return;
|
||||
|
||||
/* make sure lingering queued work won't try to ref the conn */
|
||||
synchronize_rcu();
|
||||
cancel_delayed_work_sync(&cp->cp_send_w);
|
||||
cancel_delayed_work_sync(&cp->cp_recv_w);
|
||||
|
||||
|
@ -691,7 +694,7 @@ void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
|
|||
atomic_set(&cp->cp_state, RDS_CONN_ERROR);
|
||||
|
||||
rcu_read_lock();
|
||||
if (!destroy && test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
|
||||
if (!destroy && rds_destroy_pending(cp->cp_conn)) {
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
}
|
||||
|
@ -714,7 +717,7 @@ EXPORT_SYMBOL_GPL(rds_conn_drop);
|
|||
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
|
||||
{
|
||||
rcu_read_lock();
|
||||
if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
|
||||
if (rds_destroy_pending(cp->cp_conn)) {
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
}
|
||||
|
|
net/rds/ib.c
|
@ -48,6 +48,7 @@
|
|||
static unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
|
||||
static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
|
||||
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
|
||||
static atomic_t rds_ib_unloading;
|
||||
|
||||
module_param(rds_ib_mr_1m_pool_size, int, 0444);
|
||||
MODULE_PARM_DESC(rds_ib_mr_1m_pool_size, " Max number of 1M mr per HCA");
|
||||
|
@ -378,8 +379,23 @@ static void rds_ib_unregister_client(void)
|
|||
flush_workqueue(rds_wq);
|
||||
}
|
||||
|
||||
static void rds_ib_set_unloading(void)
|
||||
{
|
||||
atomic_set(&rds_ib_unloading, 1);
|
||||
}
|
||||
|
||||
static bool rds_ib_is_unloading(struct rds_connection *conn)
|
||||
{
|
||||
struct rds_conn_path *cp = &conn->c_path[0];
|
||||
|
||||
return (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags) ||
|
||||
atomic_read(&rds_ib_unloading) != 0);
|
||||
}
|
||||
|
||||
void rds_ib_exit(void)
|
||||
{
|
||||
rds_ib_set_unloading();
|
||||
synchronize_rcu();
|
||||
rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
|
||||
rds_ib_unregister_client();
|
||||
rds_ib_destroy_nodev_conns();
|
||||
|
@ -413,6 +429,7 @@ struct rds_transport rds_ib_transport = {
|
|||
.flush_mrs = rds_ib_flush_mrs,
|
||||
.t_owner = THIS_MODULE,
|
||||
.t_name = "infiniband",
|
||||
.t_unloading = rds_ib_is_unloading,
|
||||
.t_type = RDS_TRANS_IB
|
||||
};
|
||||
|
||||
|
|
|
@ -117,6 +117,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
|
|||
&conn->c_laddr, &conn->c_faddr,
|
||||
RDS_PROTOCOL_MAJOR(conn->c_version),
|
||||
RDS_PROTOCOL_MINOR(conn->c_version));
|
||||
set_bit(RDS_DESTROY_PENDING, &conn->c_path[0].cp_flags);
|
||||
rds_conn_destroy(conn);
|
||||
return;
|
||||
} else {
|
||||
|
|
|
@ -518,6 +518,7 @@ struct rds_transport {
|
|||
void (*sync_mr)(void *trans_private, int direction);
|
||||
void (*free_mr)(void *trans_private, int invalidate);
|
||||
void (*flush_mrs)(void);
|
||||
bool (*t_unloading)(struct rds_connection *conn);
|
||||
};
|
||||
|
||||
struct rds_sock {
|
||||
|
@ -862,6 +863,12 @@ static inline void rds_mr_put(struct rds_mr *mr)
|
|||
__rds_put_mr_final(mr);
|
||||
}
|
||||
|
||||
static inline bool rds_destroy_pending(struct rds_connection *conn)
|
||||
{
|
||||
return !check_net(rds_conn_net(conn)) ||
|
||||
(conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
|
||||
}
|
||||
|
||||
/* stats.c */
|
||||
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
|
||||
#define rds_stats_inc_which(which, member) do { \
|
||||
|
|
|
@ -162,7 +162,7 @@ restart:
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
|
||||
if (rds_destroy_pending(cp->cp_conn)) {
|
||||
release_in_xmit(cp);
|
||||
ret = -ENETUNREACH; /* dont requeue send work */
|
||||
goto out;
|
||||
|
@ -444,7 +444,7 @@ over_batch:
|
|||
if (batch_count < send_batch_count)
|
||||
goto restart;
|
||||
rcu_read_lock();
|
||||
if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
|
||||
if (rds_destroy_pending(cp->cp_conn))
|
||||
ret = -ENETUNREACH;
|
||||
else
|
||||
queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
|
||||
|
@ -1162,7 +1162,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
|
|||
else
|
||||
cpath = &conn->c_path[0];
|
||||
|
||||
if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags)) {
|
||||
if (rds_destroy_pending(conn)) {
|
||||
ret = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
|
@ -1209,7 +1209,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
|
|||
if (ret == -ENOMEM || ret == -EAGAIN) {
|
||||
ret = 0;
|
||||
rcu_read_lock();
|
||||
if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags))
|
||||
if (rds_destroy_pending(cpath->cp_conn))
|
||||
ret = -ENETUNREACH;
|
||||
else
|
||||
queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
|
||||
|
@ -1295,7 +1295,7 @@ rds_send_probe(struct rds_conn_path *cp, __be16 sport,
|
|||
|
||||
/* schedule the send work on rds_wq */
|
||||
rcu_read_lock();
|
||||
if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
|
||||
if (!rds_destroy_pending(cp->cp_conn))
|
||||
queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
|
||||
rcu_read_unlock();
|
||||
|
||||
|
|
|
@ -49,6 +49,7 @@ static unsigned int rds_tcp_tc_count;
|
|||
/* Track rds_tcp_connection structs so they can be cleaned up */
|
||||
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
|
||||
static LIST_HEAD(rds_tcp_conn_list);
|
||||
static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);
|
||||
|
||||
static struct kmem_cache *rds_tcp_conn_slab;
|
||||
|
||||
|
@ -274,14 +275,13 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
|
|||
static void rds_tcp_conn_free(void *arg)
|
||||
{
|
||||
struct rds_tcp_connection *tc = arg;
|
||||
unsigned long flags;
|
||||
|
||||
rdsdebug("freeing tc %p\n", tc);
|
||||
|
||||
spin_lock_irqsave(&rds_tcp_conn_lock, flags);
|
||||
spin_lock_bh(&rds_tcp_conn_lock);
|
||||
if (!tc->t_tcp_node_detached)
|
||||
list_del(&tc->t_tcp_node);
|
||||
spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
|
||||
spin_unlock_bh(&rds_tcp_conn_lock);
|
||||
|
||||
kmem_cache_free(rds_tcp_conn_slab, tc);
|
||||
}
|
||||
|
@ -296,7 +296,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
|
|||
tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
|
||||
if (!tc) {
|
||||
ret = -ENOMEM;
|
||||
break;
|
||||
goto fail;
|
||||
}
|
||||
mutex_init(&tc->t_conn_path_lock);
|
||||
tc->t_sock = NULL;
|
||||
|
@ -306,14 +306,19 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
|
|||
|
||||
conn->c_path[i].cp_transport_data = tc;
|
||||
tc->t_cpath = &conn->c_path[i];
|
||||
tc->t_tcp_node_detached = true;
|
||||
|
||||
spin_lock_irq(&rds_tcp_conn_lock);
|
||||
tc->t_tcp_node_detached = false;
|
||||
list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
|
||||
spin_unlock_irq(&rds_tcp_conn_lock);
|
||||
rdsdebug("rds_conn_path [%d] tc %p\n", i,
|
||||
conn->c_path[i].cp_transport_data);
|
||||
}
|
||||
spin_lock_bh(&rds_tcp_conn_lock);
|
||||
for (i = 0; i < RDS_MPATH_WORKERS; i++) {
|
||||
tc = conn->c_path[i].cp_transport_data;
|
||||
tc->t_tcp_node_detached = false;
|
||||
list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
|
||||
}
|
||||
spin_unlock_bh(&rds_tcp_conn_lock);
|
||||
fail:
|
||||
if (ret) {
|
||||
for (j = 0; j < i; j++)
|
||||
rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
|
||||
|
@ -332,6 +337,16 @@ static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
|
|||
return false;
|
||||
}
|
||||
|
||||
static void rds_tcp_set_unloading(void)
|
||||
{
|
||||
atomic_set(&rds_tcp_unloading, 1);
|
||||
}
|
||||
|
||||
static bool rds_tcp_is_unloading(struct rds_connection *conn)
|
||||
{
|
||||
return atomic_read(&rds_tcp_unloading) != 0;
|
||||
}
|
||||
|
||||
static void rds_tcp_destroy_conns(void)
|
||||
{
|
||||
struct rds_tcp_connection *tc, *_tc;
|
||||
|
@ -370,6 +385,7 @@ struct rds_transport rds_tcp_transport = {
|
|||
.t_type = RDS_TRANS_TCP,
|
||||
.t_prefer_loopback = 1,
|
||||
.t_mp_capable = 1,
|
||||
.t_unloading = rds_tcp_is_unloading,
|
||||
};
|
||||
|
||||
static unsigned int rds_tcp_netid;
|
||||
|
@ -513,7 +529,7 @@ static void rds_tcp_kill_sock(struct net *net)
|
|||
|
||||
rtn->rds_tcp_listen_sock = NULL;
|
||||
rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
|
||||
spin_lock_irq(&rds_tcp_conn_lock);
|
||||
spin_lock_bh(&rds_tcp_conn_lock);
|
||||
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
|
||||
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
|
||||
|
||||
|
@ -526,7 +542,7 @@ static void rds_tcp_kill_sock(struct net *net)
|
|||
tc->t_tcp_node_detached = true;
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(&rds_tcp_conn_lock);
|
||||
spin_unlock_bh(&rds_tcp_conn_lock);
|
||||
list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
|
||||
rds_conn_destroy(tc->t_cpath->cp_conn);
|
||||
}
|
||||
|
@ -574,7 +590,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
|
|||
{
|
||||
struct rds_tcp_connection *tc, *_tc;
|
||||
|
||||
spin_lock_irq(&rds_tcp_conn_lock);
|
||||
spin_lock_bh(&rds_tcp_conn_lock);
|
||||
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
|
||||
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
|
||||
|
||||
|
@ -584,7 +600,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
|
|||
/* reconnect with new parameters */
|
||||
rds_conn_path_drop(tc->t_cpath, false);
|
||||
}
|
||||
spin_unlock_irq(&rds_tcp_conn_lock);
|
||||
spin_unlock_bh(&rds_tcp_conn_lock);
|
||||
}
|
||||
|
||||
static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
|
||||
|
@ -607,6 +623,8 @@ static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
|
|||
|
||||
static void rds_tcp_exit(void)
|
||||
{
|
||||
rds_tcp_set_unloading();
|
||||
synchronize_rcu();
|
||||
rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
|
||||
unregister_pernet_subsys(&rds_tcp_net_ops);
|
||||
if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
|
||||
|
|
|
@ -170,7 +170,7 @@ void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp)
|
|||
cp->cp_conn, tc, sock);
|
||||
|
||||
if (sock) {
|
||||
if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
|
||||
if (rds_destroy_pending(cp->cp_conn))
|
||||
rds_tcp_set_linger(sock);
|
||||
sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN);
|
||||
lock_sock(sock->sk);
|
||||
|
|
|
@ -323,7 +323,7 @@ void rds_tcp_data_ready(struct sock *sk)
|
|||
|
||||
if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
|
||||
rcu_read_lock();
|
||||
if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
|
||||
if (!rds_destroy_pending(cp->cp_conn))
|
||||
queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
|
|
@ -204,7 +204,7 @@ void rds_tcp_write_space(struct sock *sk)
|
|||
|
||||
rcu_read_lock();
|
||||
if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf &&
|
||||
!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
|
||||
!rds_destroy_pending(cp->cp_conn))
|
||||
queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
|
||||
rcu_read_unlock();
|
||||
|
||||
|
|
|
@ -88,7 +88,7 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
|
|||
cp->cp_reconnect_jiffies = 0;
|
||||
set_bit(0, &cp->cp_conn->c_map_queued);
|
||||
rcu_read_lock();
|
||||
if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
|
||||
if (!rds_destroy_pending(cp->cp_conn)) {
|
||||
queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
|
||||
queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
|
||||
}
|
||||
|
@ -138,7 +138,7 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
|
|||
if (cp->cp_reconnect_jiffies == 0) {
|
||||
cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
|
||||
rcu_read_lock();
|
||||
if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
|
||||
if (!rds_destroy_pending(cp->cp_conn))
|
||||
queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
|
@ -149,7 +149,7 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
|
|||
rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
|
||||
conn, &conn->c_laddr, &conn->c_faddr);
|
||||
rcu_read_lock();
|
||||
if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
|
||||
if (!rds_destroy_pending(cp->cp_conn))
|
||||
queue_delayed_work(rds_wq, &cp->cp_conn_w,
|
||||
rand % cp->cp_reconnect_jiffies);
|
||||
rcu_read_unlock();
|
||||
|
|
|
@ -834,7 +834,8 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
|
|||
* can be skipped if we find a follow-on call. The first DATA packet
|
||||
* of the follow on call will implicitly ACK this call.
|
||||
*/
|
||||
if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
|
||||
if (call->completion == RXRPC_CALL_SUCCEEDED &&
|
||||
test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
|
||||
unsigned long final_ack_at = jiffies + 2;
|
||||
|
||||
WRITE_ONCE(chan->final_ack_at, final_ack_at);
|
||||
|
|
|
@ -460,6 +460,7 @@ void rxrpc_process_connection(struct work_struct *work)
|
|||
case -EKEYEXPIRED:
|
||||
case -EKEYREJECTED:
|
||||
goto protocol_error;
|
||||
case -ENOMEM:
|
||||
case -EAGAIN:
|
||||
goto requeue_and_leave;
|
||||
case -ECONNABORTED:
|
||||
|
|
|
@ -177,13 +177,21 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
|
|||
* through the channel, whilst disposing of the actual call record.
|
||||
*/
|
||||
trace_rxrpc_disconnect_call(call);
|
||||
if (call->abort_code) {
|
||||
chan->last_abort = call->abort_code;
|
||||
chan->last_type = RXRPC_PACKET_TYPE_ABORT;
|
||||
} else {
|
||||
switch (call->completion) {
|
||||
case RXRPC_CALL_SUCCEEDED:
|
||||
chan->last_seq = call->rx_hard_ack;
|
||||
chan->last_type = RXRPC_PACKET_TYPE_ACK;
|
||||
break;
|
||||
case RXRPC_CALL_LOCALLY_ABORTED:
|
||||
chan->last_abort = call->abort_code;
|
||||
chan->last_type = RXRPC_PACKET_TYPE_ABORT;
|
||||
break;
|
||||
default:
|
||||
chan->last_abort = RX_USER_ABORT;
|
||||
chan->last_type = RXRPC_PACKET_TYPE_ABORT;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Sync with rxrpc_conn_retransmit(). */
|
||||
smp_wmb();
|
||||
chan->last_call = chan->call_id;
|
||||
|
|
|
@ -773,8 +773,7 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
|
|||
{
|
||||
const struct rxrpc_key_token *token;
|
||||
struct rxkad_challenge challenge;
|
||||
struct rxkad_response resp
|
||||
__attribute__((aligned(8))); /* must be aligned for crypto */
|
||||
struct rxkad_response *resp;
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
const char *eproto;
|
||||
u32 version, nonce, min_level, abort_code;
|
||||
|
@ -818,26 +817,29 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
|
|||
token = conn->params.key->payload.data[0];
|
||||
|
||||
/* build the response packet */
|
||||
memset(&resp, 0, sizeof(resp));
|
||||
resp = kzalloc(sizeof(struct rxkad_response), GFP_NOFS);
|
||||
if (!resp)
|
||||
return -ENOMEM;
|
||||
|
||||
resp.version = htonl(RXKAD_VERSION);
|
||||
resp.encrypted.epoch = htonl(conn->proto.epoch);
|
||||
resp.encrypted.cid = htonl(conn->proto.cid);
|
||||
resp.encrypted.securityIndex = htonl(conn->security_ix);
|
||||
resp.encrypted.inc_nonce = htonl(nonce + 1);
|
||||
resp.encrypted.level = htonl(conn->params.security_level);
|
||||
resp.kvno = htonl(token->kad->kvno);
|
||||
resp.ticket_len = htonl(token->kad->ticket_len);
|
||||
|
||||
resp.encrypted.call_id[0] = htonl(conn->channels[0].call_counter);
|
||||
resp.encrypted.call_id[1] = htonl(conn->channels[1].call_counter);
|
||||
resp.encrypted.call_id[2] = htonl(conn->channels[2].call_counter);
|
||||
resp.encrypted.call_id[3] = htonl(conn->channels[3].call_counter);
|
||||
resp->version = htonl(RXKAD_VERSION);
|
||||
resp->encrypted.epoch = htonl(conn->proto.epoch);
|
||||
resp->encrypted.cid = htonl(conn->proto.cid);
|
||||
resp->encrypted.securityIndex = htonl(conn->security_ix);
|
||||
resp->encrypted.inc_nonce = htonl(nonce + 1);
|
||||
resp->encrypted.level = htonl(conn->params.security_level);
|
||||
resp->kvno = htonl(token->kad->kvno);
|
||||
resp->ticket_len = htonl(token->kad->ticket_len);
|
||||
resp->encrypted.call_id[0] = htonl(conn->channels[0].call_counter);
|
||||
resp->encrypted.call_id[1] = htonl(conn->channels[1].call_counter);
|
||||
resp->encrypted.call_id[2] = htonl(conn->channels[2].call_counter);
|
||||
resp->encrypted.call_id[3] = htonl(conn->channels[3].call_counter);
|
||||
|
||||
/* calculate the response checksum and then do the encryption */
|
||||
rxkad_calc_response_checksum(&resp);
|
||||
rxkad_encrypt_response(conn, &resp, token->kad);
|
||||
return rxkad_send_response(conn, &sp->hdr, &resp, token->kad);
|
||||
rxkad_calc_response_checksum(resp);
|
||||
rxkad_encrypt_response(conn, resp, token->kad);
|
||||
ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad);
|
||||
kfree(resp);
|
||||
return ret;
|
||||
|
||||
protocol_error:
|
||||
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
|
||||
|
@ -1048,8 +1050,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
|
|||
struct sk_buff *skb,
|
||||
u32 *_abort_code)
|
||||
{
|
||||
struct rxkad_response response
|
||||
__attribute__((aligned(8))); /* must be aligned for crypto */
|
||||
struct rxkad_response *response;
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
struct rxrpc_crypt session_key;
|
||||
const char *eproto;
|
||||
|
@ -1061,17 +1062,22 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
|
|||
|
||||
_enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
|
||||
|
||||
ret = -ENOMEM;
|
||||
response = kzalloc(sizeof(struct rxkad_response), GFP_NOFS);
|
||||
if (!response)
|
||||
goto temporary_error;
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_short");
|
||||
abort_code = RXKADPACKETSHORT;
|
||||
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
|
||||
&response, sizeof(response)) < 0)
|
||||
response, sizeof(*response)) < 0)
|
||||
goto protocol_error;
|
||||
if (!pskb_pull(skb, sizeof(response)))
|
||||
if (!pskb_pull(skb, sizeof(*response)))
|
||||
BUG();
|
||||
|
||||
version = ntohl(response.version);
|
||||
ticket_len = ntohl(response.ticket_len);
|
||||
kvno = ntohl(response.kvno);
|
||||
version = ntohl(response->version);
|
||||
ticket_len = ntohl(response->ticket_len);
|
||||
kvno = ntohl(response->kvno);
|
||||
_proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }",
|
||||
sp->hdr.serial, version, kvno, ticket_len);
|
||||
|
||||
|
@ -1105,31 +1111,31 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
|
|||
ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key,
|
||||
&expiry, _abort_code);
|
||||
if (ret < 0)
|
||||
goto temporary_error_free;
|
||||
goto temporary_error_free_resp;
|
||||
|
||||
/* use the session key from inside the ticket to decrypt the
|
||||
* response */
|
||||
rxkad_decrypt_response(conn, &response, &session_key);
|
||||
rxkad_decrypt_response(conn, response, &session_key);
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_param");
|
||||
abort_code = RXKADSEALEDINCON;
|
||||
if (ntohl(response.encrypted.epoch) != conn->proto.epoch)
|
||||
if (ntohl(response->encrypted.epoch) != conn->proto.epoch)
|
||||
goto protocol_error_free;
|
||||
if (ntohl(response.encrypted.cid) != conn->proto.cid)
|
||||
if (ntohl(response->encrypted.cid) != conn->proto.cid)
|
||||
goto protocol_error_free;
|
||||
if (ntohl(response.encrypted.securityIndex) != conn->security_ix)
|
||||
if (ntohl(response->encrypted.securityIndex) != conn->security_ix)
|
||||
goto protocol_error_free;
|
||||
csum = response.encrypted.checksum;
|
||||
response.encrypted.checksum = 0;
|
||||
rxkad_calc_response_checksum(&response);
|
||||
csum = response->encrypted.checksum;
|
||||
response->encrypted.checksum = 0;
|
||||
rxkad_calc_response_checksum(response);
|
||||
eproto = tracepoint_string("rxkad_rsp_csum");
|
||||
if (response.encrypted.checksum != csum)
|
||||
if (response->encrypted.checksum != csum)
|
||||
goto protocol_error_free;
|
||||
|
||||
spin_lock(&conn->channel_lock);
|
||||
for (i = 0; i < RXRPC_MAXCALLS; i++) {
|
||||
struct rxrpc_call *call;
|
||||
u32 call_id = ntohl(response.encrypted.call_id[i]);
|
||||
u32 call_id = ntohl(response->encrypted.call_id[i]);
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_callid");
|
||||
if (call_id > INT_MAX)
|
||||
|
@ -1153,12 +1159,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
|
|||
|
||||
eproto = tracepoint_string("rxkad_rsp_seq");
|
||||
abort_code = RXKADOUTOFSEQUENCE;
|
||||
if (ntohl(response.encrypted.inc_nonce) != conn->security_nonce + 1)
|
||||
if (ntohl(response->encrypted.inc_nonce) != conn->security_nonce + 1)
|
||||
goto protocol_error_free;
|
||||
|
||||
eproto = tracepoint_string("rxkad_rsp_level");
|
||||
abort_code = RXKADLEVELFAIL;
|
||||
level = ntohl(response.encrypted.level);
|
||||
level = ntohl(response->encrypted.level);
|
||||
if (level > RXRPC_SECURITY_ENCRYPT)
|
||||
goto protocol_error_free;
|
||||
conn->params.security_level = level;
|
||||
|
@ -1168,9 +1174,10 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
|
|||
* as for a client connection */
|
||||
ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno);
|
||||
if (ret < 0)
|
||||
goto temporary_error_free;
|
||||
goto temporary_error_free_ticket;
|
||||
|
||||
kfree(ticket);
|
||||
kfree(response);
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
|
||||
|
@ -1179,12 +1186,15 @@ protocol_error_unlock:
|
|||
protocol_error_free:
|
||||
kfree(ticket);
|
||||
protocol_error:
|
||||
kfree(response);
|
||||
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
|
||||
*_abort_code = abort_code;
|
||||
return -EPROTO;
|
||||
|
||||
temporary_error_free:
|
||||
temporary_error_free_ticket:
|
||||
kfree(ticket);
|
||||
temporary_error_free_resp:
|
||||
kfree(response);
|
||||
temporary_error:
|
||||
/* Ignore the response packet if we got a temporary error such as
|
||||
* ENOMEM. We just want to send the challenge again. Note that we
|
||||
|
|
|
@ -947,7 +947,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (n->flags != flags) {
|
||||
if ((n->flags ^ flags) &
|
||||
~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
|
||||
NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@@ -327,7 +327,7 @@ static s64 tabledist(s64 mu, s32 sigma,

/* default uniform distribution */
if (dist == NULL)
return (rnd % (2 * sigma)) - sigma + mu;
return ((rnd % (2 * sigma)) + mu) - sigma;

t = dist->table[rnd % dist->size];
x = (sigma % NETEM_DIST_SCALE) * t;
@@ -1380,9 +1380,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
struct sctp_chunk *retval;
struct sk_buff *skb;
struct sock *sk;
int chunklen;

chunklen = SCTP_PAD4(sizeof(*chunk_hdr) + paylen);
if (chunklen > SCTP_MAX_CHUNK_LEN)
goto nodata;

/* No need to allocate LL here, as this is only a chunk. */
skb = alloc_skb(SCTP_PAD4(sizeof(*chunk_hdr) + paylen), gfp);
skb = alloc_skb(chunklen, gfp);
if (!skb)
goto nodata;
@@ -208,8 +208,8 @@ bool tipc_msg_validate(struct sk_buff **_skb)
int msz, hsz;

/* Ensure that flow control ratio condition is satisfied */
if (unlikely(skb->truesize / buf_roundup_len(skb) > 4)) {
skb = skb_copy(skb, GFP_ATOMIC);
if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
if (!skb)
return false;
kfree_skb(*_skb);
@@ -484,6 +484,8 @@ out:

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
.name = "tls",
.uid = TCP_ULP_TLS,
.user_visible = true,
.owner = THIS_MODULE,
.init = tls_init,
};