Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Two cases of overlapping changes, nothing fancy.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit af144a9834
Author: David S. Miller <davem@davemloft.net>
Date:   2019-07-08 19:48:57 -07:00

129 changed files with 1270 additions and 904 deletions


@@ -30,7 +30,7 @@ ip_ttl_propagate - BOOL
 	0 - disabled / RFC 3443 [Short] Pipe Model
 	1 - enabled / RFC 3443 Uniform Model (default)
 
-default_ttl - BOOL
+default_ttl - INTEGER
 	Default TTL value to use for MPLS packets where it cannot be
 	propagated from an IP header, either because one isn't present
 	or ip_ttl_propagate has been disabled.


@@ -17312,6 +17312,7 @@ N: xdp
 XDP SOCKETS (AF_XDP)
 M: Björn Töpel <bjorn.topel@intel.com>
 M: Magnus Karlsson <magnus.karlsson@intel.com>
+R: Jonathan Lemon <jonathan.lemon@gmail.com>
 L: netdev@vger.kernel.org
 L: bpf@vger.kernel.org
 S: Maintained


@ -732,9 +732,6 @@ static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[],
{ {
u8 *prog = *pprog; u8 *prog = *pprog;
int cnt = 0; int cnt = 0;
static int jmp_label1 = -1;
static int jmp_label2 = -1;
static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
@ -753,79 +750,23 @@ static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[],
/* mov ecx,src_lo */ /* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX)); EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
/* shld dreg_hi,dreg_lo,cl */
EMIT3(0x0F, 0xA5, add_2reg(0xC0, dreg_hi, dreg_lo));
/* shl dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
/* if ecx >= 32, mov dreg_lo into dreg_hi and clear dreg_lo */
/* cmp ecx,32 */ /* cmp ecx,32 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
/* Jumps when >= 32 */ /* skip the next two instructions (4 bytes) when < 32 */
if (is_imm8(jmp_label(jmp_label1, 2))) EMIT2(IA32_JB, 4);
EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
/* < 32 */
/* shl dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xE0, dreg_hi));
/* mov ebx,dreg_lo */
EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
/* shl dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
/* IA32_ECX = -IA32_ECX + 32 */
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shr ebx,cl */
EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
/* or dreg_hi,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 32 */
if (jmp_label1 == -1)
jmp_label1 = cnt;
/* cmp ecx,64 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
/* Jumps when >= 64 */
if (is_imm8(jmp_label(jmp_label2, 2)))
EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
/* >= 32 && < 64 */
/* sub ecx,32 */
EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
/* shl dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
/* mov dreg_hi,dreg_lo */ /* mov dreg_hi,dreg_lo */
EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo)); EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
/* xor dreg_lo,dreg_lo */ /* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo)); EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 64 */
if (jmp_label2 == -1)
jmp_label2 = cnt;
/* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
if (jmp_label3 == -1)
jmp_label3 = cnt;
if (dstk) { if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */ /* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
@ -844,9 +785,6 @@ static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[],
{ {
u8 *prog = *pprog; u8 *prog = *pprog;
int cnt = 0; int cnt = 0;
static int jmp_label1 = -1;
static int jmp_label2 = -1;
static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
@ -865,79 +803,23 @@ static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[],
/* mov ecx,src_lo */ /* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX)); EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
/* shrd dreg_lo,dreg_hi,cl */
EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
/* sar dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
/* if ecx >= 32, mov dreg_hi to dreg_lo and set/clear dreg_hi depending on sign */
/* cmp ecx,32 */ /* cmp ecx,32 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
/* Jumps when >= 32 */ /* skip the next two instructions (5 bytes) when < 32 */
if (is_imm8(jmp_label(jmp_label1, 2))) EMIT2(IA32_JB, 5);
EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
/* < 32 */
/* lshr dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
/* mov ebx,dreg_hi */
EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
/* ashr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
/* IA32_ECX = -IA32_ECX + 32 */
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shl ebx,cl */
EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
/* or dreg_lo,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 32 */
if (jmp_label1 == -1)
jmp_label1 = cnt;
/* cmp ecx,64 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
/* Jumps when >= 64 */
if (is_imm8(jmp_label(jmp_label2, 2)))
EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
/* >= 32 && < 64 */
/* sub ecx,32 */
EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
/* ashr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
/* mov dreg_lo,dreg_hi */ /* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi)); EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
/* sar dreg_hi,31 */
/* ashr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31); EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 64 */
if (jmp_label2 == -1)
jmp_label2 = cnt;
/* ashr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
if (jmp_label3 == -1)
jmp_label3 = cnt;
if (dstk) { if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */ /* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
@ -956,9 +838,6 @@ static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
{ {
u8 *prog = *pprog; u8 *prog = *pprog;
int cnt = 0; int cnt = 0;
static int jmp_label1 = -1;
static int jmp_label2 = -1;
static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
@ -977,77 +856,23 @@ static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
/* mov ecx,src_lo */ /* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX)); EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
/* shrd dreg_lo,dreg_hi,cl */
EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
/* shr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
/* if ecx >= 32, mov dreg_hi to dreg_lo and clear dreg_hi */
/* cmp ecx,32 */ /* cmp ecx,32 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
/* Jumps when >= 32 */ /* skip the next two instructions (4 bytes) when < 32 */
if (is_imm8(jmp_label(jmp_label1, 2))) EMIT2(IA32_JB, 4);
EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
/* < 32 */
/* lshr dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
/* mov ebx,dreg_hi */
EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
/* shr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
/* IA32_ECX = -IA32_ECX + 32 */
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shl ebx,cl */
EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
/* or dreg_lo,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 32 */
if (jmp_label1 == -1)
jmp_label1 = cnt;
/* cmp ecx,64 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
/* Jumps when >= 64 */
if (is_imm8(jmp_label(jmp_label2, 2)))
EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
/* >= 32 && < 64 */
/* sub ecx,32 */
EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
/* shr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
/* mov dreg_lo,dreg_hi */ /* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi)); EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
/* xor dreg_hi,dreg_hi */ /* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 64 */
if (jmp_label2 == -1)
jmp_label2 = cnt;
/* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
if (jmp_label3 == -1)
jmp_label3 = cnt;
if (dstk) { if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */ /* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
@ -1077,27 +902,10 @@ static inline void emit_ia32_lsh_i64(const u8 dst[], const u32 val,
} }
/* Do LSH operation */ /* Do LSH operation */
if (val < 32) { if (val < 32) {
/* shl dreg_hi,imm8 */ /* shld dreg_hi,dreg_lo,imm8 */
EMIT3(0xC1, add_1reg(0xE0, dreg_hi), val); EMIT4(0x0F, 0xA4, add_2reg(0xC0, dreg_hi, dreg_lo), val);
/* mov ebx,dreg_lo */
EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
/* shl dreg_lo,imm8 */ /* shl dreg_lo,imm8 */
EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val); EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val);
/* IA32_ECX = 32 - val */
/* mov ecx,val */
EMIT2(0xB1, val);
/* movzx ecx,ecx */
EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shr ebx,cl */
EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
/* or dreg_hi,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
} else if (val >= 32 && val < 64) { } else if (val >= 32 && val < 64) {
u32 value = val - 32; u32 value = val - 32;
@ -1143,27 +951,10 @@ static inline void emit_ia32_rsh_i64(const u8 dst[], const u32 val,
/* Do RSH operation */ /* Do RSH operation */
if (val < 32) { if (val < 32) {
/* shr dreg_lo,imm8 */ /* shrd dreg_lo,dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val); EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
/* mov ebx,dreg_hi */
EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
/* shr dreg_hi,imm8 */ /* shr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val); EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val);
/* IA32_ECX = 32 - val */
/* mov ecx,val */
EMIT2(0xB1, val);
/* movzx ecx,ecx */
EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shl ebx,cl */
EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
/* or dreg_lo,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
} else if (val >= 32 && val < 64) { } else if (val >= 32 && val < 64) {
u32 value = val - 32; u32 value = val - 32;
@ -1208,27 +999,10 @@ static inline void emit_ia32_arsh_i64(const u8 dst[], const u32 val,
} }
/* Do RSH operation */ /* Do RSH operation */
if (val < 32) { if (val < 32) {
/* shr dreg_lo,imm8 */ /* shrd dreg_lo,dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val); EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
/* mov ebx,dreg_hi */
EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
/* ashr dreg_hi,imm8 */ /* ashr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val); EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val);
/* IA32_ECX = 32 - val */
/* mov ecx,val */
EMIT2(0xB1, val);
/* movzx ecx,ecx */
EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shl ebx,cl */
EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
/* or dreg_lo,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
} else if (val >= 32 && val < 64) { } else if (val >= 32 && val < 64) {
u32 value = val - 32; u32 value = val - 32;

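A note on the x86-32 JIT hunks above: the old code emitted a three-label branch ladder per 64-bit register shift, while the new code leans on shld/shrd plus one short forward jump. As a rough sketch of the semantics the new left-shift sequence computes for a value held in a hi:lo register pair (an illustration only, assuming a shift count in the 0-63 range; this is not kernel code):

#include <stdint.h>

/* Equivalent C for the emitted LSH sequence: shld/shl cover counts below
 * 32; for 32..63 the (already mod-32 shifted) low word becomes the high
 * word and the low word is cleared.
 */
static void lsh64(uint32_t *hi, uint32_t *lo, unsigned int n)
{
        if (n == 0)
                return;

        if (n < 32) {
                *hi = (*hi << n) | (*lo >> (32 - n));   /* shld hi,lo,cl */
                *lo <<= n;                              /* shl lo,cl */
        } else {
                *hi = *lo << (n - 32);                  /* mov hi,lo path */
                *lo = 0;                                /* xor lo,lo */
        }
}

The logical and arithmetic right-shift variants follow the same shape with shrd/shr and shrd/sar respectively.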

@@ -3859,8 +3859,8 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
 					struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
-	struct iphdr *iph = ip_hdr(skb);
 	struct slave *slave;
+	int slave_cnt;
 	u32 slave_id;
 
 	/* Start with the curr_active_slave that joined the bond as the
@@ -3869,23 +3869,32 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
 	 * send the join/membership reports. The curr_active_slave found
 	 * will send all of this type of traffic.
 	 */
-	if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
-		slave = rcu_dereference(bond->curr_active_slave);
-		if (slave)
-			bond_dev_queue_xmit(bond, skb, slave->dev);
-		else
-			bond_xmit_slave_id(bond, skb, 0);
-	} else {
-		int slave_cnt = READ_ONCE(bond->slave_cnt);
+	if (skb->protocol == htons(ETH_P_IP)) {
+		int noff = skb_network_offset(skb);
+		struct iphdr *iph;
 
-		if (likely(slave_cnt)) {
-			slave_id = bond_rr_gen_slave_id(bond);
-			bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
-		} else {
-			bond_tx_drop(bond_dev, skb);
+		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
+			goto non_igmp;
+
+		iph = ip_hdr(skb);
+		if (iph->protocol == IPPROTO_IGMP) {
+			slave = rcu_dereference(bond->curr_active_slave);
+			if (slave)
+				bond_dev_queue_xmit(bond, skb, slave->dev);
+			else
+				bond_xmit_slave_id(bond, skb, 0);
+			return NETDEV_TX_OK;
 		}
 	}
 
+non_igmp:
+	slave_cnt = READ_ONCE(bond->slave_cnt);
+	if (likely(slave_cnt)) {
+		slave_id = bond_rr_gen_slave_id(bond);
+		bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+	} else {
+		bond_tx_drop(bond_dev, skb);
+	}
+
 	return NETDEV_TX_OK;
 }

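The point of the bonding change above is that ip_hdr() may reference bytes that are not yet in the skb's linear area, so the IGMP check must come only after a successful pskb_may_pull(). A minimal sketch of that pattern (generic illustration, not the driver's exact code):

#include <linux/ip.h>
#include <linux/skbuff.h>

/* Return the IPv4 header only once it is known to be linear, else NULL. */
static const struct iphdr *ipv4_hdr_if_linear(struct sk_buff *skb)
{
        int noff = skb_network_offset(skb);

        if (unlikely(!pskb_may_pull(skb, noff + sizeof(struct iphdr))))
                return NULL;

        return ip_hdr(skb);
}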

@@ -955,13 +955,13 @@ static int b53_setup(struct dsa_switch *ds)
 	if (ret)
 		dev_err(ds->dev, "failed to apply configuration\n");
 
-	/* Configure IMP/CPU port, disable unused ports. Enabled
+	/* Configure IMP/CPU port, disable all other ports. Enabled
 	 * ports will be configured with .port_enable
 	 */
 	for (port = 0; port < dev->num_ports; port++) {
 		if (dsa_is_cpu_port(ds, port))
 			b53_enable_cpu_port(dev, port);
-		else if (dsa_is_unused_port(ds, port))
+		else
 			b53_disable_port(ds, port);
 	}
 


@@ -4786,6 +4786,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
 		err = PTR_ERR(chip->reset);
 		goto out;
 	}
+	if (chip->reset)
+		usleep_range(1000, 2000);
 
 	err = mv88e6xxx_detect(chip);
 	if (err)


@@ -224,8 +224,8 @@ static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
 static void emac_get_drvinfo(struct net_device *dev,
 			     struct ethtool_drvinfo *info)
 {
-	strlcpy(info->driver, DRV_NAME, sizeof(DRV_NAME));
-	strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
 }

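The drvinfo fix above is worth spelling out: sizeof(DRV_NAME) measures the string literal, not the destination field, so the copy was bounded by the wrong object. A small sketch of the difference (hypothetical buffer size, for illustration only):

#include <linux/string.h>

#define DRV_NAME "arc_emac"             /* 9 bytes including the NUL */

struct drvinfo_like {
        char driver[32];
};

static void fill_drvinfo(struct drvinfo_like *info)
{
        /* Wrong: bounds the copy by the literal, not the buffer, and
         * overflows as soon as the literal outgrows the field:
         *      strlcpy(info->driver, DRV_NAME, sizeof(DRV_NAME));
         */

        /* Right: always bound the copy by the destination buffer. */
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
}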

@@ -3857,9 +3857,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
 		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+			bp->eth_stats.ptp_skip_tx_ts++;
 			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
 		} else if (bp->ptp_tx_skb) {
-			BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+			bp->eth_stats.ptp_skip_tx_ts++;
+			netdev_err_once(bp->dev,
+					"Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
 		} else {
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 			/* schedule check for Tx timestamp */


@@ -182,7 +182,9 @@ static const struct {
 	{ STATS_OFFSET32(driver_filtered_tx_pkt),
 				4, false, "driver_filtered_tx_pkt" },
 	{ STATS_OFFSET32(eee_tx_lpi),
-				4, true, "Tx LPI entry count"}
+				4, true, "Tx LPI entry count"},
+	{ STATS_OFFSET32(ptp_skip_tx_ts),
+				4, false, "ptp_skipped_tx_tstamp" },
 };
 
 #define BNX2X_NUM_STATS		ARRAY_SIZE(bnx2x_stats_arr)


@@ -15214,11 +15214,24 @@ static void bnx2x_ptp_task(struct work_struct *work)
 	u32 val_seq;
 	u64 timestamp, ns;
 	struct skb_shared_hwtstamps shhwtstamps;
+	bool bail = true;
+	int i;
 
-	/* Read Tx timestamp registers */
-	val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
-			 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
-	if (val_seq & 0x10000) {
+	/* FW may take a while to complete timestamping; try a bit and if it's
+	 * still not complete, may indicate an error state - bail out then.
+	 */
+	for (i = 0; i < 10; i++) {
+		/* Read Tx timestamp registers */
+		val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+				 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+		if (val_seq & 0x10000) {
+			bail = false;
+			break;
+		}
+		msleep(1 << i);
+	}
+
+	if (!bail) {
 		/* There is a valid timestamp value */
 		timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
 				   NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
@@ -15233,16 +15246,18 @@ static void bnx2x_ptp_task(struct work_struct *work)
 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 		shhwtstamps.hwtstamp = ns_to_ktime(ns);
 		skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
-		dev_kfree_skb_any(bp->ptp_tx_skb);
-		bp->ptp_tx_skb = NULL;
 
 		DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
 		   timestamp, ns);
 	} else {
-		DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
-		/* Reschedule to keep checking for a valid timestamp value */
-		schedule_work(&bp->ptp_task);
+		DP(BNX2X_MSG_PTP,
+		   "Tx timestamp is not recorded (register read=%u)\n",
+		   val_seq);
+		bp->eth_stats.ptp_skip_tx_ts++;
 	}
+
+	dev_kfree_skb_any(bp->ptp_tx_skb);
+	bp->ptp_tx_skb = NULL;
 }
 
 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)

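For scale, the polling loop added above sleeps 1 << i milliseconds per miss, so the worst case before the driver gives up and bumps ptp_skip_tx_ts is about one second (ignoring msleep() rounding slack). A quick sketch of that bound:

/* Worst-case wait of the 10-iteration poll above:
 * sum of (1 << i) for i = 0..9 is 2^10 - 1 = 1023 ms.
 */
static unsigned int ptp_poll_worst_case_ms(void)
{
        unsigned int i, total_ms = 0;

        for (i = 0; i < 10; i++)
                total_ms += 1u << i;

        return total_ms;        /* 1023 */
}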

@@ -207,6 +207,9 @@ struct bnx2x_eth_stats {
 	u32 driver_filtered_tx_pkt;
 	/* src: Clear-on-Read register; Will not survive PMF Migration */
 	u32 eee_tx_lpi;
+
+	/* PTP */
+	u32 ptp_skip_tx_ts;
 };
 
 struct bnx2x_eth_q_stats {


@@ -5567,7 +5567,16 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
 
 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
 {
-	return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
+	int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
+	int cp = bp->cp_nr_rings;
+
+	if (!ulp_stat)
+		return cp;
+
+	if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
+		return bnxt_get_ulp_msix_base(bp) + ulp_stat;
+
+	return cp + ulp_stat;
 }
 
 static bool bnxt_need_reserve_rings(struct bnxt *bp)
@@ -7536,11 +7545,7 @@ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
 
 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
 {
-	unsigned int stat;
-
-	stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
-	stat -= bp->cp_nr_rings;
-	return stat;
+	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
 }
 
 int bnxt_get_avail_msix(struct bnxt *bp, int num)
@@ -10322,10 +10327,10 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	bnxt_dcb_free(bp);
 	kfree(bp->edev);
 	bp->edev = NULL;
-	bnxt_cleanup_pci(bp);
 	bnxt_free_ctx_mem(bp);
 	kfree(bp->ctx);
 	bp->ctx = NULL;
+	bnxt_cleanup_pci(bp);
 	bnxt_free_port_stats(bp);
 	free_netdev(dev);
 }
@@ -10919,6 +10924,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
 
 	if (system_state == SYSTEM_POWER_OFF) {
 		bnxt_clear_int_mode(bp);
+		pci_disable_device(pdev);
 		pci_wake_from_d3(pdev, bp->wol);
 		pci_set_power_state(pdev, PCI_D3hot);
 	}


@@ -396,7 +396,7 @@ static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
 	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
 		bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
 		if (bp->max_dscp_value < 0x3f)


@@ -2842,7 +2842,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
 	bool offline = false;
 	u8 test_results = 0;
 	u8 test_mask = 0;
-	int rc, i;
+	int rc = 0, i;
 
 	if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
 		return;
@@ -2913,9 +2913,9 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
 		}
 		bnxt_hwrm_phy_loopback(bp, false, false);
 		bnxt_half_close_nic(bp);
-		bnxt_open_nic(bp, false, true);
+		rc = bnxt_open_nic(bp, false, true);
 	}
-	if (bnxt_test_irq(bp)) {
+	if (rc || bnxt_test_irq(bp)) {
 		buf[BNXT_IRQ_TEST_IDX] = 1;
 		etest->flags |= ETH_TEST_FL_FAILED;
 	}


@@ -157,8 +157,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
 	}
 	if (BNXT_NEW_RM(bp)) {
 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+		int resv_msix;
 
-		avail_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
+		resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
+		avail_msix = min_t(int, resv_msix, avail_msix);
 		edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
 	}
 	bnxt_fill_msix_vecs(bp, ent);


@@ -496,7 +496,11 @@
 
 /* Bitfields in TISUBN */
 #define GEM_SUBNSINCR_OFFSET			0
-#define GEM_SUBNSINCR_SIZE			16
+#define GEM_SUBNSINCRL_OFFSET			24
+#define GEM_SUBNSINCRL_SIZE			8
+#define GEM_SUBNSINCRH_OFFSET			0
+#define GEM_SUBNSINCRH_SIZE			16
+#define GEM_SUBNSINCR_SIZE			24
 
 /* Bitfields in TI */
 #define GEM_NSINCR_OFFSET			0
@@ -834,6 +838,9 @@ struct gem_tx_ts {
 /* limit RX checksum offload to TCP and UDP packets */
 #define GEM_RX_CSUM_CHECKED_MASK		2
 
+/* Scaled PPM fraction */
+#define PPM_FRACTION	16
+
 /* struct macb_tx_skb - data about an skb which is being transmitted
  * @skb: skb currently being transmitted, only set for the last buffer
  * of the frame


@@ -104,7 +104,10 @@ static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec)
 	 * to take effect.
 	 */
 	spin_lock_irqsave(&bp->tsu_clk_lock, flags);
-	gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, incr_spec->sub_ns));
+	/* RegBit[15:0] = Subns[23:8]; RegBit[31:24] = Subns[7:0] */
+	gem_writel(bp, TISUBN, GEM_BF(SUBNSINCRL, incr_spec->sub_ns) |
+		   GEM_BF(SUBNSINCRH, (incr_spec->sub_ns >>
+			  GEM_SUBNSINCRL_SIZE)));
 	gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
 	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
 
@@ -135,7 +138,7 @@ static int gem_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 	 * (temp / USEC_PER_SEC) + 0.5
 	 */
 	adj += (USEC_PER_SEC >> 1);
-	adj >>= GEM_SUBNSINCR_SIZE; /* remove fractions */
+	adj >>= PPM_FRACTION; /* remove fractions */
 	adj = div_u64(adj, USEC_PER_SEC);
 	adj = neg_adj ? (word - adj) : (word + adj);

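The TISUBN write above spreads the now 24-bit sub-nanosecond increment across two register fields: bits [15:0] carry sub_ns[23:8] and bits [31:24] carry sub_ns[7:0]. A hedged helper showing that packing under the GEM_SUBNSINCR{L,H} layout from the header hunk (illustration only; the driver expresses the same thing through GEM_BF()):

#include <linux/types.h>

/* Pack a 24-bit sub-ns increment per the new TISUBN layout:
 * RegBit[15:0]  = sub_ns[23:8], RegBit[31:24] = sub_ns[7:0].
 */
static u32 gem_tisubn_pack(u32 sub_ns)
{
        u32 low8   = sub_ns & 0xff;             /* sub_ns[7:0]  -> bits 31:24 */
        u32 high16 = (sub_ns >> 8) & 0xffff;    /* sub_ns[23:8] -> bits 15:0  */

        return (low8 << 24) | high16;
}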

@@ -2370,6 +2370,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
 		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
+		ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
 		ndev->max_mtu = MAC_MAX_MTU_V2 -
 				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
 		break;


@@ -312,7 +312,8 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
 	}
 
 	/* Set value */
-	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
+	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
+		shift & MVPP2_PRS_SRAM_SHIFT_MASK;
 
 	/* Reset and set operation */
 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,


@@ -102,13 +102,15 @@ static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
 	unsigned long next_id = (unsigned long)id + 1;
 	struct mlx5_fc *counter;
+	unsigned long tmp;
 
 	rcu_read_lock();
 	/* skip counters that are in idr, but not yet in counters list */
-	while ((counter = idr_get_next_ul(&fc_stats->counters_idr,
-					  &next_id)) != NULL &&
-	       list_empty(&counter->list))
-		next_id++;
+	idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
+				       counter, tmp, next_id) {
+		if (!list_empty(&counter->list))
+			break;
+	}
 	rcu_read_unlock();
 
 	return counter ? &counter->list : &fc_stats->counters;


@@ -990,7 +990,7 @@ static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
 				       struct ethtool_drvinfo *ed)
 {
 	strlcpy(ed->driver, "nixge", sizeof(ed->driver));
-	strlcpy(ed->bus_info, "platform", sizeof(ed->driver));
+	strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
 }
 
 static int nixge_ethtools_get_coalesce(struct net_device *ndev,


@@ -3058,17 +3058,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Manage oversized TCP frames for GMAC4 device */
 	if (skb_is_gso(skb) && priv->tso) {
-		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
-			/*
-			 * There is no way to determine the number of TSO
-			 * capable Queues. Let's use always the Queue 0
-			 * because if TSO is supported then at least this
-			 * one will be capable.
-			 */
-			skb_set_queue_mapping(skb, 0);
-
+		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
 			return stmmac_tso_xmit(skb, dev);
-		}
 	}
 
 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3886,6 +3877,22 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
 	}
 }
 
+static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
+			       struct net_device *sb_dev)
+{
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+		/*
+		 * There is no way to determine the number of TSO
+		 * capable Queues. Let's use always the Queue 0
+		 * because if TSO is supported then at least this
+		 * one will be capable.
+		 */
+		return 0;
+	}
+
+	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
+}
+
 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
 {
 	struct stmmac_priv *priv = netdev_priv(ndev);
@@ -4102,6 +4109,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_tx_timeout = stmmac_tx_timeout,
 	.ndo_do_ioctl = stmmac_ioctl,
 	.ndo_setup_tc = stmmac_setup_tc,
+	.ndo_select_queue = stmmac_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = stmmac_poll_controller,
 #endif


@@ -2573,6 +2573,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 			return ret;
 		}
 
+		slave_data->slave_node = slave_node;
 		slave_data->phy_node = of_parse_phandle(slave_node,
 							"phy-handle", 0);
 		parp = of_get_property(slave_node, "phy_id", &lenp);
@@ -2723,6 +2724,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
 
 	/* register the network device */
 	SET_NETDEV_DEV(ndev, cpsw->dev);
+	ndev->dev.of_node = cpsw->slaves[1].data->slave_node;
 	ret = register_netdev(ndev);
 	if (ret)
 		dev_err(cpsw->dev, "cpsw: error registering net device\n");
@@ -2900,6 +2902,7 @@ static int cpsw_probe(struct platform_device *pdev)
 
 	/* register the network device */
 	SET_NETDEV_DEV(ndev, dev);
+	ndev->dev.of_node = cpsw->slaves[0].data->slave_node;
 	ret = register_netdev(ndev);
 	if (ret) {
 		dev_err(dev, "error registering net device\n");


@@ -272,6 +272,7 @@ struct cpsw_host_regs {
 };
 
 struct cpsw_slave_data {
+	struct device_node *slave_node;
 	struct device_node *phy_node;
 	char		phy_id[MII_BUS_ID_SIZE];
 	int		phy_if;


@ -285,16 +285,29 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
return gtp_rx(pctx, skb, hdrlen, gtp->role); return gtp_rx(pctx, skb, hdrlen, gtp->role);
} }
static void gtp_encap_destroy(struct sock *sk) static void __gtp_encap_destroy(struct sock *sk)
{ {
struct gtp_dev *gtp; struct gtp_dev *gtp;
gtp = rcu_dereference_sk_user_data(sk); lock_sock(sk);
gtp = sk->sk_user_data;
if (gtp) { if (gtp) {
if (gtp->sk0 == sk)
gtp->sk0 = NULL;
else
gtp->sk1u = NULL;
udp_sk(sk)->encap_type = 0; udp_sk(sk)->encap_type = 0;
rcu_assign_sk_user_data(sk, NULL); rcu_assign_sk_user_data(sk, NULL);
sock_put(sk); sock_put(sk);
} }
release_sock(sk);
}
static void gtp_encap_destroy(struct sock *sk)
{
rtnl_lock();
__gtp_encap_destroy(sk);
rtnl_unlock();
} }
static void gtp_encap_disable_sock(struct sock *sk) static void gtp_encap_disable_sock(struct sock *sk)
@ -302,7 +315,7 @@ static void gtp_encap_disable_sock(struct sock *sk)
if (!sk) if (!sk)
return; return;
gtp_encap_destroy(sk); __gtp_encap_destroy(sk);
} }
static void gtp_encap_disable(struct gtp_dev *gtp) static void gtp_encap_disable(struct gtp_dev *gtp)
@ -681,7 +694,6 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head)
{ {
struct gtp_dev *gtp = netdev_priv(dev); struct gtp_dev *gtp = netdev_priv(dev);
gtp_encap_disable(gtp);
gtp_hashtable_free(gtp); gtp_hashtable_free(gtp);
list_del_rcu(&gtp->list); list_del_rcu(&gtp->list);
unregister_netdevice_queue(dev, head); unregister_netdevice_queue(dev, head);
@ -796,7 +808,8 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
goto out_sock; goto out_sock;
} }
if (rcu_dereference_sk_user_data(sock->sk)) { lock_sock(sock->sk);
if (sock->sk->sk_user_data) {
sk = ERR_PTR(-EBUSY); sk = ERR_PTR(-EBUSY);
goto out_sock; goto out_sock;
} }
@ -812,6 +825,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
out_sock: out_sock:
release_sock(sock->sk);
sockfd_put(sock); sockfd_put(sock);
return sk; return sk;
} }
@ -843,8 +857,13 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
if (data[IFLA_GTP_ROLE]) { if (data[IFLA_GTP_ROLE]) {
role = nla_get_u32(data[IFLA_GTP_ROLE]); role = nla_get_u32(data[IFLA_GTP_ROLE]);
if (role > GTP_ROLE_SGSN) if (role > GTP_ROLE_SGSN) {
if (sk0)
gtp_encap_disable_sock(sk0);
if (sk1u)
gtp_encap_disable_sock(sk1u);
return -EINVAL; return -EINVAL;
}
} }
gtp->sk0 = sk0; gtp->sk0 = sk0;
@ -945,7 +964,7 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
} }
pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL); pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
if (pctx == NULL) if (pctx == NULL)
return -ENOMEM; return -ENOMEM;
@ -1034,6 +1053,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
return -EINVAL; return -EINVAL;
} }
rtnl_lock();
rcu_read_lock(); rcu_read_lock();
gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
@ -1058,6 +1078,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
out_unlock: out_unlock:
rcu_read_unlock(); rcu_read_unlock();
rtnl_unlock();
return err; return err;
} }
@ -1360,9 +1381,9 @@ late_initcall(gtp_init);
static void __exit gtp_fini(void) static void __exit gtp_fini(void)
{ {
unregister_pernet_subsys(&gtp_net_ops);
genl_unregister_family(&gtp_genl_family); genl_unregister_family(&gtp_genl_family);
rtnl_link_unregister(&gtp_link_ops); rtnl_link_unregister(&gtp_link_ops);
unregister_pernet_subsys(&gtp_net_ops);
pr_info("GTP module unloaded\n"); pr_info("GTP module unloaded\n");
} }


@@ -865,6 +865,7 @@ static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
 
 static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
 {
+	skb->ip_summed = CHECKSUM_NONE;
 	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
 	skb_pull(skb, hdr_len);
 	pskb_trim_unique(skb, skb->len - icv_len);
@@ -1099,10 +1100,9 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 	}
 
 	skb = skb_unshare(skb, GFP_ATOMIC);
-	if (!skb) {
-		*pskb = NULL;
+	*pskb = skb;
+	if (!skb)
 		return RX_HANDLER_CONSUMED;
-	}
 
 	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
 	if (!pulled_sci) {

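The macsec_handle_frame() hunk matters because skb_unshare() may free the original skb and hand back a different one, or NULL when the copy fails, so an rx_handler has to refresh *pskb before returning; otherwise the caller is left holding a dangling pointer. A minimal sketch of that pattern (generic illustration, not the macsec code itself):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static rx_handler_result_t demo_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;

        skb = skb_unshare(skb, GFP_ATOMIC);
        *pskb = skb;            /* skb may be a fresh copy or NULL now */
        if (!skb)
                return RX_HANDLER_CONSUMED;

        /* ... work on the private copy ... */
        return RX_HANDLER_PASS;
}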

@@ -226,7 +226,7 @@ static void asix_phy_reset(struct usbnet *dev, unsigned int reset_bits)
 static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
 {
 	int ret = 0;
-	u8 buf[ETH_ALEN];
+	u8 buf[ETH_ALEN] = {0};
 	int i;
 	unsigned long gpio_bits = dev->driver_info->data;
@@ -677,7 +677,7 @@ static int asix_resume(struct usb_interface *intf)
 static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 {
 	int ret, i;
-	u8 buf[ETH_ALEN], chipcode = 0;
+	u8 buf[ETH_ALEN] = {0}, chipcode = 0;
 	u32 phyid;
 	struct asix_common_private *priv;
@@ -1061,7 +1061,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
 static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
 {
 	int ret;
-	u8 buf[ETH_ALEN];
+	u8 buf[ETH_ALEN] = {0};
 
 	usbnet_get_endpoints(dev,intf);


@ -28,7 +28,7 @@
#define NETNEXT_VERSION "09" #define NETNEXT_VERSION "09"
/* Information for net */ /* Information for net */
#define NET_VERSION "9" #define NET_VERSION "10"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@ -53,6 +53,9 @@
#define PAL_BDC_CR 0xd1a0 #define PAL_BDC_CR 0xd1a0
#define PLA_TEREDO_TIMER 0xd2cc #define PLA_TEREDO_TIMER 0xd2cc
#define PLA_REALWOW_TIMER 0xd2e8 #define PLA_REALWOW_TIMER 0xd2e8
#define PLA_SUSPEND_FLAG 0xd38a
#define PLA_INDICATE_FALG 0xd38c
#define PLA_EXTRA_STATUS 0xd398
#define PLA_EFUSE_DATA 0xdd00 #define PLA_EFUSE_DATA 0xdd00
#define PLA_EFUSE_CMD 0xdd02 #define PLA_EFUSE_CMD 0xdd02
#define PLA_LEDSEL 0xdd90 #define PLA_LEDSEL 0xdd90
@ -336,6 +339,15 @@
/* PLA_BOOT_CTRL */ /* PLA_BOOT_CTRL */
#define AUTOLOAD_DONE 0x0002 #define AUTOLOAD_DONE 0x0002
/* PLA_SUSPEND_FLAG */
#define LINK_CHG_EVENT BIT(0)
/* PLA_INDICATE_FALG */
#define UPCOMING_RUNTIME_D3 BIT(0)
/* PLA_EXTRA_STATUS */
#define LINK_CHANGE_FLAG BIT(8)
/* USB_USB2PHY */ /* USB_USB2PHY */
#define USB2PHY_SUSPEND 0x0001 #define USB2PHY_SUSPEND 0x0001
#define USB2PHY_L1 0x0002 #define USB2PHY_L1 0x0002
@ -813,6 +825,14 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
return ret; return ret;
} }
static void rtl_set_unplug(struct r8152 *tp)
{
if (tp->udev->state == USB_STATE_NOTATTACHED) {
set_bit(RTL8152_UNPLUG, &tp->flags);
smp_mb__after_atomic();
}
}
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
void *data, u16 type) void *data, u16 type)
{ {
@ -851,7 +871,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
} }
if (ret == -ENODEV) if (ret == -ENODEV)
set_bit(RTL8152_UNPLUG, &tp->flags); rtl_set_unplug(tp);
return ret; return ret;
} }
@ -921,7 +941,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
error1: error1:
if (ret == -ENODEV) if (ret == -ENODEV)
set_bit(RTL8152_UNPLUG, &tp->flags); rtl_set_unplug(tp);
return ret; return ret;
} }
@ -1309,7 +1329,7 @@ static void read_bulk_callback(struct urb *urb)
napi_schedule(&tp->napi); napi_schedule(&tp->napi);
return; return;
case -ESHUTDOWN: case -ESHUTDOWN:
set_bit(RTL8152_UNPLUG, &tp->flags); rtl_set_unplug(tp);
netif_device_detach(tp->netdev); netif_device_detach(tp->netdev);
return; return;
case -ENOENT: case -ENOENT:
@ -1429,7 +1449,7 @@ static void intr_callback(struct urb *urb)
resubmit: resubmit:
res = usb_submit_urb(urb, GFP_ATOMIC); res = usb_submit_urb(urb, GFP_ATOMIC);
if (res == -ENODEV) { if (res == -ENODEV) {
set_bit(RTL8152_UNPLUG, &tp->flags); rtl_set_unplug(tp);
netif_device_detach(tp->netdev); netif_device_detach(tp->netdev);
} else if (res) { } else if (res) {
netif_err(tp, intr, tp->netdev, netif_err(tp, intr, tp->netdev,
@ -2024,7 +2044,7 @@ static void tx_bottom(struct r8152 *tp)
struct net_device *netdev = tp->netdev; struct net_device *netdev = tp->netdev;
if (res == -ENODEV) { if (res == -ENODEV) {
set_bit(RTL8152_UNPLUG, &tp->flags); rtl_set_unplug(tp);
netif_device_detach(netdev); netif_device_detach(netdev);
} else { } else {
struct net_device_stats *stats = &netdev->stats; struct net_device_stats *stats = &netdev->stats;
@ -2098,7 +2118,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
ret = usb_submit_urb(agg->urb, mem_flags); ret = usb_submit_urb(agg->urb, mem_flags);
if (ret == -ENODEV) { if (ret == -ENODEV) {
set_bit(RTL8152_UNPLUG, &tp->flags); rtl_set_unplug(tp);
netif_device_detach(tp->netdev); netif_device_detach(tp->netdev);
} else if (ret) { } else if (ret) {
struct urb *urb = agg->urb; struct urb *urb = agg->urb;
@ -2355,6 +2375,12 @@ static int rtl_stop_rx(struct r8152 *tp)
return 0; return 0;
} }
static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp)
{
ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN,
OWN_UPDATE | OWN_CLEAR);
}
static int rtl_enable(struct r8152 *tp) static int rtl_enable(struct r8152 *tp)
{ {
u32 ocp_data; u32 ocp_data;
@ -2365,6 +2391,15 @@ static int rtl_enable(struct r8152 *tp)
ocp_data |= CR_RE | CR_TE; ocp_data |= CR_RE | CR_TE;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
switch (tp->version) {
case RTL_VER_08:
case RTL_VER_09:
r8153b_rx_agg_chg_indicate(tp);
break;
default:
break;
}
rxdy_gated_en(tp, false); rxdy_gated_en(tp, false);
return 0; return 0;
@ -2381,12 +2416,6 @@ static int rtl8152_enable(struct r8152 *tp)
return rtl_enable(tp); return rtl_enable(tp);
} }
static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp)
{
ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN,
OWN_UPDATE | OWN_CLEAR);
}
static void r8153_set_rx_early_timeout(struct r8152 *tp) static void r8153_set_rx_early_timeout(struct r8152 *tp)
{ {
u32 ocp_data = tp->coalesce / 8; u32 ocp_data = tp->coalesce / 8;
@ -2409,7 +2438,6 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
128 / 8); 128 / 8);
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR,
ocp_data); ocp_data);
r8153b_rx_agg_chg_indicate(tp);
break; break;
default: default:
@ -2433,7 +2461,6 @@ static void r8153_set_rx_early_size(struct r8152 *tp)
case RTL_VER_09: case RTL_VER_09:
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE,
ocp_data / 8); ocp_data / 8);
r8153b_rx_agg_chg_indicate(tp);
break; break;
default: default:
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
@ -2806,20 +2833,24 @@ static void r8153b_power_cut_en(struct r8152 *tp, bool enable)
ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
} }
static void r8153b_queue_wake(struct r8152 *tp, bool enable) static void r8153_queue_wake(struct r8152 *tp, bool enable)
{ {
u32 ocp_data; u32 ocp_data;
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38a); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG);
if (enable) if (enable)
ocp_data |= BIT(0); ocp_data |= UPCOMING_RUNTIME_D3;
else else
ocp_data &= ~BIT(0); ocp_data &= ~UPCOMING_RUNTIME_D3;
ocp_write_byte(tp, MCU_TYPE_PLA, 0xd38a, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG, ocp_data);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38c); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG);
ocp_data &= ~BIT(0); ocp_data &= ~LINK_CHG_EVENT;
ocp_write_byte(tp, MCU_TYPE_PLA, 0xd38c, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
ocp_data &= ~LINK_CHANGE_FLAG;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
} }
static bool rtl_can_wakeup(struct r8152 *tp) static bool rtl_can_wakeup(struct r8152 *tp)
@ -2887,14 +2918,14 @@ static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
{ {
if (enable) { if (enable) {
r8153b_queue_wake(tp, true); r8153_queue_wake(tp, true);
r8153b_u1u2en(tp, false); r8153b_u1u2en(tp, false);
r8153_u2p3en(tp, false); r8153_u2p3en(tp, false);
rtl_runtime_suspend_enable(tp, true); rtl_runtime_suspend_enable(tp, true);
r8153b_ups_en(tp, true); r8153b_ups_en(tp, true);
} else { } else {
r8153b_ups_en(tp, false); r8153b_ups_en(tp, false);
r8153b_queue_wake(tp, false); r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false); rtl_runtime_suspend_enable(tp, false);
r8153_u2p3en(tp, true); r8153_u2p3en(tp, true);
r8153b_u1u2en(tp, true); r8153b_u1u2en(tp, true);
@ -4221,7 +4252,7 @@ static void r8153b_init(struct r8152 *tp)
r8153b_power_cut_en(tp, false); r8153b_power_cut_en(tp, false);
r8153b_ups_en(tp, false); r8153b_ups_en(tp, false);
r8153b_queue_wake(tp, false); r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false); rtl_runtime_suspend_enable(tp, false);
r8153b_u1u2en(tp, true); r8153b_u1u2en(tp, true);
usb_enable_lpm(tp->udev); usb_enable_lpm(tp->udev);
@ -4903,8 +4934,17 @@ static int rtl8152_set_coalesce(struct net_device *netdev,
if (tp->coalesce != coalesce->rx_coalesce_usecs) { if (tp->coalesce != coalesce->rx_coalesce_usecs) {
tp->coalesce = coalesce->rx_coalesce_usecs; tp->coalesce = coalesce->rx_coalesce_usecs;
if (netif_running(tp->netdev) && netif_carrier_ok(netdev)) if (netif_running(netdev) && netif_carrier_ok(netdev)) {
r8153_set_rx_early_timeout(tp); netif_stop_queue(netdev);
napi_disable(&tp->napi);
tp->rtl_ops.disable(tp);
tp->rtl_ops.enable(tp);
rtl_start_rx(tp);
clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
_rtl8152_set_rx_mode(netdev);
napi_enable(&tp->napi);
netif_wake_queue(netdev);
}
} }
mutex_unlock(&tp->control); mutex_unlock(&tp->control);
@ -5323,10 +5363,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL); usb_set_intfdata(intf, NULL);
if (tp) { if (tp) {
struct usb_device *udev = tp->udev; rtl_set_unplug(tp);
if (udev->state == USB_STATE_NOTATTACHED)
set_bit(RTL8152_UNPLUG, &tp->flags);
netif_napi_del(&tp->napi); netif_napi_del(&tp->napi);
unregister_netdev(tp->netdev); unregister_netdev(tp->netdev);


@ -812,6 +812,14 @@ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
return f; return f;
} }
static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
__be32 src_vni, struct vxlan_fdb *f)
{
++vxlan->addrcnt;
hlist_add_head_rcu(&f->hlist,
vxlan_fdb_head(vxlan, mac, src_vni));
}
static int vxlan_fdb_create(struct vxlan_dev *vxlan, static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip, const u8 *mac, union vxlan_addr *ip,
__u16 state, __be16 port, __be32 src_vni, __u16 state, __be16 port, __be32 src_vni,
@ -837,18 +845,13 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return rc; return rc;
} }
++vxlan->addrcnt;
hlist_add_head_rcu(&f->hlist,
vxlan_fdb_head(vxlan, mac, src_vni));
*fdb = f; *fdb = f;
return 0; return 0;
} }
static void vxlan_fdb_free(struct rcu_head *head) static void __vxlan_fdb_free(struct vxlan_fdb *f)
{ {
struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
struct vxlan_rdst *rd, *nd; struct vxlan_rdst *rd, *nd;
list_for_each_entry_safe(rd, nd, &f->remotes, list) { list_for_each_entry_safe(rd, nd, &f->remotes, list) {
@ -858,6 +861,13 @@ static void vxlan_fdb_free(struct rcu_head *head)
kfree(f); kfree(f);
} }
static void vxlan_fdb_free(struct rcu_head *head)
{
struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
__vxlan_fdb_free(f);
}
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
bool do_notify, bool swdev_notify) bool do_notify, bool swdev_notify)
{ {
@ -985,6 +995,7 @@ static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
if (rc < 0) if (rc < 0)
return rc; return rc;
vxlan_fdb_insert(vxlan, mac, src_vni, f);
rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH, rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
swdev_notify, extack); swdev_notify, extack);
if (rc) if (rc)
@ -3588,12 +3599,17 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
if (err) if (err)
goto errout; goto errout;
/* notify default fdb entry */
if (f) { if (f) {
vxlan_fdb_insert(vxlan, all_zeros_mac,
vxlan->default_dst.remote_vni, f);
/* notify default fdb entry */
err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
RTM_NEWNEIGH, true, extack); RTM_NEWNEIGH, true, extack);
if (err) if (err) {
goto errout; vxlan_fdb_destroy(vxlan, f, false, false);
goto unregister;
}
} }
list_add(&vxlan->next, &vn->vxlan_list); list_add(&vxlan->next, &vn->vxlan_list);
@ -3605,7 +3621,8 @@ errout:
* destroy the entry by hand here. * destroy the entry by hand here.
*/ */
if (f) if (f)
vxlan_fdb_destroy(vxlan, f, false, false); __vxlan_fdb_free(f);
unregister:
if (unregister) if (unregister)
unregister_netdevice(dev); unregister_netdevice(dev);
return err; return err;


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
 config ATH_COMMON
 	tristate


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
 obj-$(CONFIG_ATH5K)		+= ath5k/
 obj-$(CONFIG_ATH9K_HW)		+= ath9k/
 obj-$(CONFIG_CARL9170)		+= carl9170/


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
 config AR5523
 	tristate "Atheros AR5523 wireless driver support"
 	depends on MAC80211 && USB


@@ -1,2 +1,2 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
 obj-$(CONFIG_AR5523)	:= ar5523.o

View File

@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
 config ATH10K
 	tristate "Atheros 802.11ac wireless cards support"
 	depends on MAC80211 && HAS_DMA


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
 config ATH5K
 	tristate "Atheros 5xxx wireless cards support"
 	depends on (PCI || ATH25) && MAC80211


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
 ath5k-y += caps.o
 ath5k-y += initvals.o
 ath5k-y += eeprom.o


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
 config ATH6KL
 	tristate "Atheros mobile chipsets support"
 	depends on CFG80211


@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: ISC */
 #if !defined(_ATH6KL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
 
 #include <net/cfg80211.h>


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
 config ATH9K_HW
 	tristate
 config ATH9K_COMMON


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
 ath9k-y +=	beacon.o \
 		gpio.o \
 		init.o \


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
 config WCN36XX
 	tristate "Qualcomm Atheros WCN3660/3680 support"
 	depends on MAC80211 && HAS_DMA


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
 obj-$(CONFIG_WCN36XX) := wcn36xx.o
 wcn36xx-y +=	main.o \
 		dxe.o \


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
 config WIL6210
 	tristate "Wilocity 60g WiFi card wil6210 support"
 	select WANT_DEV_COREDUMP


@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
 obj-$(CONFIG_WIL6210) += wil6210.o
 wil6210-y := main.o


@ -82,6 +82,7 @@
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-" #define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" #define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" #define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" #define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
@ -106,6 +107,8 @@
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \ #define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode" IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
@ -241,6 +244,18 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
}; };
const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax101_cfg_quz_hr = { const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
.name = "Intel(R) Wi-Fi 6 AX101", .name = "Intel(R) Wi-Fi 6 AX101",
.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
@ -253,6 +268,42 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
}; };
const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
.name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
.name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax200_cfg_cc = { const struct iwl_cfg iwl_ax200_cfg_cc = {
.name = "Intel(R) Wi-Fi 6 AX200 160MHz", .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
.fw_name_pre = IWL_CC_A_FW_PRE, .fw_name_pre = IWL_CC_A_FW_PRE,
@ -333,6 +384,90 @@ const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
}; };
const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
.name = "Intel(R) Dual Band Wireless AC 9461",
.fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
.name = "Intel(R) Dual Band Wireless AC 9462",
.fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = { const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE, .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
@ -424,12 +559,12 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
}; };
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = { const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz", .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE, .fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210, IWL_DEVICE_AX210,
}; };
const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0 = { const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
.name = "Intel(R) Wi-Fi 7 AX211 160MHz", .name = "Intel(R) Wi-Fi 7 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE, .fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.uhb_supported = true, .uhb_supported = true,
@ -443,8 +578,8 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
IWL_DEVICE_AX210, IWL_DEVICE_AX210,
}; };
const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0 = { const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
.name = "Intel(R) Wi-Fi 7 AX210 160MHz", .name = "Intel(R) Wi-Fi 7 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE, .fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
IWL_DEVICE_AX210, IWL_DEVICE_AX210,
}; };
@ -457,6 +592,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));


@ -540,14 +540,20 @@ extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9560_2ac_160_cfg; extern const struct iwl_cfg iwl9560_2ac_160_cfg;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_soc; extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc; extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc; extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc; extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
@ -562,6 +568,10 @@ extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr; extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr; extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
extern const struct iwl_cfg iwl_ax200_cfg_cc; extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650x_2ax_cfg;
@ -580,9 +590,9 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0; extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0; extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0; extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0; extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */ #endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */ #endif /* __IWL_CONFIG_H__ */


@ -336,6 +336,7 @@ enum {
/* RF_ID value */ /* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105100) #define CSR_HW_RF_ID_TYPE_JF (0x00105100)
#define CSR_HW_RF_ID_TYPE_HR (0x0010A000) #define CSR_HW_RF_ID_TYPE_HR (0x0010A000)
#define CSR_HW_RF_ID_TYPE_HR1 (0x0010c100)
#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00) #define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00)
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000) #define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000) #define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)


@ -513,62 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */ /* 9000 Series */
{IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0040, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0040, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
@ -621,7 +615,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -630,7 +623,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
@ -708,7 +700,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
@ -717,7 +708,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
@ -764,7 +754,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -773,7 +762,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
@ -833,7 +821,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@ -842,7 +829,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
@ -890,63 +876,80 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_cfg_qnj_jf_b0)}, {IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_cfg_qnj_jf_b0)},
/* 22000 Series */ /* 22000 Series */
{IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0000, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0000, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0040, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0040, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0070, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x1080, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x2074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x4244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
@ -958,13 +961,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2725, 0x0090, iwlax210_2ax_cfg_so_hr_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax210_2ax_cfg_so_hr_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax210_2ax_cfg_so_hr_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_so_hr_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_so_hr_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_so_hr_a0)}, {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
{IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax210_2ax_cfg_so_hr_a0)}, {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
#endif /* CONFIG_IWLMVM */ #endif /* CONFIG_IWLMVM */


@ -3571,15 +3571,17 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans->cfg = &iwlax210_2ax_cfg_so_jf_a0; trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) { CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
trans->cfg = &iwlax210_2ax_cfg_so_gf_a0; trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) { CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0; trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
} }
} else if (cfg == &iwl_ax101_cfg_qu_hr) { } else if (cfg == &iwl_ax101_cfg_qu_hr) {
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == if ((CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) { trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
(CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
@ -3601,8 +3603,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
((trans->cfg != &iwl_ax200_cfg_cc && ((trans->cfg != &iwl_ax200_cfg_cc &&
trans->cfg != &killer1650x_2ax_cfg && trans->cfg != &killer1650x_2ax_cfg &&
trans->cfg != &killer1650w_2ax_cfg) || trans->cfg != &killer1650w_2ax_cfg &&
trans->cfg != &iwl_ax201_cfg_quz_hr) ||
trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
u32 hw_status; u32 hw_status;


@ -1759,9 +1759,10 @@ struct mwifiex_ie_types_wmm_queue_status {
struct ieee_types_vendor_header { struct ieee_types_vendor_header {
u8 element_id; u8 element_id;
u8 len; u8 len;
u8 oui[4]; /* 0~2: oui, 3: oui_type */ struct {
u8 oui_subtype; u8 oui[3];
u8 version; u8 oui_type;
} __packed oui;
} __packed; } __packed;
struct ieee_types_wmm_parameter { struct ieee_types_wmm_parameter {
@ -1775,6 +1776,9 @@ struct ieee_types_wmm_parameter {
* Version [1] * Version [1]
*/ */
struct ieee_types_vendor_header vend_hdr; struct ieee_types_vendor_header vend_hdr;
u8 oui_subtype;
u8 version;
u8 qos_info_bitmap; u8 qos_info_bitmap;
u8 reserved; u8 reserved;
struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS]; struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
@ -1792,6 +1796,8 @@ struct ieee_types_wmm_info {
* Version [1] * Version [1]
*/ */
struct ieee_types_vendor_header vend_hdr; struct ieee_types_vendor_header vend_hdr;
u8 oui_subtype;
u8 version;
u8 qos_info_bitmap; u8 qos_info_bitmap;
} __packed; } __packed;


@ -1361,21 +1361,25 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break; break;
case WLAN_EID_VENDOR_SPECIFIC: case WLAN_EID_VENDOR_SPECIFIC:
if (element_len + 2 < sizeof(vendor_ie->vend_hdr))
return -EINVAL;
vendor_ie = (struct ieee_types_vendor_specific *) vendor_ie = (struct ieee_types_vendor_specific *)
current_ptr; current_ptr;
if (!memcmp /* 802.11 requires at least 3-byte OUI. */
(vendor_ie->vend_hdr.oui, wpa_oui, if (element_len < sizeof(vendor_ie->vend_hdr.oui.oui))
sizeof(wpa_oui))) { return -EINVAL;
/* Not long enough for a match? Skip it. */
if (element_len < sizeof(wpa_oui))
break;
if (!memcmp(&vendor_ie->vend_hdr.oui, wpa_oui,
sizeof(wpa_oui))) {
bss_entry->bcn_wpa_ie = bss_entry->bcn_wpa_ie =
(struct ieee_types_vendor_specific *) (struct ieee_types_vendor_specific *)
current_ptr; current_ptr;
bss_entry->wpa_offset = (u16) bss_entry->wpa_offset = (u16)
(current_ptr - bss_entry->beacon_buf); (current_ptr - bss_entry->beacon_buf);
} else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui, } else if (!memcmp(&vendor_ie->vend_hdr.oui, wmm_oui,
sizeof(wmm_oui))) { sizeof(wmm_oui))) {
if (total_ie_len == if (total_ie_len ==
sizeof(struct ieee_types_wmm_parameter) || sizeof(struct ieee_types_wmm_parameter) ||


@ -1351,7 +1351,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
/* Test to see if it is a WPA IE, if not, then /* Test to see if it is a WPA IE, if not, then
* it is a gen IE * it is a gen IE
*/ */
if (!memcmp(pvendor_ie->oui, wpa_oui, if (!memcmp(&pvendor_ie->oui, wpa_oui,
sizeof(wpa_oui))) { sizeof(wpa_oui))) {
/* IE is a WPA/WPA2 IE so call set_wpa function /* IE is a WPA/WPA2 IE so call set_wpa function
*/ */
@ -1361,7 +1361,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
goto next_ie; goto next_ie;
} }
if (!memcmp(pvendor_ie->oui, wps_oui, if (!memcmp(&pvendor_ie->oui, wps_oui,
sizeof(wps_oui))) { sizeof(wps_oui))) {
/* Test to see if it is a WPS IE, /* Test to see if it is a WPS IE,
* if so, enable wps session flag * if so, enable wps session flag


@ -239,7 +239,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, INFO, mwifiex_dbg(priv->adapter, INFO,
"info: WMM Parameter IE: version=%d,\t" "info: WMM Parameter IE: version=%d,\t"
"qos_info Parameter Set Count=%d, Reserved=%#x\n", "qos_info Parameter Set Count=%d, Reserved=%#x\n",
wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap & wmm_ie->version, wmm_ie->qos_info_bitmap &
IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK, IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
wmm_ie->reserved); wmm_ie->reserved);


@ -30,6 +30,7 @@
#define MT_TX_RING_SIZE 256 #define MT_TX_RING_SIZE 256
#define MT_MCU_RING_SIZE 32 #define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048 #define MT_RX_BUF_SIZE 2048
#define MT_SKB_HEAD_LEN 128
struct mt76_dev; struct mt76_dev;
struct mt76_wcid; struct mt76_wcid;


@ -429,6 +429,42 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
return dma_len; return dma_len;
} }
static struct sk_buff *
mt76u_build_rx_skb(void *data, int len, int buf_size)
{
struct sk_buff *skb;
if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
struct page *page;
/* slow path, not enough space for data and
* skb_shared_info
*/
skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
if (!skb)
return NULL;
skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
page = virt_to_head_page(data);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, data - page_address(page),
len - MT_SKB_HEAD_LEN, buf_size);
return skb;
}
/* fast path */
skb = build_skb(data, buf_size);
if (!skb)
return NULL;
skb_reserve(skb, MT_DMA_HDR_LEN);
__skb_put(skb, len);
return skb;
}
static int static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb) mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{ {
@ -446,19 +482,11 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
return 0; return 0;
data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN); data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) { skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
dev_err_ratelimited(dev->dev, "rx data too big %d\n", data_len);
return 0;
}
skb = build_skb(data, q->buf_size);
if (!skb) if (!skb)
return 0; return 0;
skb_reserve(skb, MT_DMA_HDR_LEN);
__skb_put(skb, data_len);
len -= data_len; len -= data_len;
while (len > 0 && nsgs < urb->num_sgs) { while (len > 0 && nsgs < urb->num_sgs) {
data_len = min_t(int, len, urb->sg[nsgs].length); data_len = min_t(int, len, urb->sg[nsgs].length);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,


@ -1847,44 +1847,6 @@ static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
}, },
}; };
static const struct ieee80211_iface_limit wl18xx_iface_ap_cl_limits[] = {
{
.max = 1,
.types = BIT(NL80211_IFTYPE_STATION),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_AP),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
},
};
static const struct ieee80211_iface_limit wl18xx_iface_ap_go_limits[] = {
{
.max = 1,
.types = BIT(NL80211_IFTYPE_STATION),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_AP),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_P2P_GO),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
},
};
static const struct ieee80211_iface_combination static const struct ieee80211_iface_combination
wl18xx_iface_combinations[] = { wl18xx_iface_combinations[] = {
{ {


@ -191,14 +191,17 @@ static inline void idr_preload_end(void)
* idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type. * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
* @idr: IDR handle. * @idr: IDR handle.
* @entry: The type * to use as cursor. * @entry: The type * to use as cursor.
* @tmp: A temporary placeholder for ID.
* @id: Entry ID. * @id: Entry ID.
* *
* @entry and @id do not need to be initialized before the loop, and * @entry and @id do not need to be initialized before the loop, and
* after normal termination @entry is left with the value NULL. This * after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value. * is convenient for a "not found" value.
*/ */
#define idr_for_each_entry_ul(idr, entry, id) \ #define idr_for_each_entry_ul(idr, entry, tmp, id) \
for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id) for (tmp = 0, id = 0; \
tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
tmp = id, ++id)
/** /**
* idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
@ -213,6 +216,20 @@ static inline void idr_preload_end(void)
entry; \ entry; \
++id, (entry) = idr_get_next((idr), &(id))) ++id, (entry) = idr_get_next((idr), &(id)))
/**
* idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
* @idr: IDR handle.
* @entry: The type * to use as a cursor.
* @tmp: A temporary placeholder for ID.
* @id: Entry ID.
*
* Continue to iterate over entries, continuing after the current position.
*/
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
for (tmp = id; \
tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
tmp = id, ++id)
/* /*
* IDA - ID Allocator, use when translation from id to pointer isn't necessary. * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
*/ */
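The reworked idr_for_each_entry_ul() above takes an extra tmp cursor so the walk terminates cleanly even when an entry sits at the end of the unsigned long range. A minimal usage sketch of the new four-argument form; the struct and function names are illustrative, not taken from this merge:

#include <linux/idr.h>
#include <linux/printk.h>

struct item {
        unsigned long id;
        /* ... payload ... */
};

static void dump_items(struct idr *items)
{
        struct item *entry;
        unsigned long id, tmp;  /* tmp is the new wrap-around guard */

        idr_for_each_entry_ul(items, entry, tmp, id)
                pr_info("item at id %lu\n", id);
}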


@ -109,12 +109,19 @@ struct phylink_mac_ops {
* Note that the PHY may be able to transform from one connection * Note that the PHY may be able to transform from one connection
* technology to another, so, eg, don't clear 1000BaseX just * technology to another, so, eg, don't clear 1000BaseX just
* because the MAC is unable to BaseX mode. This is more about * because the MAC is unable to BaseX mode. This is more about
* clearing unsupported speeds and duplex settings. * clearing unsupported speeds and duplex settings. The port modes
* should not be cleared; phylink_set_port_modes() will help with this.
* *
* If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
* or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
* based on @state->advertising and/or @state->speed and update * based on @state->advertising and/or @state->speed and update
* @state->interface accordingly. * @state->interface accordingly. See phylink_helper_basex_speed().
*
* When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the
* MAC driver to return all supported link modes.
*
* If the @state->interface mode is not supported, then the @supported
* mask must be cleared.
*/ */
void validate(struct phylink_config *config, unsigned long *supported, void validate(struct phylink_config *config, unsigned long *supported,
struct phylink_link_state *state); struct phylink_link_state *state);


@ -183,7 +183,7 @@ static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
} }
/* Kernel-internal feature bits that are unallocated in user space. */ /* Kernel-internal feature bits that are unallocated in user space. */
#define DST_FEATURE_ECN_CA (1 << 31) #define DST_FEATURE_ECN_CA (1U << 31)
#define DST_FEATURE_MASK (DST_FEATURE_ECN_CA) #define DST_FEATURE_MASK (DST_FEATURE_ECN_CA)
#define DST_FEATURE_ECN_MASK (DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN) #define DST_FEATURE_ECN_MASK (DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)
@ -302,8 +302,9 @@ static inline bool dst_hold_safe(struct dst_entry *dst)
* @skb: buffer * @skb: buffer
* *
* If dst is not yet refcounted and not destroyed, grab a ref on it. * If dst is not yet refcounted and not destroyed, grab a ref on it.
* Returns true if dst is refcounted.
*/ */
static inline void skb_dst_force(struct sk_buff *skb) static inline bool skb_dst_force(struct sk_buff *skb)
{ {
if (skb_dst_is_noref(skb)) { if (skb_dst_is_noref(skb)) {
struct dst_entry *dst = skb_dst(skb); struct dst_entry *dst = skb_dst(skb);
@ -314,6 +315,8 @@ static inline void skb_dst_force(struct sk_buff *skb)
skb->_skb_refdst = (unsigned long)dst; skb->_skb_refdst = (unsigned long)dst;
} }
return skb->_skb_refdst != 0UL;
} }
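A short sketch of how a caller might react to the new boolean result of skb_dst_force(), dropping the packet when the dst could not be pinned. The function name is hypothetical and this is not code from the merge:

#include <linux/skbuff.h>
#include <net/dst.h>

static int foo_queue_xmit(struct sk_buff *skb)
{
        if (unlikely(!skb_dst_force(skb))) {
                /* dst was already being torn down; do not use it */
                kfree_skb(skb);
                return -EINVAL;
        }
        /* ... hand skb to the device queue ... */
        return 0;
}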


@ -60,7 +60,7 @@ struct guehdr {
/* Private flags in the private option extension */ /* Private flags in the private option extension */
#define GUE_PFLAG_REMCSUM htonl(1 << 31) #define GUE_PFLAG_REMCSUM htonl(1U << 31)
#define GUE_PLEN_REMCSUM 4 #define GUE_PLEN_REMCSUM 4
#define GUE_PFLAGS_ALL (GUE_PFLAG_REMCSUM) #define GUE_PFLAGS_ALL (GUE_PFLAG_REMCSUM)
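The GUE change above is one of several hunks in this merge (see also DST_FEATURE_ECN_CA, TP_STATUS_TS_RAW_HARDWARE and NL80211_FEATURE_ND_RANDOM_MAC_ADDR) that switch 1 << 31 to 1U << 31: shifting a set bit into the sign position of a signed int is undefined behaviour in C. A standalone userspace illustration of the well-defined form:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* int bad = 1 << 31;        left shift into the sign bit: undefined */
        uint32_t ok = 1U << 31;      /* well-defined, yields 0x80000000 */

        printf("0x%08x\n", ok);
        return 0;
}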


@ -810,11 +810,12 @@ struct ipvs_master_sync_state {
struct ip_vs_sync_buff *sync_buff; struct ip_vs_sync_buff *sync_buff;
unsigned long sync_queue_len; unsigned long sync_queue_len;
unsigned int sync_queue_delay; unsigned int sync_queue_delay;
struct task_struct *master_thread;
struct delayed_work master_wakeup_work; struct delayed_work master_wakeup_work;
struct netns_ipvs *ipvs; struct netns_ipvs *ipvs;
}; };
struct ip_vs_sync_thread_data;
/* How much time to keep dests in trash */ /* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD (120 * HZ) #define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
@ -945,7 +946,8 @@ struct netns_ipvs {
spinlock_t sync_lock; spinlock_t sync_lock;
struct ipvs_master_sync_state *ms; struct ipvs_master_sync_state *ms;
spinlock_t sync_buff_lock; spinlock_t sync_buff_lock;
struct task_struct **backup_threads; struct ip_vs_sync_thread_data *master_tinfo;
struct ip_vs_sync_thread_data *backup_tinfo;
int threads_mask; int threads_mask;
volatile int sync_state; volatile int sync_state;
struct mutex sync_mutex; struct mutex sync_mutex;


@ -347,6 +347,7 @@ struct tls_offload_context_rx {
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \ #define TLS_OFFLOAD_CONTEXT_SIZE_RX \
(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX) (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
void tls_ctx_free(struct tls_context *ctx);
int wait_on_pending_writer(struct sock *sk, long *timeo); int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval, int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen); int __user *optlen);


@ -59,6 +59,11 @@ struct xdp_sock {
struct list_head flush_node; struct list_head flush_node;
u16 queue_id; u16 queue_id;
bool zc; bool zc;
enum {
XSK_READY = 0,
XSK_BOUND,
XSK_UNBOUND,
} state;
/* Protects multiple processes in the control path */ /* Protects multiple processes in the control path */
struct mutex mutex; struct mutex mutex;
struct xsk_queue *tx ____cacheline_aligned_in_smp; struct xsk_queue *tx ____cacheline_aligned_in_smp;
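A hedged sketch of what the new state field enables: rejecting a second bind on an already-bound AF_XDP socket under the mutex the struct already documents. This is only an outline, not the actual bind path from this merge:

#include <linux/mutex.h>
#include <net/xdp_sock.h>

static int xsk_bind_sketch(struct xdp_sock *xs)
{
        int err = 0;

        mutex_lock(&xs->mutex);
        if (xs->state != XSK_READY) {
                /* already bound, or torn down via XSK_UNBOUND */
                err = -EBUSY;
                goto out;
        }
        /* ... set up umem and rings ... */
        xs->state = XSK_BOUND;
out:
        mutex_unlock(&xs->mutex);
        return err;
}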


@ -1379,7 +1379,7 @@ TRACE_EVENT(rxrpc_rx_eproto,
), ),
TP_fast_assign( TP_fast_assign(
__entry->call = call->debug_id; __entry->call = call ? call->debug_id : 0;
__entry->serial = serial; __entry->serial = serial;
__entry->why = why; __entry->why = why;
), ),


@ -3193,6 +3193,7 @@ struct bpf_prog_info {
char name[BPF_OBJ_NAME_LEN]; char name[BPF_OBJ_NAME_LEN];
__u32 ifindex; __u32 ifindex;
__u32 gpl_compatible:1; __u32 gpl_compatible:1;
__u32 :31; /* alignment pad */
__u64 netns_dev; __u64 netns_dev;
__u64 netns_ino; __u64 netns_ino;
__u32 nr_jited_ksyms; __u32 nr_jited_ksyms;


@ -123,7 +123,7 @@ struct tpacket_auxdata {
/* Rx and Tx ring - header status */ /* Rx and Tx ring - header status */
#define TP_STATUS_TS_SOFTWARE (1 << 29) #define TP_STATUS_TS_SOFTWARE (1 << 29)
#define TP_STATUS_TS_SYS_HARDWARE (1 << 30) /* deprecated, never set */ #define TP_STATUS_TS_SYS_HARDWARE (1 << 30) /* deprecated, never set */
#define TP_STATUS_TS_RAW_HARDWARE (1 << 31) #define TP_STATUS_TS_RAW_HARDWARE (1U << 31)
/* Rx ring - feature request bits */ /* Rx ring - feature request bits */
#define TP_FT_REQ_FILL_RXHASH 0x1 #define TP_FT_REQ_FILL_RXHASH 0x1


@ -5334,7 +5334,7 @@ enum nl80211_feature_flags {
NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28, NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28,
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29, NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29,
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30, NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30,
NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31, NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1U << 31,
}; };
/** /**


@ -1928,8 +1928,8 @@ static int btf_array_resolve(struct btf_verifier_env *env,
/* Check array->index_type */ /* Check array->index_type */
index_type_id = array->index_type; index_type_id = array->index_type;
index_type = btf_type_by_id(btf, index_type_id); index_type = btf_type_by_id(btf, index_type_id);
if (btf_type_is_resolve_source_only(index_type) || if (btf_type_nosize_or_null(index_type) ||
btf_type_nosize_or_null(index_type)) { btf_type_is_resolve_source_only(index_type)) {
btf_verifier_log_type(env, v->t, "Invalid index"); btf_verifier_log_type(env, v->t, "Invalid index");
return -EINVAL; return -EINVAL;
} }
@ -1948,8 +1948,8 @@ static int btf_array_resolve(struct btf_verifier_env *env,
/* Check array->type */ /* Check array->type */
elem_type_id = array->type; elem_type_id = array->type;
elem_type = btf_type_by_id(btf, elem_type_id); elem_type = btf_type_by_id(btf, elem_type_id);
if (btf_type_is_resolve_source_only(elem_type) || if (btf_type_nosize_or_null(elem_type) ||
btf_type_nosize_or_null(elem_type)) { btf_type_is_resolve_source_only(elem_type)) {
btf_verifier_log_type(env, v->t, btf_verifier_log_type(env, v->t,
"Invalid elem"); "Invalid elem");
return -EINVAL; return -EINVAL;
@ -2170,8 +2170,8 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
const struct btf_type *member_type = btf_type_by_id(env->btf, const struct btf_type *member_type = btf_type_by_id(env->btf,
member_type_id); member_type_id);
if (btf_type_is_resolve_source_only(member_type) || if (btf_type_nosize_or_null(member_type) ||
btf_type_nosize_or_null(member_type)) { btf_type_is_resolve_source_only(member_type)) {
btf_verifier_log_member(env, v->t, member, btf_verifier_log_member(env, v->t, member,
"Invalid member"); "Invalid member");
return -EINVAL; return -EINVAL;
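The three hunks above all swap the order of the same pair of checks: btf_type_nosize_or_null() tolerates a NULL type, while btf_type_is_resolve_source_only() dereferences it, so the NULL-safe test must short-circuit first. A small standalone stub showing the pattern; the struct and kind value are made up for the illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct type_stub { unsigned int info; };

static bool nosize_or_null(const struct type_stub *t)
{
        return !t || t->info == 0;              /* safe when t is NULL */
}

static bool resolve_source_only(const struct type_stub *t)
{
        return (t->info >> 24) == 14;           /* dereferences t */
}

int main(void)
{
        const struct type_stub *t = NULL;

        /* New order: the NULL-safe check fires first, no dereference. */
        if (nosize_or_null(t) || resolve_source_only(t))
                printf("invalid type rejected safely\n");

        /* Old order would have called resolve_source_only(NULL) and crashed. */
        return 0;
}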


@ -1364,10 +1364,10 @@ select_insn:
insn++; insn++;
CONT; CONT;
ALU_ARSH_X: ALU_ARSH_X:
DST = (u64) (u32) ((*(s32 *) &DST) >> SRC); DST = (u64) (u32) (((s32) DST) >> SRC);
CONT; CONT;
ALU_ARSH_K: ALU_ARSH_K:
DST = (u64) (u32) ((*(s32 *) &DST) >> IMM); DST = (u64) (u32) (((s32) DST) >> IMM);
CONT; CONT;
ALU64_ARSH_X: ALU64_ARSH_X:
(*(s64 *) &DST) >>= SRC; (*(s64 *) &DST) >>= SRC;
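The interpreter hunk above replaces the pointer pun *(s32 *)&DST with a plain (s32) cast. The cast always operates on the low 32 bits of the 64-bit register, while the pun reads whichever half sits first in memory, which is the high half on big-endian machines. A standalone illustration; the memcpy form mirrors the old code's memory access and is shown only for contrast:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint64_t dst = 0x00000001fffffff0ULL;

        /* New form: low 32 bits, arithmetic shift, zero-extend back to u64. */
        uint64_t cast_form = (uint64_t)(uint32_t)(((int32_t)dst) >> 4);

        /* Old form read the first 4 bytes in memory, so the result is
         * endian-dependent; on big-endian it would shift 0x00000001.
         */
        int32_t low;
        memcpy(&low, &dst, sizeof(low));
        uint64_t pun_form = (uint64_t)(uint32_t)(low >> 4);

        printf("%llx %llx\n",
               (unsigned long long)cast_form, (unsigned long long)pun_form);
        return 0;
}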


@ -2337,7 +2337,7 @@ batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
return ret; return ret;
} }
static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface) static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface)
{ {
/* begin scheduling originator messages on that interface */ /* begin scheduling originator messages on that interface */
batadv_iv_ogm_schedule(hard_iface); batadv_iv_ogm_schedule(hard_iface);
@ -2683,8 +2683,8 @@ unlock:
static struct batadv_algo_ops batadv_batman_iv __read_mostly = { static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.name = "BATMAN_IV", .name = "BATMAN_IV",
.iface = { .iface = {
.activate = batadv_iv_iface_activate,
.enable = batadv_iv_ogm_iface_enable, .enable = batadv_iv_ogm_iface_enable,
.enabled = batadv_iv_iface_enabled,
.disable = batadv_iv_ogm_iface_disable, .disable = batadv_iv_ogm_iface_disable,
.update_mac = batadv_iv_ogm_iface_update_mac, .update_mac = batadv_iv_ogm_iface_update_mac,
.primary_set = batadv_iv_ogm_primary_iface_set, .primary_set = batadv_iv_ogm_primary_iface_set,


@ -796,6 +796,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
batadv_hardif_recalc_extra_skbroom(soft_iface); batadv_hardif_recalc_extra_skbroom(soft_iface);
if (bat_priv->algo_ops->iface.enabled)
bat_priv->algo_ops->iface.enabled(hard_iface);
out: out:
return 0; return 0;


@ -3813,6 +3813,8 @@ static void batadv_tt_purge(struct work_struct *work)
*/ */
void batadv_tt_free(struct batadv_priv *bat_priv) void batadv_tt_free(struct batadv_priv *bat_priv)
{ {
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1);
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1); batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1); batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);


@ -2170,6 +2170,9 @@ struct batadv_algo_iface_ops {
/** @enable: init routing info when hard-interface is enabled */ /** @enable: init routing info when hard-interface is enabled */
int (*enable)(struct batadv_hard_iface *hard_iface); int (*enable)(struct batadv_hard_iface *hard_iface);
/** @enabled: notification when hard-interface was enabled (optional) */
void (*enabled)(struct batadv_hard_iface *hard_iface);
/** @disable: de-init routing info when hard-interface is disabled */ /** @disable: de-init routing info when hard-interface is disabled */
void (*disable)(struct batadv_hard_iface *hard_iface); void (*disable)(struct batadv_hard_iface *hard_iface);


@ -55,7 +55,7 @@ static void loop(void)
int main(void) int main(void)
{ {
debug_fd = open("/dev/console", 00000002); debug_fd = open("/dev/kmsg", 00000002);
dprintf(debug_fd, "Started bpfilter\n"); dprintf(debug_fd, "Started bpfilter\n");
loop(); loop();
close(debug_fd); close(debug_fd);
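The octal literal 00000002 above is O_RDWR; switching from /dev/console to /dev/kmsg sends bpfilter's debug output into the kernel log buffer rather than whatever happens to be attached to the console. A minimal, unrelated userspace sketch of writing to /dev/kmsg (usually needs root):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Lines written to /dev/kmsg land in the kernel ring buffer (dmesg);
	 * an optional "<level>" prefix sets the log priority. */
	int fd = open("/dev/kmsg", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/kmsg");
		return 1;
	}
	dprintf(fd, "<6>example: hello from userspace\n");
	close(fd);
	return 0;
}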


@ -74,7 +74,6 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
struct net_bridge_fdb_entry *dst = NULL; struct net_bridge_fdb_entry *dst = NULL;
struct net_bridge_mdb_entry *mdst; struct net_bridge_mdb_entry *mdst;
bool local_rcv, mcast_hit = false; bool local_rcv, mcast_hit = false;
const unsigned char *dest;
struct net_bridge *br; struct net_bridge *br;
u16 vid = 0; u16 vid = 0;
@ -92,10 +91,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false); br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
local_rcv = !!(br->dev->flags & IFF_PROMISC); local_rcv = !!(br->dev->flags & IFF_PROMISC);
dest = eth_hdr(skb)->h_dest; if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
if (is_multicast_ether_addr(dest)) {
/* by definition the broadcast is also a multicast address */ /* by definition the broadcast is also a multicast address */
if (is_broadcast_ether_addr(dest)) { if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
pkt_type = BR_PKT_BROADCAST; pkt_type = BR_PKT_BROADCAST;
local_rcv = true; local_rcv = true;
} else { } else {
@ -145,7 +143,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
} }
break; break;
case BR_PKT_UNICAST: case BR_PKT_UNICAST:
dst = br_fdb_find_rcu(br, dest, vid); dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
default: default:
break; break;
} }
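The bridge input path stops caching the destination pointer because helpers called in between (such as pskb_may_pull() in the multicast handling) can reallocate the skb head and leave the cached pointer stale; h_dest is re-read from eth_hdr(skb) each time instead. A plain-C analogy of the hazard (not bridge code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(32);

	if (!buf)
		return 1;
	strcpy(buf, "de:ad:be:ef:00:01");
	char *cached = buf;		/* pointer taken before the buffer can move */

	char *nbuf = realloc(buf, 1 << 20);	/* analogue of the skb head growing */
	if (!nbuf) {
		free(buf);
		return 1;
	}
	buf = nbuf;
	/* 'cached' may now dangle; always re-derive from the current buffer. */
	(void)cached;
	printf("%s\n", buf);
	free(buf);
	return 0;
}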


@ -911,6 +911,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
int type; int type;
int err = 0; int err = 0;
__be32 group; __be32 group;
u16 nsrcs;
ih = igmpv3_report_hdr(skb); ih = igmpv3_report_hdr(skb);
num = ntohs(ih->ngrec); num = ntohs(ih->ngrec);
@ -924,8 +925,9 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
grec = (void *)(skb->data + len - sizeof(*grec)); grec = (void *)(skb->data + len - sizeof(*grec));
group = grec->grec_mca; group = grec->grec_mca;
type = grec->grec_type; type = grec->grec_type;
nsrcs = ntohs(grec->grec_nsrcs);
len += ntohs(grec->grec_nsrcs) * 4; len += nsrcs * 4;
if (!ip_mc_may_pull(skb, len)) if (!ip_mc_may_pull(skb, len))
return -EINVAL; return -EINVAL;
@ -946,7 +948,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
src = eth_hdr(skb)->h_source; src = eth_hdr(skb)->h_source;
if ((type == IGMPV3_CHANGE_TO_INCLUDE || if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
type == IGMPV3_MODE_IS_INCLUDE) && type == IGMPV3_MODE_IS_INCLUDE) &&
ntohs(grec->grec_nsrcs) == 0) { nsrcs == 0) {
br_ip4_multicast_leave_group(br, port, group, vid, src); br_ip4_multicast_leave_group(br, port, group, vid, src);
} else { } else {
err = br_ip4_multicast_add_group(br, port, group, vid, err = br_ip4_multicast_add_group(br, port, group, vid,
@ -983,7 +985,8 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
len = skb_transport_offset(skb) + sizeof(*icmp6h); len = skb_transport_offset(skb) + sizeof(*icmp6h);
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
__be16 *nsrcs, _nsrcs; __be16 *_nsrcs, __nsrcs;
u16 nsrcs;
nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs); nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
@ -991,12 +994,13 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
nsrcs_offset + sizeof(_nsrcs)) nsrcs_offset + sizeof(_nsrcs))
return -EINVAL; return -EINVAL;
nsrcs = skb_header_pointer(skb, nsrcs_offset, _nsrcs = skb_header_pointer(skb, nsrcs_offset,
sizeof(_nsrcs), &_nsrcs); sizeof(__nsrcs), &__nsrcs);
if (!nsrcs) if (!_nsrcs)
return -EINVAL; return -EINVAL;
grec_len = struct_size(grec, grec_src, ntohs(*nsrcs)); nsrcs = ntohs(*_nsrcs);
grec_len = struct_size(grec, grec_src, nsrcs);
if (!ipv6_mc_may_pull(skb, len + grec_len)) if (!ipv6_mc_may_pull(skb, len + grec_len))
return -EINVAL; return -EINVAL;
@ -1021,7 +1025,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
src = eth_hdr(skb)->h_source; src = eth_hdr(skb)->h_source;
if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
grec->grec_type == MLD2_MODE_IS_INCLUDE) && grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
ntohs(*nsrcs) == 0) { nsrcs == 0) {
br_ip6_multicast_leave_group(br, port, &grec->grec_mca, br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
vid, src); vid, src);
} else { } else {
@ -1275,7 +1279,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
u16 vid) u16 vid)
{ {
unsigned int transport_len = ipv6_transport_len(skb); unsigned int transport_len = ipv6_transport_len(skb);
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct mld_msg *mld; struct mld_msg *mld;
struct net_bridge_mdb_entry *mp; struct net_bridge_mdb_entry *mp;
struct mld2_query *mld2q; struct mld2_query *mld2q;
@ -1319,7 +1322,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (is_general_query) { if (is_general_query) {
saddr.proto = htons(ETH_P_IPV6); saddr.proto = htons(ETH_P_IPV6);
saddr.u.ip6 = ip6h->saddr; saddr.u.ip6 = ipv6_hdr(skb)->saddr;
br_multicast_query_received(br, port, &br->ip6_other_query, br_multicast_query_received(br, port, &br->ip6_other_query,
&saddr, max_delay); &saddr, max_delay);
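Both multicast hunks cache the 16-bit source count in host order before the record may be pulled further, instead of re-reading it later through a pointer into packet data. A simplified standalone sketch of reading such a field through a bounce buffer and sizing the record (the struct layout is illustrative, not the real mld2_grec):

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct grec {
	uint8_t  type;
	uint8_t  aux;
	uint16_t nsrcs;		/* big endian on the wire */
	uint32_t mca;
	uint32_t src[];		/* flexible array of sources */
};

int main(void)
{
	/* A fake record header as it might sit in a packet buffer. */
	const uint8_t pkt[] = { 0x01, 0x00, 0x00, 0x02, 0, 0, 0, 0 };
	uint16_t nsrcs_be;

	/* Bounce-copy the possibly unaligned 16-bit field, convert once. */
	memcpy(&nsrcs_be, pkt + offsetof(struct grec, nsrcs), sizeof(nsrcs_be));
	uint16_t nsrcs = ntohs(nsrcs_be);

	/* Full record length, equivalent to struct_size(grec, src, nsrcs). */
	size_t grec_len = sizeof(struct grec) + (size_t)nsrcs * sizeof(uint32_t);

	printf("nsrcs=%u grec_len=%zu\n", (unsigned)nsrcs, grec_len);	/* 2, 16 */
	return 0;
}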


@ -143,7 +143,6 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
struct net_device *dev) struct net_device *dev)
{ {
const unsigned char *dest = eth_hdr(skb)->h_dest;
struct net_bridge_port *p; struct net_bridge_port *p;
struct net_bridge *br; struct net_bridge *br;
const unsigned char *buf; const unsigned char *buf;
@ -172,7 +171,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
if (p->state == BR_STATE_DISABLED) if (p->state == BR_STATE_DISABLED)
goto out; goto out;
if (!ether_addr_equal(dest, br->group_addr)) if (!ether_addr_equal(eth_hdr(skb)->h_dest, br->group_addr))
goto out; goto out;
if (p->flags & BR_BPDU_GUARD) { if (p->flags & BR_BPDU_GUARD) {


@ -4740,7 +4740,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
return -ENODEV; return -ENODEV;
idev = __in6_dev_get_safely(dev); idev = __in6_dev_get_safely(dev);
if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) if (unlikely(!idev || !idev->cnf.forwarding))
return BPF_FIB_LKUP_RET_FWD_DISABLED; return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) { if (flags & BPF_FIB_LOOKUP_OUTPUT) {


@ -227,9 +227,13 @@ static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
struct hsr_port *master; struct hsr_port *master;
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
skb->dev = master->dev; if (master) {
hsr_forward_skb(skb, master); skb->dev = master->dev;
hsr_forward_skb(skb, master);
} else {
atomic_long_inc(&dev->tx_dropped);
dev_kfree_skb_any(skb);
}
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
@ -344,27 +348,26 @@ static void hsr_announce(struct timer_list *t)
rcu_read_unlock(); rcu_read_unlock();
} }
/* According to comments in the declaration of struct net_device, this function void hsr_dev_destroy(struct net_device *hsr_dev)
* is "Called from unregister, can be used to call free_netdev". Ok then...
*/
static void hsr_dev_destroy(struct net_device *hsr_dev)
{ {
struct hsr_priv *hsr; struct hsr_priv *hsr;
struct hsr_port *port; struct hsr_port *port;
struct hsr_port *tmp;
hsr = netdev_priv(hsr_dev); hsr = netdev_priv(hsr_dev);
hsr_debugfs_term(hsr); hsr_debugfs_term(hsr);
rtnl_lock(); list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
hsr_for_each_port(hsr, port)
hsr_del_port(port); hsr_del_port(port);
rtnl_unlock();
del_timer_sync(&hsr->prune_timer); del_timer_sync(&hsr->prune_timer);
del_timer_sync(&hsr->announce_timer); del_timer_sync(&hsr->announce_timer);
synchronize_rcu(); synchronize_rcu();
hsr_del_self_node(&hsr->self_node_db);
hsr_del_nodes(&hsr->node_db);
} }
static const struct net_device_ops hsr_device_ops = { static const struct net_device_ops hsr_device_ops = {
@ -391,7 +394,6 @@ void hsr_dev_setup(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE; dev->priv_flags |= IFF_NO_QUEUE;
dev->needs_free_netdev = true; dev->needs_free_netdev = true;
dev->priv_destructor = hsr_dev_destroy;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
@ -428,6 +430,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
{ {
struct hsr_priv *hsr; struct hsr_priv *hsr;
struct hsr_port *port; struct hsr_port *port;
struct hsr_port *tmp;
int res; int res;
hsr = netdev_priv(hsr_dev); hsr = netdev_priv(hsr_dev);
@ -492,10 +495,10 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
return 0; return 0;
fail: fail:
hsr_for_each_port(hsr, port) list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
hsr_del_port(port); hsr_del_port(port);
err_add_port: err_add_port:
hsr_del_node(&hsr->self_node_db); hsr_del_self_node(&hsr->self_node_db);
return res; return res;
} }


@ -14,6 +14,7 @@
void hsr_dev_setup(struct net_device *dev); void hsr_dev_setup(struct net_device *dev);
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
unsigned char multicast_spec, u8 protocol_version); unsigned char multicast_spec, u8 protocol_version);
void hsr_dev_destroy(struct net_device *hsr_dev);
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr); void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
bool is_hsr_master(struct net_device *dev); bool is_hsr_master(struct net_device *dev);
int hsr_get_max_mtu(struct hsr_priv *hsr); int hsr_get_max_mtu(struct hsr_priv *hsr);


@ -104,7 +104,7 @@ int hsr_create_self_node(struct list_head *self_node_db,
return 0; return 0;
} }
void hsr_del_node(struct list_head *self_node_db) void hsr_del_self_node(struct list_head *self_node_db)
{ {
struct hsr_node *node; struct hsr_node *node;
@ -117,6 +117,15 @@ void hsr_del_node(struct list_head *self_node_db)
} }
} }
void hsr_del_nodes(struct list_head *node_db)
{
struct hsr_node *node;
struct hsr_node *tmp;
list_for_each_entry_safe(node, tmp, node_db, mac_list)
kfree(node);
}
/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A; /* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
* seq_out is used to initialize filtering of outgoing duplicate frames * seq_out is used to initialize filtering of outgoing duplicate frames
* originating from the newly added node. * originating from the newly added node.
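The new hsr_del_nodes() walks with list_for_each_entry_safe() because every node is freed during the walk, so the next pointer must be sampled before the kfree(). The same idea for a plain singly linked list in userspace:

#include <stdlib.h>

struct node {
	struct node *next;
	/* payload ... */
};

/* Free every element; 'tmp' is taken before the current node is freed,
 * mirroring what list_for_each_entry_safe() does for kernel lists. */
static void del_nodes(struct node *head)
{
	struct node *n = head, *tmp;

	while (n) {
		tmp = n->next;
		free(n);
		n = tmp;
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			break;
		n->next = head;
		head = n;
	}
	del_nodes(head);
	return 0;
}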


@ -12,7 +12,8 @@
struct hsr_node; struct hsr_node;
void hsr_del_node(struct list_head *self_node_db); void hsr_del_self_node(struct list_head *self_node_db);
void hsr_del_nodes(struct list_head *node_db);
struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
u16 seq_out); u16 seq_out);
struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,


@ -69,6 +69,12 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
return hsr_dev_finalize(dev, link, multicast_spec, hsr_version); return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
} }
static void hsr_dellink(struct net_device *hsr_dev, struct list_head *head)
{
hsr_dev_destroy(hsr_dev);
unregister_netdevice_queue(hsr_dev, head);
}
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{ {
struct hsr_priv *hsr; struct hsr_priv *hsr;
@ -113,6 +119,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
.priv_size = sizeof(struct hsr_priv), .priv_size = sizeof(struct hsr_priv),
.setup = hsr_dev_setup, .setup = hsr_dev_setup,
.newlink = hsr_newlink, .newlink = hsr_newlink,
.dellink = hsr_dellink,
.fill_info = hsr_fill_info, .fill_info = hsr_fill_info,
}; };


@ -193,4 +193,5 @@ void hsr_del_port(struct hsr_port *port)
if (port != master) if (port != master)
dev_put(port->dev); dev_put(port->dev);
kfree(port);
} }


@ -62,6 +62,11 @@
#include <net/net_namespace.h> #include <net/net_namespace.h>
#include <net/addrconf.h> #include <net/addrconf.h>
#define IPV6ONLY_FLAGS \
(IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
static struct ipv4_devconf ipv4_devconf = { static struct ipv4_devconf ipv4_devconf = {
.data = { .data = {
[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1, [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
@ -482,6 +487,9 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
ifa->ifa_flags &= ~IFA_F_SECONDARY; ifa->ifa_flags &= ~IFA_F_SECONDARY;
last_primary = &in_dev->ifa_list; last_primary = &in_dev->ifa_list;
/* Don't set IPv6 only flags to IPv4 addresses */
ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
ifap = &in_dev->ifa_list; ifap = &in_dev->ifa_list;
ifa1 = rtnl_dereference(*ifap); ifa1 = rtnl_dereference(*ifap);
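The new mask simply strips IPv6-only flag bits from whatever flags userspace supplied for an IPv4 address. A trivial illustration of the masking (flag values here are made up):

#include <stdio.h>

#define FLAG_SECONDARY		0x0001
#define FLAG_NODAD		0x0002
#define FLAG_OPTIMISTIC		0x0004
#define FLAG_MANAGETEMPADDR	0x0100

/* Flags that only make sense for IPv6 addresses (illustrative values). */
#define IPV6ONLY_FLAGS	(FLAG_NODAD | FLAG_OPTIMISTIC | FLAG_MANAGETEMPADDR)

int main(void)
{
	unsigned int ifa_flags = FLAG_SECONDARY | FLAG_NODAD | FLAG_MANAGETEMPADDR;

	ifa_flags &= ~IPV6ONLY_FLAGS;	/* keep only the IPv4-meaningful bits */
	printf("0x%04x\n", ifa_flags);	/* 0x0001 */
	return 0;
}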


@ -1229,12 +1229,8 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
if (pmc) { if (pmc) {
im->interface = pmc->interface; im->interface = pmc->interface;
if (im->sfmode == MCAST_INCLUDE) { if (im->sfmode == MCAST_INCLUDE) {
im->tomb = pmc->tomb; swap(im->tomb, pmc->tomb);
pmc->tomb = NULL; swap(im->sources, pmc->sources);
im->sources = pmc->sources;
pmc->sources = NULL;
for (psf = im->sources; psf; psf = psf->sf_next) for (psf = im->sources; psf; psf = psf->sf_next)
psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
} else { } else {
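Using swap() instead of move-and-NULL means the lists previously hanging off 'im' migrate to 'pmc' and are released when pmc is torn down, rather than being overwritten and leaked. A userspace sketch of that ownership hand-off (swap() modelled with GNU C typeof, as the kernel macro does):

#include <stdlib.h>

#define swap(a, b) do { __typeof__(a) _t = (a); (a) = (b); (b) = _t; } while (0)

struct rec { char *tomb; char *sources; };

int main(void)
{
	struct rec im  = { .tomb = malloc(8),  .sources = malloc(8)  };
	struct rec pmc = { .tomb = malloc(16), .sources = malloc(16) };

	/* Overwriting im.tomb = pmc.tomb would leak im's old buffers.
	 * Swapping hands them to pmc, whose teardown below frees them. */
	swap(im.tomb, pmc.tomb);
	swap(im.sources, pmc.sources);

	free(pmc.tomb);
	free(pmc.sources);
	/* ... im now owns the buffers that used to belong to pmc ... */
	free(im.tomb);
	free(im.sources);
	return 0;
}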


@ -24,9 +24,6 @@ raw_get_hashinfo(const struct inet_diag_req_v2 *r)
return &raw_v6_hashinfo; return &raw_v6_hashinfo;
#endif #endif
} else { } else {
pr_warn_once("Unexpected inet family %d\n",
r->sdiag_family);
WARN_ON_ONCE(1);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
} }


@ -448,7 +448,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
n = ip_neigh_gw4(dev, pkey); n = ip_neigh_gw4(dev, pkey);
} }
if (n && !refcount_inc_not_zero(&n->refcnt)) if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
n = NULL; n = NULL;
rcu_read_unlock_bh(); rcu_read_unlock_bh();
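ip_neigh_gw4()/ip_neigh_gw6() can return ERR_PTR() values, which are non-NULL, so the old `if (n && ...)` test would treat an error as a usable neighbour; hence the !IS_ERR() check. A simplified userspace model of the error-pointer encoding (not the kernel headers):

#include <stdio.h>

/* Simplified versions of the kernel's ERR_PTR()/IS_ERR() helpers:
 * errors are encoded as pointers in the last page of the address space. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *n = ERR_PTR(-22);		/* e.g. -EINVAL from a lookup helper */

	if (n && !IS_ERR(n))
		printf("valid pointer\n");
	else
		printf("NULL or error pointer\n");	/* this branch is taken */
	return 0;
}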


@ -2614,6 +2614,8 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_saved_syn_free(tp); tcp_saved_syn_free(tp);
tp->compressed_ack = 0; tp->compressed_ack = 0;
tp->bytes_sent = 0; tp->bytes_sent = 0;
tp->bytes_acked = 0;
tp->bytes_received = 0;
tp->bytes_retrans = 0; tp->bytes_retrans = 0;
tp->duplicate_sack[0].start_seq = 0; tp->duplicate_sack[0].start_seq = 0;
tp->duplicate_sack[0].end_seq = 0; tp->duplicate_sack[0].end_seq = 0;


@ -2436,8 +2436,10 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
goto out; goto out;
} }
err = pfkey_xfrm_policy2msg(out_skb, xp, dir); err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
if (err < 0) if (err < 0) {
kfree_skb(out_skb);
goto out; goto out;
}
out_hdr = (struct sadb_msg *) out_skb->data; out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = hdr->sadb_msg_version; out_hdr->sadb_msg_version = hdr->sadb_msg_version;
@ -2688,8 +2690,10 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
return PTR_ERR(out_skb); return PTR_ERR(out_skb);
err = pfkey_xfrm_policy2msg(out_skb, xp, dir); err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
if (err < 0) if (err < 0) {
kfree_skb(out_skb);
return err; return err;
}
out_hdr = (struct sadb_msg *) out_skb->data; out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = pfk->dump.msg_version; out_hdr->sadb_msg_version = pfk->dump.msg_version;
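When pfkey_xfrm_policy2msg() fails, the freshly allocated out_skb previously leaked; the fix frees it before bailing out. A generic userspace sketch of the allocate/fill/free-on-error shape (names hypothetical, not the pfkey API):

#include <stdlib.h>
#include <string.h>

/* Stand-in for a fill helper that may fail after the buffer exists. */
static int fill_msg(char *buf, size_t len)
{
	if (len < 64)
		return -1;		/* pretend the message doesn't fit */
	memset(buf, 0, len);
	return 0;
}

static int build_and_send(size_t len)
{
	char *buf = malloc(len);
	int err;

	if (!buf)
		return -1;
	err = fill_msg(buf, len);
	if (err < 0) {
		free(buf);		/* the fix: release the buffer on failure */
		return err;
	}
	/* ... send buf ... */
	free(buf);
	return 0;
}

int main(void)
{
	build_and_send(32);		/* error path */
	build_and_send(128);		/* success path */
	return 0;
}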


@ -2351,7 +2351,6 @@ static const struct nf_hook_ops ip_vs_ops[] = {
static int __net_init __ip_vs_init(struct net *net) static int __net_init __ip_vs_init(struct net *net)
{ {
struct netns_ipvs *ipvs; struct netns_ipvs *ipvs;
int ret;
ipvs = net_generic(net, ip_vs_net_id); ipvs = net_generic(net, ip_vs_net_id);
if (ipvs == NULL) if (ipvs == NULL)
@ -2383,17 +2382,11 @@ static int __net_init __ip_vs_init(struct net *net)
if (ip_vs_sync_net_init(ipvs) < 0) if (ip_vs_sync_net_init(ipvs) < 0)
goto sync_fail; goto sync_fail;
ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
if (ret < 0)
goto hook_fail;
return 0; return 0;
/* /*
* Error handling * Error handling
*/ */
hook_fail:
ip_vs_sync_net_cleanup(ipvs);
sync_fail: sync_fail:
ip_vs_conn_net_cleanup(ipvs); ip_vs_conn_net_cleanup(ipvs);
conn_fail: conn_fail:
@ -2423,6 +2416,19 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
net->ipvs = NULL; net->ipvs = NULL;
} }
static int __net_init __ip_vs_dev_init(struct net *net)
{
int ret;
ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
if (ret < 0)
goto hook_fail;
return 0;
hook_fail:
return ret;
}
static void __net_exit __ip_vs_dev_cleanup(struct net *net) static void __net_exit __ip_vs_dev_cleanup(struct net *net)
{ {
struct netns_ipvs *ipvs = net_ipvs(net); struct netns_ipvs *ipvs = net_ipvs(net);
@ -2442,6 +2448,7 @@ static struct pernet_operations ipvs_core_ops = {
}; };
static struct pernet_operations ipvs_core_dev_ops = { static struct pernet_operations ipvs_core_dev_ops = {
.init = __ip_vs_dev_init,
.exit = __ip_vs_dev_cleanup, .exit = __ip_vs_dev_cleanup,
}; };


@ -2454,9 +2454,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
cfg.syncid = dm->syncid; cfg.syncid = dm->syncid;
ret = start_sync_thread(ipvs, &cfg, dm->state); ret = start_sync_thread(ipvs, &cfg, dm->state);
} else { } else {
mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs, dm->state); ret = stop_sync_thread(ipvs, dm->state);
mutex_unlock(&ipvs->sync_mutex);
} }
goto out_dec; goto out_dec;
} }
@ -3581,10 +3579,8 @@ static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
if (!attrs[IPVS_DAEMON_ATTR_STATE]) if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL; return -EINVAL;
mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs, ret = stop_sync_thread(ipvs,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
mutex_unlock(&ipvs->sync_mutex);
return ret; return ret;
} }


@ -195,6 +195,7 @@ union ip_vs_sync_conn {
#define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1)) #define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))
struct ip_vs_sync_thread_data { struct ip_vs_sync_thread_data {
struct task_struct *task;
struct netns_ipvs *ipvs; struct netns_ipvs *ipvs;
struct socket *sock; struct socket *sock;
char *buf; char *buf;
@ -374,8 +375,11 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs,
max(IPVS_SYNC_SEND_DELAY, 1)); max(IPVS_SYNC_SEND_DELAY, 1));
ms->sync_queue_len++; ms->sync_queue_len++;
list_add_tail(&sb->list, &ms->sync_queue); list_add_tail(&sb->list, &ms->sync_queue);
if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) {
wake_up_process(ms->master_thread); int id = (int)(ms - ipvs->ms);
wake_up_process(ipvs->master_tinfo[id].task);
}
} else } else
ip_vs_sync_buff_release(sb); ip_vs_sync_buff_release(sb);
spin_unlock(&ipvs->sync_lock); spin_unlock(&ipvs->sync_lock);
@ -1636,8 +1640,10 @@ static void master_wakeup_work_handler(struct work_struct *work)
spin_lock_bh(&ipvs->sync_lock); spin_lock_bh(&ipvs->sync_lock);
if (ms->sync_queue_len && if (ms->sync_queue_len &&
ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) { ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
int id = (int)(ms - ipvs->ms);
ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE; ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
wake_up_process(ms->master_thread); wake_up_process(ipvs->master_tinfo[id].task);
} }
spin_unlock_bh(&ipvs->sync_lock); spin_unlock_bh(&ipvs->sync_lock);
} }
@ -1703,10 +1709,6 @@ done:
if (sb) if (sb)
ip_vs_sync_buff_release(sb); ip_vs_sync_buff_release(sb);
/* release the sending multicast socket */
sock_release(tinfo->sock);
kfree(tinfo);
return 0; return 0;
} }
@ -1740,11 +1742,6 @@ static int sync_thread_backup(void *data)
} }
} }
/* release the sending multicast socket */
sock_release(tinfo->sock);
kfree(tinfo->buf);
kfree(tinfo);
return 0; return 0;
} }
@ -1752,8 +1749,8 @@ static int sync_thread_backup(void *data)
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
int state) int state)
{ {
struct ip_vs_sync_thread_data *tinfo = NULL; struct ip_vs_sync_thread_data *ti = NULL, *tinfo;
struct task_struct **array = NULL, *task; struct task_struct *task;
struct net_device *dev; struct net_device *dev;
char *name; char *name;
int (*threadfn)(void *data); int (*threadfn)(void *data);
@ -1822,7 +1819,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
threadfn = sync_thread_master; threadfn = sync_thread_master;
} else if (state == IP_VS_STATE_BACKUP) { } else if (state == IP_VS_STATE_BACKUP) {
result = -EEXIST; result = -EEXIST;
if (ipvs->backup_threads) if (ipvs->backup_tinfo)
goto out_early; goto out_early;
ipvs->bcfg = *c; ipvs->bcfg = *c;
@ -1849,28 +1846,22 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
master_wakeup_work_handler); master_wakeup_work_handler);
ms->ipvs = ipvs; ms->ipvs = ipvs;
} }
} else {
array = kcalloc(count, sizeof(struct task_struct *),
GFP_KERNEL);
result = -ENOMEM;
if (!array)
goto out;
} }
result = -ENOMEM;
ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data),
GFP_KERNEL);
if (!ti)
goto out;
for (id = 0; id < count; id++) { for (id = 0; id < count; id++) {
result = -ENOMEM; tinfo = &ti[id];
tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
if (!tinfo)
goto out;
tinfo->ipvs = ipvs; tinfo->ipvs = ipvs;
tinfo->sock = NULL;
if (state == IP_VS_STATE_BACKUP) { if (state == IP_VS_STATE_BACKUP) {
result = -ENOMEM;
tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen, tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
GFP_KERNEL); GFP_KERNEL);
if (!tinfo->buf) if (!tinfo->buf)
goto out; goto out;
} else {
tinfo->buf = NULL;
} }
tinfo->id = id; tinfo->id = id;
if (state == IP_VS_STATE_MASTER) if (state == IP_VS_STATE_MASTER)
@ -1885,17 +1876,15 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
result = PTR_ERR(task); result = PTR_ERR(task);
goto out; goto out;
} }
tinfo = NULL; tinfo->task = task;
if (state == IP_VS_STATE_MASTER)
ipvs->ms[id].master_thread = task;
else
array[id] = task;
} }
/* mark as active */ /* mark as active */
if (state == IP_VS_STATE_BACKUP) if (state == IP_VS_STATE_MASTER)
ipvs->backup_threads = array; ipvs->master_tinfo = ti;
else
ipvs->backup_tinfo = ti;
spin_lock_bh(&ipvs->sync_buff_lock); spin_lock_bh(&ipvs->sync_buff_lock);
ipvs->sync_state |= state; ipvs->sync_state |= state;
spin_unlock_bh(&ipvs->sync_buff_lock); spin_unlock_bh(&ipvs->sync_buff_lock);
@ -1910,29 +1899,31 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
out: out:
/* We do not need RTNL lock anymore, release it here so that /* We do not need RTNL lock anymore, release it here so that
* sock_release below and in the kthreads can use rtnl_lock * sock_release below can use rtnl_lock to leave the mcast group.
* to leave the mcast group.
*/ */
rtnl_unlock(); rtnl_unlock();
count = id; id = min(id, count - 1);
while (count-- > 0) { if (ti) {
if (state == IP_VS_STATE_MASTER) for (tinfo = ti + id; tinfo >= ti; tinfo--) {
kthread_stop(ipvs->ms[count].master_thread); if (tinfo->task)
else kthread_stop(tinfo->task);
kthread_stop(array[count]); }
} }
if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
kfree(ipvs->ms); kfree(ipvs->ms);
ipvs->ms = NULL; ipvs->ms = NULL;
} }
mutex_unlock(&ipvs->sync_mutex); mutex_unlock(&ipvs->sync_mutex);
if (tinfo) {
if (tinfo->sock) /* No more mutexes, release socks */
sock_release(tinfo->sock); if (ti) {
kfree(tinfo->buf); for (tinfo = ti + id; tinfo >= ti; tinfo--) {
kfree(tinfo); if (tinfo->sock)
sock_release(tinfo->sock);
kfree(tinfo->buf);
}
kfree(ti);
} }
kfree(array);
return result; return result;
out_early: out_early:
@ -1944,15 +1935,18 @@ out_early:
int stop_sync_thread(struct netns_ipvs *ipvs, int state) int stop_sync_thread(struct netns_ipvs *ipvs, int state)
{ {
struct task_struct **array; struct ip_vs_sync_thread_data *ti, *tinfo;
int id; int id;
int retc = -EINVAL; int retc = -EINVAL;
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
mutex_lock(&ipvs->sync_mutex);
if (state == IP_VS_STATE_MASTER) { if (state == IP_VS_STATE_MASTER) {
retc = -ESRCH;
if (!ipvs->ms) if (!ipvs->ms)
return -ESRCH; goto err;
ti = ipvs->master_tinfo;
/* /*
* The lock synchronizes with sb_queue_tail(), so that we don't * The lock synchronizes with sb_queue_tail(), so that we don't
@ -1971,38 +1965,56 @@ int stop_sync_thread(struct netns_ipvs *ipvs, int state)
struct ipvs_master_sync_state *ms = &ipvs->ms[id]; struct ipvs_master_sync_state *ms = &ipvs->ms[id];
int ret; int ret;
tinfo = &ti[id];
pr_info("stopping master sync thread %d ...\n", pr_info("stopping master sync thread %d ...\n",
task_pid_nr(ms->master_thread)); task_pid_nr(tinfo->task));
cancel_delayed_work_sync(&ms->master_wakeup_work); cancel_delayed_work_sync(&ms->master_wakeup_work);
ret = kthread_stop(ms->master_thread); ret = kthread_stop(tinfo->task);
if (retc >= 0) if (retc >= 0)
retc = ret; retc = ret;
} }
kfree(ipvs->ms); kfree(ipvs->ms);
ipvs->ms = NULL; ipvs->ms = NULL;
ipvs->master_tinfo = NULL;
} else if (state == IP_VS_STATE_BACKUP) { } else if (state == IP_VS_STATE_BACKUP) {
if (!ipvs->backup_threads) retc = -ESRCH;
return -ESRCH; if (!ipvs->backup_tinfo)
goto err;
ti = ipvs->backup_tinfo;
ipvs->sync_state &= ~IP_VS_STATE_BACKUP; ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
array = ipvs->backup_threads;
retc = 0; retc = 0;
for (id = ipvs->threads_mask; id >= 0; id--) { for (id = ipvs->threads_mask; id >= 0; id--) {
int ret; int ret;
tinfo = &ti[id];
pr_info("stopping backup sync thread %d ...\n", pr_info("stopping backup sync thread %d ...\n",
task_pid_nr(array[id])); task_pid_nr(tinfo->task));
ret = kthread_stop(array[id]); ret = kthread_stop(tinfo->task);
if (retc >= 0) if (retc >= 0)
retc = ret; retc = ret;
} }
kfree(array); ipvs->backup_tinfo = NULL;
ipvs->backup_threads = NULL; } else {
goto err;
} }
id = ipvs->threads_mask;
mutex_unlock(&ipvs->sync_mutex);
/* No more mutexes, release socks */
for (tinfo = ti + id; tinfo >= ti; tinfo--) {
if (tinfo->sock)
sock_release(tinfo->sock);
kfree(tinfo->buf);
}
kfree(ti);
/* decrease the module use count */ /* decrease the module use count */
ip_vs_use_count_dec(); ip_vs_use_count_dec();
return retc;
err:
mutex_unlock(&ipvs->sync_mutex);
return retc; return retc;
} }
@ -2021,7 +2033,6 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
{ {
int retc; int retc;
mutex_lock(&ipvs->sync_mutex);
retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER); retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
if (retc && retc != -ESRCH) if (retc && retc != -ESRCH)
pr_err("Failed to stop Master Daemon\n"); pr_err("Failed to stop Master Daemon\n");
@ -2029,5 +2040,4 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP); retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
if (retc && retc != -ESRCH) if (retc && retc != -ESRCH)
pr_err("Failed to stop Backup Daemon\n"); pr_err("Failed to stop Backup Daemon\n");
mutex_unlock(&ipvs->sync_mutex);
} }
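Taken together, these hunks move each sync thread's task pointer, socket and buffer into a single ip_vs_sync_thread_data array, so the start and stop paths walk one structure and can release the sockets after dropping sync_mutex. A pthread-based userspace analogy of that layout (build with -pthread; a sketch, not the IPVS logic):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Analogy of ip_vs_sync_thread_data: the thread handle lives next to the
 * per-thread resources it uses, so one array drives start and stop. */
struct thread_data {
	pthread_t task;
	char *buf;
	int id;
};

static void *worker(void *arg)
{
	struct thread_data *t = arg;

	snprintf(t->buf, 64, "thread %d ran", t->id);
	return NULL;
}

int main(void)
{
	int count = 4;
	struct thread_data *ti = calloc(count, sizeof(*ti));

	if (!ti)
		return 1;
	for (int id = 0; id < count; id++) {
		ti[id].id = id;
		ti[id].buf = malloc(64);
		if (!ti[id].buf) {
			count = id;	/* only stop what was started */
			break;
		}
		if (pthread_create(&ti[id].task, NULL, worker, &ti[id])) {
			free(ti[id].buf);
			count = id;
			break;
		}
	}
	/* Stop phase: join every started thread, then release its resources. */
	for (int id = count - 1; id >= 0; id--) {
		pthread_join(ti[id].task, NULL);
		puts(ti[id].buf);
		free(ti[id].buf);
	}
	free(ti);
	return 0;
}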


@ -1256,7 +1256,6 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple tuple;
struct nf_conn *ct; struct nf_conn *ct;
struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
struct nf_conntrack_zone zone; struct nf_conntrack_zone zone;
int err; int err;
@ -1266,11 +1265,13 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
if (cda[CTA_TUPLE_ORIG]) if (cda[CTA_TUPLE_ORIG])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
u3, &zone); nfmsg->nfgen_family, &zone);
else if (cda[CTA_TUPLE_REPLY]) else if (cda[CTA_TUPLE_REPLY])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
u3, &zone); nfmsg->nfgen_family, &zone);
else { else {
u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
return ctnetlink_flush_conntrack(net, cda, return ctnetlink_flush_conntrack(net, cda,
NETLINK_CB(skb).portid, NETLINK_CB(skb).portid,
nlmsg_report(nlh), u3); nlmsg_report(nlh), u3);


@ -218,7 +218,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
/* See ip_conntrack_proto_tcp.c */ /* See ip_conntrack_proto_tcp.c */
if (state->net->ct.sysctl_checksum && if (state->net->ct.sysctl_checksum &&
state->hook == NF_INET_PRE_ROUTING && state->hook == NF_INET_PRE_ROUTING &&
nf_ip_checksum(skb, state->hook, dataoff, 0)) { nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {
icmp_error_log(skb, state, "bad hw icmp checksum"); icmp_error_log(skb, state, "bad hw icmp checksum");
return -NF_ACCEPT; return -NF_ACCEPT;
} }


@ -564,7 +564,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
if (skb_ensure_writable(skb, hdrlen + sizeof(*inside))) if (skb_ensure_writable(skb, hdrlen + sizeof(*inside)))
return 0; return 0;
if (nf_ip_checksum(skb, hooknum, hdrlen, 0)) if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP))
return 0; return 0;
inside = (void *)skb->data + hdrlen; inside = (void *)skb->data + hdrlen;


@ -189,6 +189,11 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
goto err; goto err;
} }
if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
status = -ENETDOWN;
goto err;
}
*entry = (struct nf_queue_entry) { *entry = (struct nf_queue_entry) {
.skb = skb, .skb = skb,
.state = *state, .state = *state,
@ -197,7 +202,6 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
}; };
nf_queue_entry_get_refs(entry); nf_queue_entry_get_refs(entry);
skb_dst_force(skb);
switch (entry->state.pf) { switch (entry->state.pf) {
case AF_INET: case AF_INET:


@ -17,7 +17,8 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
case CHECKSUM_COMPLETE: case CHECKSUM_COMPLETE:
if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN) if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
break; break;
if ((protocol == 0 && !csum_fold(skb->csum)) || if ((protocol != IPPROTO_TCP && protocol != IPPROTO_UDP &&
!csum_fold(skb->csum)) ||
!csum_tcpudp_magic(iph->saddr, iph->daddr, !csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - dataoff, protocol, skb->len - dataoff, protocol,
skb->csum)) { skb->csum)) {
@ -26,7 +27,7 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
} }
/* fall through */ /* fall through */
case CHECKSUM_NONE: case CHECKSUM_NONE:
if (protocol == 0) if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP)
skb->csum = 0; skb->csum = 0;
else else
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
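The callers now pass the real protocol (IPPROTO_ICMP) and nf_ip_checksum() keys the decision on "is this TCP or UDP" instead of "is it 0": only TCP and UDP checksums cover an IPv4 pseudo-header, while ICMP is summed over the message alone, so its accumulated checksum is folded directly. A simplified model of that dispatch (not the kernel function):

#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>

/* Only TCP and UDP checksums include an IPv4 pseudo-header. */
static bool uses_pseudo_header(int protocol)
{
	return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
}

int main(void)
{
	printf("ICMP uses pseudo-header: %d\n", uses_pseudo_header(IPPROTO_ICMP)); /* 0 */
	printf("UDP  uses pseudo-header: %d\n", uses_pseudo_header(IPPROTO_UDP));  /* 1 */
	return 0;
}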


@ -869,7 +869,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
unsigned short frametype, flags, window, timeout; unsigned short frametype, flags, window, timeout;
int ret; int ret;
skb->sk = NULL; /* Initially we don't know who it's for */ skb_orphan(skb);
/* /*
* skb->data points to the netrom frame start * skb->data points to the netrom frame start
@ -968,6 +968,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
window = skb->data[20]; window = skb->data[20];
skb->sk = make; skb->sk = make;
skb->destructor = sock_efree;
make->sk_state = TCP_ESTABLISHED; make->sk_state = TCP_ESTABLISHED;
/* Fill in his circuit details */ /* Fill in his circuit details */


@ -107,7 +107,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id); conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
if (!conn_info) { if (!conn_info) {
rc = -EPROTO; rc = -EPROTO;
goto free_exit; goto exit;
} }
__skb_queue_head_init(&frags_q); __skb_queue_head_init(&frags_q);

Some files were not shown because too many files have changed in this diff.