Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [IPV6]: fix flowlabel seqfile handling
  [IPV6]: return EINVAL for invalid address with flowlabel lease request
  [SCTP]: Remove temporary associations from backlog and hash.
  [SCTP]: Correctly set IP id for SCTP traffic
  [NetLabel]: protect the CIPSOv4 socket option from setsockopt()
  [NETFILTER]: ip_tables: compat code module refcounting fix
  [NETFILTER]: nf_conntrack: add missing unlock in get_next_corpse()
  [NETFILTER]: ip_tables: compat error way cleanup
  [NETFILTER]: Missed and reordered checks in {arp,ip,ip6}_tables
  [NETFILTER]: remove masq/NAT from ip6tables Kconfig help
  [IPV6]: fix lockup via /proc/net/ip6_flowlabel
  [NET]: fix uaccess handling
  [SCTP]: Always linearise packet on input
  [ETH1394]: Fix unaligned accesses.
  [DCCP]: fix printk format warnings
  [NET]: Fix segmentation of linear packets
  [XFRM] xfrm_user: Fix unaligned accesses.
  [APPLETALK]: Fix potential OOPS in atalk_sendmsg().
  [NET] sealevel: uses arp_broken_ops
Linus Torvalds 2006-10-31 08:08:31 -08:00
commit 612b322ade
25 changed files with 229 additions and 116 deletions


@ -64,6 +64,7 @@
#include <linux/ethtool.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/unaligned.h>
#include <net/arp.h>
#include "config_roms.h"
@ -491,7 +492,7 @@ static void ether1394_reset_priv (struct net_device *dev, int set_mtu)
int i;
struct eth1394_priv *priv = netdev_priv(dev);
struct hpsb_host *host = priv->host;
u64 guid = *((u64*)&(host->csr.rom->bus_info_data[3]));
u64 guid = get_unaligned((u64*)&(host->csr.rom->bus_info_data[3]));
u16 maxpayload = 1 << (host->csr.max_rec + 1);
int max_speed = IEEE1394_SPEED_MAX;
@ -514,8 +515,8 @@ static void ether1394_reset_priv (struct net_device *dev, int set_mtu)
ETHER1394_GASP_OVERHEAD)));
/* Set our hardware address while we're at it */
*(u64*)dev->dev_addr = guid;
*(u64*)dev->broadcast = ~0x0ULL;
memcpy(dev->dev_addr, &guid, sizeof(u64));
memset(dev->broadcast, 0xff, sizeof(u64));
}
spin_unlock_irqrestore (&priv->lock, flags);
@ -894,6 +895,7 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
u16 maxpayload;
struct eth1394_node_ref *node;
struct eth1394_node_info *node_info;
__be64 guid;
/* Sanity check. MacOSX seems to be sending us 131 in this
* field (atleast on my Panther G5). Not sure why. */
@ -902,8 +904,9 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
maxpayload = min(eth1394_speedto_maxpayload[sspd], (u16)(1 << (max_rec + 1)));
guid = get_unaligned(&arp1394->s_uniq_id);
node = eth1394_find_node_guid(&priv->ip_node_list,
be64_to_cpu(arp1394->s_uniq_id));
be64_to_cpu(guid));
if (!node) {
return 0;
}
@ -931,10 +934,9 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
arp_ptr += arp->ar_pln; /* skip over sender IP addr */
if (arp->ar_op == htons(ARPOP_REQUEST))
/* just set ARP req target unique ID to 0 */
*((u64*)arp_ptr) = 0;
memset(arp_ptr, 0, sizeof(u64));
else
*((u64*)arp_ptr) = *((u64*)dev->dev_addr);
memcpy(arp_ptr, dev->dev_addr, sizeof(u64));
}
/* Now add the ethernet header. */
@ -1675,8 +1677,10 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
priv->bc_dgl++;
} else {
__be64 guid = get_unaligned((u64 *)eth->h_dest);
node = eth1394_find_node_guid(&priv->ip_node_list,
be64_to_cpu(*(u64*)eth->h_dest));
be64_to_cpu(guid));
if (!node) {
ret = -EAGAIN;
goto fail;
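
The ETH1394 hunks above all apply one pattern: never dereference a possibly unaligned byte buffer as a u64; go through get_unaligned()/memcpy()/memset() instead. A minimal userspace sketch of the memcpy() form, with made-up names and buffer layout (not code from the driver):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read/write a 64-bit value at an arbitrary byte offset without assuming
 * alignment; memcpy() lets the compiler emit loads and stores that are safe
 * for the target architecture. */
static uint64_t read_u64_unaligned(const unsigned char *p)
{
        uint64_t v;

        memcpy(&v, p, sizeof(v));
        return v;
}

static void write_u64_unaligned(unsigned char *p, uint64_t v)
{
        memcpy(p, &v, sizeof(v));
}

int main(void)
{
        unsigned char buf[16] = { 0 };

        write_u64_unaligned(buf + 3, 0x1122334455667788ULL);   /* deliberately odd offset */
        printf("0x%llx\n", (unsigned long long)read_u64_unaligned(buf + 3));
        return 0;
}

The xfrm_user hunks near the end of this commit make the same substitution for the saddr copies.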


@ -127,7 +127,7 @@ config LANMEDIA
# There is no way to detect a Sealevel board. Force it modular
config SEALEVEL_4021
tristate "Sealevel Systems 4021 support"
depends on WAN && ISA && m && ISA_DMA_API
depends on WAN && ISA && m && ISA_DMA_API && INET
help
This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.


@ -1584,7 +1584,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
if (usat->sat_addr.s_net || usat->sat_addr.s_node == ATADDR_ANYNODE) {
rt = atrtr_find(&usat->sat_addr);
dev = rt->dev;
} else {
struct atalk_addr at_hint;
@ -1592,7 +1591,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
at_hint.s_net = at->src_net;
rt = atrtr_find(&at_hint);
dev = rt->dev;
}
if (!rt)
return -ENETUNREACH;


@ -1946,7 +1946,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
do {
struct sk_buff *nskb;
skb_frag_t *frag;
int hsize, nsize;
int hsize;
int k;
int size;
@ -1957,11 +1957,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
hsize = skb_headlen(skb) - offset;
if (hsize < 0)
hsize = 0;
nsize = hsize + doffset;
if (nsize > len + doffset || !sg)
nsize = len + doffset;
if (hsize > len || !sg)
hsize = len;
nskb = alloc_skb(nsize + headroom, GFP_ATOMIC);
nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
if (unlikely(!nskb))
goto err;
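
The skb_segment() change drops the intermediate nsize and clamps hsize directly: a segment's linear part is whatever head data remains, bounded by the segment length, and for a linear (non scatter-gather) skb it must be the full segment length; the allocation then adds doffset and headroom explicitly. A standalone sketch of just that arithmetic, with illustrative names:

#include <stdio.h>

static int segment_hsize(int headlen, int offset, int len, int sg)
{
        int hsize = headlen - offset;

        if (hsize < 0)
                hsize = 0;
        if (hsize > len || !sg)         /* linear skbs copy the whole segment */
                hsize = len;
        return hsize;
}

int main(void)
{
        /* Linear packet (sg == 0): every segment carries all 'len' bytes. */
        printf("%d\n", segment_hsize(100, 120, 1448, 0));      /* 1448 */
        /* Scatter-gather: only the head data that is actually left. */
        printf("%d\n", segment_hsize(100, 60, 1448, 1));       /* 40 */
        return 0;
}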


@ -352,14 +352,14 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, int len)
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
ccid2_pr_debug("pipe=%d\n", hctx->ccid2hctx_pipe);
ccid2_pr_debug("Sent: seq=%llu\n", seq);
ccid2_pr_debug("Sent: seq=%llu\n", (unsigned long long)seq);
do {
struct ccid2_seq *seqp = hctx->ccid2hctx_seqt;
while (seqp != hctx->ccid2hctx_seqh) {
ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
seqp->ccid2s_seq, seqp->ccid2s_acked,
seqp->ccid2s_sent);
(unsigned long long)seqp->ccid2s_seq,
seqp->ccid2s_acked, seqp->ccid2s_sent);
seqp = seqp->ccid2s_next;
}
} while (0);
@ -480,7 +480,8 @@ static inline void ccid2_new_ack(struct sock *sk,
/* first measurement */
if (hctx->ccid2hctx_srtt == -1) {
ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
r, jiffies, seqp->ccid2s_seq);
r, jiffies,
(unsigned long long)seqp->ccid2s_seq);
ccid2_change_srtt(hctx, r);
hctx->ccid2hctx_rttvar = r >> 1;
} else {
@ -636,8 +637,9 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
u64 ackno_end_rl;
dccp_set_seqno(&ackno_end_rl, ackno - rl);
ccid2_pr_debug("ackvec start:%llu end:%llu\n", ackno,
ackno_end_rl);
ccid2_pr_debug("ackvec start:%llu end:%llu\n",
(unsigned long long)ackno,
(unsigned long long)ackno_end_rl);
/* if the seqno we are analyzing is larger than the
* current ackno, then move towards the tail of our
* seqnos.
@ -672,7 +674,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
seqp->ccid2s_acked = 1;
ccid2_pr_debug("Got ack for %llu\n",
seqp->ccid2s_seq);
(unsigned long long)seqp->ccid2s_seq);
ccid2_hc_tx_dec_pipe(sk);
}
if (seqp == hctx->ccid2hctx_seqt) {
@ -718,7 +720,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
while (1) {
if (!seqp->ccid2s_acked) {
ccid2_pr_debug("Packet lost: %llu\n",
seqp->ccid2s_seq);
(unsigned long long)seqp->ccid2s_seq);
/* XXX need to traverse from tail -> head in
* order to detect multiple congestion events in
* one ack vector.
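
The ccid2 hunks are pure printk-format fixes: u64 is unsigned long on some 64-bit architectures and unsigned long long on others, so %llu only matches after an explicit cast. A minimal userspace analogue (the kernel has no PRIu64, hence the cast style used above):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t seq = 12345678901234ULL;

        /* Cast to exactly what the format specifier expects... */
        printf("Sent: seq=%llu\n", (unsigned long long)seq);
        /* ...or, in userspace, let inttypes.h pick the specifier. */
        printf("Sent: seq=%" PRIu64 "\n", seq);
        return 0;
}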


@ -1307,7 +1307,8 @@ int cipso_v4_socket_setattr(const struct socket *sock,
/* We can't use ip_options_get() directly because it makes a call to
* ip_options_get_alloc() which allocates memory with GFP_KERNEL and
* we can't block here. */
* we won't always have CAP_NET_RAW even though we _always_ want to
* set the IPOPT_CIPSO option. */
opt_len = (buf_len + 3) & ~3;
opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
if (opt == NULL) {
@ -1317,11 +1318,9 @@ int cipso_v4_socket_setattr(const struct socket *sock,
memcpy(opt->__data, buf, buf_len);
opt->optlen = opt_len;
opt->is_data = 1;
opt->cipso = sizeof(struct iphdr);
kfree(buf);
buf = NULL;
ret_val = ip_options_compile(opt, NULL);
if (ret_val != 0)
goto socket_setattr_failure;
sk_inet = inet_sk(sk);
if (sk_inet->is_icsk) {


@ -443,7 +443,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
opt->router_alert = optptr - iph;
break;
case IPOPT_CIPSO:
if (opt->cipso) {
if ((!skb && !capable(CAP_NET_RAW)) || opt->cipso) {
pp_ptr = optptr;
goto error;
}


@ -466,7 +466,13 @@ static inline int check_entry(struct arpt_entry *e, const char *name, unsigned i
return -EINVAL;
}
if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset)
return -EINVAL;
t = arpt_get_target(e);
if (e->target_offset + t->u.target_size > e->next_offset)
return -EINVAL;
target = try_then_request_module(xt_find_target(NF_ARP, t->u.user.name,
t->u.user.revision),
"arpt_%s", t->u.user.name);
@ -621,20 +627,18 @@ static int translate_table(const char *name,
}
}
if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
duprintf("Looping hook\n");
return -ELOOP;
}
/* Finally, each sanity check must pass */
i = 0;
ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
check_entry, name, size, &i);
if (ret != 0) {
ARPT_ENTRY_ITERATE(entry0, newinfo->size,
cleanup_entry, &i);
return ret;
if (ret != 0)
goto cleanup;
ret = -ELOOP;
if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
duprintf("Looping hook\n");
goto cleanup;
}
/* And one copy for every other CPU */
@ -643,6 +647,9 @@ static int translate_table(const char *name,
memcpy(newinfo->entries[i], entry0, newinfo->size);
}
return 0;
cleanup:
ARPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
return ret;
}
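
The translate_table() rework here (mirrored in the ip_tables and ip6_tables hunks below) moves the mark_source_chains() call after the per-entry checks and funnels every failure through one cleanup label, so whatever the checks already registered is always released. A generic sketch of that goto-cleanup idiom with hypothetical resources:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct table {
        char *entries;
        char *shadow;
};

static int table_init(struct table *t, size_t size)
{
        int ret;

        t->entries = malloc(size);
        if (!t->entries)
                return -ENOMEM;

        ret = -ENOMEM;
        t->shadow = malloc(size);
        if (!t->shadow)
                goto cleanup;           /* every later failure lands here */

        return 0;

cleanup:
        free(t->entries);
        t->entries = NULL;
        return ret;
}

int main(void)
{
        struct table t = { NULL, NULL };

        printf("table_init: %d\n", table_init(&t, 64));
        free(t.shadow);
        free(t.entries);
        return 0;
}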


@ -547,12 +547,18 @@ check_entry(struct ipt_entry *e, const char *name, unsigned int size,
return -EINVAL;
}
if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
return -EINVAL;
j = 0;
ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
if (ret != 0)
goto cleanup_matches;
t = ipt_get_target(e);
ret = -EINVAL;
if (e->target_offset + t->u.target_size > e->next_offset)
goto cleanup_matches;
target = try_then_request_module(xt_find_target(AF_INET,
t->u.user.name,
t->u.user.revision),
@ -712,19 +718,17 @@ translate_table(const char *name,
}
}
if (!mark_source_chains(newinfo, valid_hooks, entry0))
return -ELOOP;
/* Finally, each sanity check must pass */
i = 0;
ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
check_entry, name, size, &i);
if (ret != 0) {
IPT_ENTRY_ITERATE(entry0, newinfo->size,
cleanup_entry, &i);
return ret;
}
if (ret != 0)
goto cleanup;
ret = -ELOOP;
if (!mark_source_chains(newinfo, valid_hooks, entry0))
goto cleanup;
/* And one copy for every other CPU */
for_each_possible_cpu(i) {
@ -732,6 +736,9 @@ translate_table(const char *name,
memcpy(newinfo->entries[i], entry0, newinfo->size);
}
return 0;
cleanup:
IPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
return ret;
}
@ -1463,6 +1470,10 @@ check_compat_entry_size_and_hooks(struct ipt_entry *e,
return -EINVAL;
}
if (e->target_offset + sizeof(struct compat_xt_entry_target) >
e->next_offset)
return -EINVAL;
off = 0;
entry_offset = (void *)e - (void *)base;
j = 0;
@ -1472,6 +1483,9 @@ check_compat_entry_size_and_hooks(struct ipt_entry *e,
goto cleanup_matches;
t = ipt_get_target(e);
ret = -EINVAL;
if (e->target_offset + t->u.target_size > e->next_offset)
goto cleanup_matches;
target = try_then_request_module(xt_find_target(AF_INET,
t->u.user.name,
t->u.user.revision),
@ -1513,7 +1527,7 @@ cleanup_matches:
static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
void **dstptr, compat_uint_t *size, const char *name,
const struct ipt_ip *ip, unsigned int hookmask, int *i)
const struct ipt_ip *ip, unsigned int hookmask)
{
struct ipt_entry_match *dm;
struct ipt_match *match;
@ -1526,22 +1540,13 @@ static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
name, hookmask, ip->proto,
ip->invflags & IPT_INV_PROTO);
if (ret)
goto err;
if (m->u.kernel.match->checkentry
if (!ret && m->u.kernel.match->checkentry
&& !m->u.kernel.match->checkentry(name, ip, match, dm->data,
hookmask)) {
duprintf("ip_tables: check failed for `%s'.\n",
m->u.kernel.match->name);
ret = -EINVAL;
goto err;
}
(*i)++;
return 0;
err:
module_put(m->u.kernel.match->me);
return ret;
}
@ -1553,19 +1558,18 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
struct ipt_target *target;
struct ipt_entry *de;
unsigned int origsize;
int ret, h, j;
int ret, h;
ret = 0;
origsize = *size;
de = (struct ipt_entry *)*dstptr;
memcpy(de, e, sizeof(struct ipt_entry));
j = 0;
*dstptr += sizeof(struct compat_ipt_entry);
ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
name, &de->ip, de->comefrom, &j);
name, &de->ip, de->comefrom);
if (ret)
goto cleanup_matches;
goto err;
de->target_offset = e->target_offset - (origsize - *size);
t = ipt_get_target(e);
target = t->u.kernel.target;
@ -1599,12 +1603,7 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
goto err;
}
ret = 0;
return ret;
err:
module_put(t->u.kernel.target->me);
cleanup_matches:
IPT_MATCH_ITERATE(e, cleanup_match, &j);
return ret;
}
@ -1618,7 +1617,7 @@ translate_compat_table(const char *name,
unsigned int *hook_entries,
unsigned int *underflows)
{
unsigned int i;
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
unsigned int size;
@ -1636,21 +1635,21 @@ translate_compat_table(const char *name,
}
duprintf("translate_compat_table: size %u\n", info->size);
i = 0;
j = 0;
xt_compat_lock(AF_INET);
/* Walk through entries, checking offsets. */
ret = IPT_ENTRY_ITERATE(entry0, total_size,
check_compat_entry_size_and_hooks,
info, &size, entry0,
entry0 + total_size,
hook_entries, underflows, &i, name);
hook_entries, underflows, &j, name);
if (ret != 0)
goto out_unlock;
ret = -EINVAL;
if (i != number) {
if (j != number) {
duprintf("translate_compat_table: %u not %u entries\n",
i, number);
j, number);
goto out_unlock;
}
@ -1709,8 +1708,10 @@ translate_compat_table(const char *name,
free_newinfo:
xt_free_table_info(newinfo);
out:
IPT_ENTRY_ITERATE(entry0, total_size, cleanup_entry, &j);
return ret;
out_unlock:
compat_flush_offsets();
xt_compat_unlock(AF_INET);
goto out;
}


@ -329,7 +329,7 @@ error:
return err;
}
static void raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
static int raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
{
struct iovec *iov;
u8 __user *type = NULL;
@ -338,7 +338,7 @@ static void raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
unsigned int i;
if (!msg->msg_iov)
return;
return 0;
for (i = 0; i < msg->msg_iovlen; i++) {
iov = &msg->msg_iov[i];
@ -360,8 +360,9 @@ static void raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
code = iov->iov_base;
if (type && code) {
get_user(fl->fl_icmp_type, type);
get_user(fl->fl_icmp_code, code);
if (get_user(fl->fl_icmp_type, type) ||
get_user(fl->fl_icmp_code, code))
return -EFAULT;
probed = 1;
}
break;
@ -372,6 +373,7 @@ static void raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
if (probed)
break;
}
return 0;
}
static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
@ -480,8 +482,11 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
.proto = inet->hdrincl ? IPPROTO_RAW :
sk->sk_protocol,
};
if (!inet->hdrincl)
raw_probe_proto_opt(&fl, msg);
if (!inet->hdrincl) {
err = raw_probe_proto_opt(&fl, msg);
if (err)
goto done;
}
security_sk_classify_flow(sk, &fl);
err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT));


@ -330,8 +330,10 @@ fl_create(struct in6_flowlabel_req *freq, char __user *optval, int optlen, int *
fl->share = freq->flr_share;
addr_type = ipv6_addr_type(&freq->flr_dst);
if ((addr_type&IPV6_ADDR_MAPPED)
|| addr_type == IPV6_ADDR_ANY)
|| addr_type == IPV6_ADDR_ANY) {
err = -EINVAL;
goto done;
}
ipv6_addr_copy(&fl->dst, &freq->flr_dst);
atomic_set(&fl->users, 1);
switch (fl->share) {
@ -587,6 +589,8 @@ static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flo
while (!fl) {
if (++state->bucket <= FL_HASH_MASK)
fl = fl_ht[state->bucket];
else
break;
}
return fl;
}
@ -623,9 +627,13 @@ static void ip6fl_seq_stop(struct seq_file *seq, void *v)
read_unlock_bh(&ip6_fl_lock);
}
static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
while(fl) {
if (v == SEQ_START_TOKEN)
seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
"Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
else {
struct ip6_flowlabel *fl = v;
seq_printf(seq,
"%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_SEQFMT " %-4d\n",
(unsigned)ntohl(fl->label),
@ -636,17 +644,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
(long)(fl->expires - jiffies)/HZ,
NIP6(fl->dst),
fl->opt ? fl->opt->opt_nflen : 0);
fl = fl->next;
}
}
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
"Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
else
ip6fl_fl_seq_show(seq, v);
return 0;
}
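
The ip6fl_get_next() hunk is the actual lockup fix: without the else break, an empty tail of the flow-label hash leaves fl NULL while the bucket index keeps incrementing past FL_HASH_MASK, so the seq_file walk spins forever under ip6_fl_lock. A small standalone sketch of the corrected bucket walk (hash size and node layout invented for illustration):

#include <stdio.h>

#define HASH_MASK 7

struct node {
        struct node *next;
        int val;
};

static struct node *table[HASH_MASK + 1];

static struct node *next_entry(struct node *cur, int *bucket)
{
        struct node *n = cur ? cur->next : NULL;

        while (!n) {
                if (++(*bucket) <= HASH_MASK)
                        n = table[*bucket];
                else
                        break;          /* past the last bucket: stop, don't spin */
        }
        return n;
}

int main(void)
{
        struct node a = { NULL, 42 };
        int bucket = -1;

        table[3] = &a;
        for (struct node *n = next_entry(NULL, &bucket); n != NULL; n = next_entry(n, &bucket))
                printf("%d\n", n->val);
        return 0;
}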


@ -40,7 +40,7 @@ config IP6_NF_QUEUE
To compile it as a module, choose M here. If unsure, say N.
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering/masq/NAT)"
tristate "IP6 tables support (required for filtering)"
depends on NETFILTER_XTABLES
help
ip6tables is a general, extensible packet identification framework.


@ -586,12 +586,19 @@ check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
return -EINVAL;
}
if (e->target_offset + sizeof(struct ip6t_entry_target) >
e->next_offset)
return -EINVAL;
j = 0;
ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6, e->comefrom, &j);
if (ret != 0)
goto cleanup_matches;
t = ip6t_get_target(e);
ret = -EINVAL;
if (e->target_offset + t->u.target_size > e->next_offset)
goto cleanup_matches;
target = try_then_request_module(xt_find_target(AF_INET6,
t->u.user.name,
t->u.user.revision),
@ -751,19 +758,17 @@ translate_table(const char *name,
}
}
if (!mark_source_chains(newinfo, valid_hooks, entry0))
return -ELOOP;
/* Finally, each sanity check must pass */
i = 0;
ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
check_entry, name, size, &i);
if (ret != 0) {
IP6T_ENTRY_ITERATE(entry0, newinfo->size,
cleanup_entry, &i);
return ret;
}
if (ret != 0)
goto cleanup;
ret = -ELOOP;
if (!mark_source_chains(newinfo, valid_hooks, entry0))
goto cleanup;
/* And one copy for every other CPU */
for_each_possible_cpu(i) {
@ -771,6 +776,9 @@ translate_table(const char *name,
memcpy(newinfo->entries[i], entry0, newinfo->size);
}
return 0;
cleanup:
IP6T_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
return ret;
}


@ -604,7 +604,7 @@ error:
return err;
}
static void rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
{
struct iovec *iov;
u8 __user *type = NULL;
@ -616,7 +616,7 @@ static void rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
int i;
if (!msg->msg_iov)
return;
return 0;
for (i = 0; i < msg->msg_iovlen; i++) {
iov = &msg->msg_iov[i];
@ -638,8 +638,9 @@ static void rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
code = iov->iov_base;
if (type && code) {
get_user(fl->fl_icmp_type, type);
get_user(fl->fl_icmp_code, code);
if (get_user(fl->fl_icmp_type, type) ||
get_user(fl->fl_icmp_code, code))
return -EFAULT;
probed = 1;
}
break;
@ -650,7 +651,8 @@ static void rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
/* check if type field is readable or not. */
if (iov->iov_len > 2 - len) {
u8 __user *p = iov->iov_base;
get_user(fl->fl_mh_type, &p[2 - len]);
if (get_user(fl->fl_mh_type, &p[2 - len]))
return -EFAULT;
probed = 1;
} else
len += iov->iov_len;
@ -664,6 +666,7 @@ static void rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
if (probed)
break;
}
return 0;
}
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
@ -787,7 +790,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
opt = ipv6_fixup_options(&opt_space, opt);
fl.proto = proto;
rawv6_probe_proto_opt(&fl, msg);
err = rawv6_probe_proto_opt(&fl, msg);
if (err)
goto out;
ipv6_addr_copy(&fl.fl6_dst, daddr);
if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))


@ -1520,9 +1520,10 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
if (iter(ct, data))
goto found;
}
write_unlock_bh(&nf_conntrack_lock);
return NULL;
found:
atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
atomic_inc(&ct->ct_general.use);
write_unlock_bh(&nf_conntrack_lock);
return ct;
}
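
The get_next_corpse() hunk adds the unlock the not-found path was missing, and takes the extra reference through the ct pointer the loop already holds. The general rule it restores, that every return out of a locked region must drop the lock, in a userspace sketch with a pthread mutex standing in for nf_conntrack_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int items[4] = { 1, 2, 3, 4 };

static int *find_item(int (*match)(int))
{
        pthread_mutex_lock(&lock);
        for (int i = 0; i < 4; i++) {
                if (match(items[i])) {
                        pthread_mutex_unlock(&lock);    /* found: unlock, then return */
                        return &items[i];
                }
        }
        pthread_mutex_unlock(&lock);    /* not found: the unlock that was missing */
        return NULL;
}

static int is_three(int v)
{
        return v == 3;
}

int main(void)
{
        printf("%s\n", find_item(is_three) ? "found" : "not found");
        return 0;
}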


@ -1075,8 +1075,9 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
len = sizeof(int);
val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
put_user(len, optlen);
put_user(val, optval);
if (put_user(len, optlen) ||
put_user(val, optval))
return -EFAULT;
err = 0;
break;
default:
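
This netlink_getsockopt() hunk, like the raw_probe_proto_opt() and rawv6_probe_proto_opt() changes earlier, stops ignoring the result of get_user()/put_user(): they return nonzero when the user pointer faults, and that has to be reported to the caller as -EFAULT instead of being dropped. A userspace analogue with a fallible copy helper standing in for get_user():

#include <errno.h>
#include <stdio.h>

/* Stand-in for get_user(): copies one byte, "faults" when the source is NULL. */
static int read_byte(unsigned char *dst, const unsigned char *src)
{
        if (!src)
                return -EFAULT;
        *dst = *src;
        return 0;
}

static int probe_type_code(unsigned char *type, unsigned char *code,
                           const unsigned char *utype, const unsigned char *ucode)
{
        /* Propagate the first failure instead of silently using garbage. */
        if (read_byte(type, utype) || read_byte(code, ucode))
                return -EFAULT;
        return 0;
}

int main(void)
{
        unsigned char t, c, src_type = 8, src_code = 0;

        if (probe_type_code(&t, &c, &src_type, &src_code) == 0)
                printf("type=%d code=%d\n", t, c);
        if (probe_type_code(&t, &c, NULL, &src_code) == -EFAULT)
                printf("fault propagated\n");
        return 0;
}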


@ -346,11 +346,18 @@ void sctp_association_free(struct sctp_association *asoc)
struct list_head *pos, *temp;
int i;
list_del(&asoc->asocs);
/* Only real associations count against the endpoint, so
* don't bother for if this is a temporary association.
*/
if (!asoc->temp) {
list_del(&asoc->asocs);
/* Decrement the backlog value for a TCP-style listening socket. */
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
sk->sk_ack_backlog--;
/* Decrement the backlog value for a TCP-style listening
* socket.
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
sk->sk_ack_backlog--;
}
/* Mark as dead, so other users can know this structure is
* going away.


@ -144,6 +144,13 @@ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
{
struct sock *sk = ep->base.sk;
/* If this is a temporary association, don't bother
* since we'll be removing it shortly and don't
* want anyone to find it anyway.
*/
if (asoc->temp)
return;
/* Now just add it to our list of asocs */
list_add_tail(&asoc->asocs, &ep->asocs);


@ -135,6 +135,9 @@ int sctp_rcv(struct sk_buff *skb)
SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
if (skb_linearize(skb))
goto discard_it;
sh = (struct sctphdr *) skb->h.raw;
/* Pull up the IP and SCTP headers. */
@ -768,6 +771,9 @@ static void __sctp_hash_established(struct sctp_association *asoc)
/* Add an association to the hash. Local BH-safe. */
void sctp_hash_established(struct sctp_association *asoc)
{
if (asoc->temp)
return;
sctp_local_bh_disable();
__sctp_hash_established(asoc);
sctp_local_bh_enable();
@ -801,6 +807,9 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
/* Remove association from the hash table. Local BH-safe. */
void sctp_unhash_established(struct sctp_association *asoc)
{
if (asoc->temp)
return;
sctp_local_bh_disable();
__sctp_unhash_established(asoc);
sctp_local_bh_enable();


@ -591,7 +591,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
newinet->dport = htons(asoc->peer.port);
newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
newinet->pmtudisc = inet->pmtudisc;
newinet->id = 0;
newinet->id = asoc->next_tsn ^ jiffies;
newinet->uc_ttl = -1;
newinet->mc_loop = 1;


@ -3372,6 +3372,7 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
{
struct sock *sk = asoc->base.sk;
struct socket *sock;
struct inet_sock *inetsk;
int err = 0;
/* An association cannot be branched off from an already peeled-off
@ -3389,6 +3390,14 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
* asoc to the newsk.
*/
sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
/* Make peeled-off sockets more like 1-1 accepted sockets.
* Set the daddr and initialize id to something more random
*/
inetsk = inet_sk(sock->sk);
inetsk->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
inetsk->id = asoc->next_tsn ^ jiffies;
*sockp = sock;
return err;


@ -323,7 +323,7 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
x->props.replay_window = p->replay_window;
x->props.reqid = p->reqid;
x->props.family = p->family;
x->props.saddr = p->saddr;
memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
x->props.flags = p->flags;
}
@ -545,7 +545,7 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
memcpy(&p->lft, &x->lft, sizeof(p->lft));
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
memcpy(&p->stats, &x->stats, sizeof(p->stats));
p->saddr = x->props.saddr;
memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
p->mode = x->props.mode;
p->replay_window = x->props.replay_window;
p->reqid = x->props.reqid;


@ -3313,7 +3313,13 @@ static int selinux_socket_getpeername(struct socket *sock)
static int selinux_socket_setsockopt(struct socket *sock,int level,int optname)
{
return socket_has_perm(current, sock, SOCKET__SETOPT);
int err;
err = socket_has_perm(current, sock, SOCKET__SETOPT);
if (err)
return err;
return selinux_netlbl_socket_setsockopt(sock, level, optname);
}
static int selinux_socket_getsockopt(struct socket *sock, int level,


@ -53,6 +53,9 @@ void selinux_netlbl_sk_security_init(struct sk_security_struct *ssec,
void selinux_netlbl_sk_clone_security(struct sk_security_struct *ssec,
struct sk_security_struct *newssec);
int selinux_netlbl_inode_permission(struct inode *inode, int mask);
int selinux_netlbl_socket_setsockopt(struct socket *sock,
int level,
int optname);
#else
static inline void selinux_netlbl_cache_invalidate(void)
{
@ -114,6 +117,13 @@ static inline int selinux_netlbl_inode_permission(struct inode *inode,
{
return 0;
}
static inline int selinux_netlbl_socket_setsockopt(struct socket *sock,
int level,
int optname)
{
return 0;
}
#endif /* CONFIG_NETLABEL */
#endif


@ -2682,4 +2682,41 @@ u32 selinux_netlbl_socket_getpeersec_dgram(struct sk_buff *skb)
return peer_sid;
}
/**
* selinux_netlbl_socket_setsockopt - Do not allow users to remove a NetLabel
* @sock: the socket
* @level: the socket level or protocol
* @optname: the socket option name
*
* Description:
* Check the setsockopt() call and if the user is trying to replace the IP
* options on a socket and a NetLabel is in place for the socket deny the
* access; otherwise allow the access. Returns zero when the access is
* allowed, -EACCES when denied, and other negative values on error.
*
*/
int selinux_netlbl_socket_setsockopt(struct socket *sock,
int level,
int optname)
{
int rc = 0;
struct inode *inode = SOCK_INODE(sock);
struct sk_security_struct *sksec = sock->sk->sk_security;
struct inode_security_struct *isec = inode->i_security;
struct netlbl_lsm_secattr secattr;
mutex_lock(&isec->lock);
if (level == IPPROTO_IP && optname == IP_OPTIONS &&
sksec->nlbl_state == NLBL_LABELED) {
netlbl_secattr_init(&secattr);
rc = netlbl_socket_getattr(sock, &secattr);
if (rc == 0 && (secattr.cache || secattr.mls_lvl_vld))
rc = -EACCES;
netlbl_secattr_destroy(&secattr);
}
mutex_unlock(&isec->lock);
return rc;
}
#endif /* CONFIG_NETLABEL */