commit 0ee1307990

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [PKTGEN]: Remove write-only variable.
  [NETFILTER]: xt_tcpudp: fix wrong struct in udp_checkentry
  [NET_SCHED] sch_prio.c: remove duplicate call of tc_classify()
  [BRIDGE]: Fix OOPS when bridging device without ethtool.
  [BRIDGE]: Packets leaking out of disabled/blocked ports.
  [TCP]: Allow minimum RTO to be configurable via routing metrics.
  SCTP: Fix to handle invalid parameter length correctly
  SCTP: Abort on COOKIE-ECHO if backlog is exceeded.
  SCTP: Correctly disable listening when backlog is 0.
  SCTP: Do not retransmit chunks that are newer then rtt.
  SCTP: Uncomfirmed transports can't become Inactive
  SCTP: Pick the correct port when binding to 0.
  SCTP: Use net_ratelimit to suppress error messages print too fast
  SCTP: Fix to encode PROTOCOL VIOLATION error cause correctly
  SCTP: Fix sctp_addto_chunk() to add pad with correct length
  SCTP: Assign stream sequence numbers to the entire message
  SCTP: properly clean up fragment and ordering queues during FWD-TSN.
  [PKTGEN]: Fix multiqueue oops.
  [BNX2]: Add write posting comment.
  [BNX2]: Use msleep().

@@ -3934,11 +3934,13 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 	/* Chip reset. */
 	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
 
+	/* Reading back any register after chip reset will hang the
+	 * bus on 5706 A0 and A1.  The msleep below provides plenty
+	 * of margin for write posting.
+	 */
 	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
-	    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
-		current->state = TASK_UNINTERRUPTIBLE;
-		schedule_timeout(HZ / 50);
-	}
+	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
+		msleep(20);
 
 	/* Reset takes approximate 30 usec */
 	for (i = 0; i < 10; i++) {

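The new comment is the interesting part of this hunk: the usual way to guarantee a posted PCI write has reached the device is to read a register back, but on the 5706 A0/A1 that read-back hangs the bus, so a generous sleep stands in for the flush. A user-space sketch of the two idioms follows; the fake_reg variable, the stub REG_WR/REG_RD macros, and the timing are illustrative stand-ins, not the driver's real MMIO accessors.

#include <stdio.h>
#include <unistd.h>

static volatile unsigned int fake_reg;	/* stand-in for a device register */
#define REG_WR(r, v)	((r) = (v))	/* posted MMIO write (stub) */
#define REG_RD(r)	(r)		/* read-back (stub) */

int main(void)
{
	/* Usual idiom: a read on the same bus flushes posted writes. */
	REG_WR(fake_reg, 0x1);
	(void)REG_RD(fake_reg);

	/* 5706 A0/A1 idiom from this patch: the read-back would hang,
	 * so wait long enough for the write to post instead (msleep(20)
	 * in the kernel, usleep() here).
	 */
	REG_WR(fake_reg, 0x1);
	usleep(20 * 1000);

	puts("done");
	return 0;
}
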
@@ -351,6 +351,8 @@ enum
 #define RTAX_INITCWND RTAX_INITCWND
 	RTAX_FEATURES,
 #define RTAX_FEATURES RTAX_FEATURES
+	RTAX_RTO_MIN,
+#define RTAX_RTO_MIN RTAX_RTO_MIN
 	__RTAX_MAX
 };
 

@@ -214,7 +214,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
 					   const struct sctp_chunk *);
 struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
 					       const struct sctp_chunk *);
-void sctp_init_cause(struct sctp_chunk *, __be16 cause, const void *, size_t);
+void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t);
 struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
 				   const struct sctp_chunk *,
 				   const size_t hint);

@@ -726,6 +726,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
 			  struct iovec *data);
 void sctp_chunk_free(struct sctp_chunk *);
 void  *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void  *sctp_addto_param(struct sctp_chunk *, int len, const void *data);
 struct sctp_chunk *sctp_chunkify(struct sk_buff *,
 				 const struct sctp_association *,
 				 struct sock *);

@@ -83,6 +83,7 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
 
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
 #endif /* __sctp_ulpqueue_h__ */
 
 

@@ -384,6 +384,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	if (hold_time(br) == 0)
 		return;
 
+	/* ignore packets unless we are using this port */
+	if (!(source->state == BR_STATE_LEARNING ||
+	      source->state == BR_STATE_FORWARDING))
+		return;
+
 	fdb = fdb_find(head, addr);
 	if (likely(fdb)) {
 		/* attempt to update an entry for a local interface */

@@ -33,17 +33,17 @@
  */
 static int port_cost(struct net_device *dev)
 {
-	if (dev->ethtool_ops->get_settings) {
-		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
-		int err = dev->ethtool_ops->get_settings(dev, &ecmd);
-		if (!err) {
+	if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
+		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
+
+		if (!dev->ethtool_ops->get_settings(dev, &ecmd)) {
 			switch(ecmd.speed) {
-			case SPEED_100:
-				return 19;
-			case SPEED_1000:
-				return 4;
 			case SPEED_10000:
 				return 2;
+			case SPEED_1000:
+				return 4;
+			case SPEED_100:
+				return 19;
 			case SPEED_10:
 				return 100;
 			}

@@ -101,9 +101,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
 {
 	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
 
-	if (p && p->state != BR_STATE_DISABLED)
+	if (p)
 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
-
 	return 0;	/* process further */
 }
 

@@ -380,7 +380,6 @@ struct pktgen_thread {
 	/* Field for thread to receive "posted" events terminate, stop ifs etc. */
 
 	u32 control;
-	int pid;
 	int cpu;
 
 	wait_queue_head_t queue;

@@ -3331,8 +3330,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	}
 
 	if ((netif_queue_stopped(odev) ||
-	     netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
-	    need_resched()) {
+	     (pkt_dev->skb &&
+	      netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+	    need_resched()) {
 		idle_start = getCurUs();
 
 		if (!netif_running(odev)) {

@@ -3462,8 +3462,6 @@ static int pktgen_thread_worker(void *arg)
 
 	init_waitqueue_head(&t->queue);
 
-	t->pid = current->pid;
-
 	pr_debug("pktgen: starting pktgen/%d:  pid=%d\n", cpu, current->pid);
 
 	max_before_softirq = t->max_before_softirq;

@@ -555,6 +555,16 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 		tcp_grow_window(sk, skb);
 }
 
+static u32 tcp_rto_min(struct sock *sk)
+{
+	struct dst_entry *dst = __sk_dst_get(sk);
+	u32 rto_min = TCP_RTO_MIN;
+
+	if (dst_metric_locked(dst, RTAX_RTO_MIN))
+		rto_min = dst->metrics[RTAX_RTO_MIN-1];
+	return rto_min;
+}
+
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge

@@ -616,13 +626,13 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 			if (tp->mdev_max < tp->rttvar)
 				tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
 			tp->rtt_seq = tp->snd_nxt;
-			tp->mdev_max = TCP_RTO_MIN;
+			tp->mdev_max = tcp_rto_min(sk);
 		}
 	} else {
 		/* no previous measure. */
 		tp->srtt = m<<3;	/* take the measured time to be rtt */
 		tp->mdev = m<<1;	/* make sure rto = 3*rtt */
-		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 		tp->rtt_seq = tp->snd_nxt;
 	}
 }

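Taken together, the two hunks above replace the compile-time TCP_RTO_MIN floor with tcp_rto_min(), which consults the new RTAX_RTO_MIN metric on the route (later iproute2 releases expose this as an rto_min option on ip route). The estimator keeps srtt scaled by 8 and mdev by 2. A stand-alone sketch of the "no previous measure" seeding, using invented millisecond values rather than jiffies:

#include <stdio.h>

#define TCP_RTO_MIN 200u	/* illustrative floor in ms; the kernel uses jiffies */

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int m = 100;			/* first RTT sample, ms */
	unsigned int rto_min = TCP_RTO_MIN;	/* or dst->metrics[RTAX_RTO_MIN-1] */

	unsigned int srtt = m << 3;	/* srtt is stored as 8*rtt */
	unsigned int mdev = m << 1;	/* 2*rtt, so that rto ~ 3*rtt */
	unsigned int rttvar = max_u(mdev, rto_min);

	printf("srtt=%u mdev=%u rttvar=%u\n", srtt, mdev, rttvar);
	return 0;
}
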
@@ -188,7 +188,7 @@ udp_checkentry(const char *tablename,
 	       void *matchinfo,
 	       unsigned int hook_mask)
 {
-	const struct xt_tcp *udpinfo = matchinfo;
+	const struct xt_udp *udpinfo = matchinfo;
 
 	/* Must specify no unknown invflags */
 	return !(udpinfo->invflags & ~XT_UDP_INV_MASK);

@@ -44,7 +44,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
-		switch (tc_classify(skb, q->filter_list, &res)) {
+		switch (err) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
 			*qerr = NET_XMIT_SUCCESS;

@@ -727,7 +727,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
 		break;
 
 	case SCTP_TRANSPORT_DOWN:
-		transport->state = SCTP_INACTIVE;
+		/* if the transort was never confirmed, do not transition it
+		 * to inactive state.
+		 */
+		if (transport->state != SCTP_UNCONFIRMED)
+			transport->state = SCTP_INACTIVE;
+
 		spc_state = SCTP_ADDR_UNREACHABLE;
 		break;
 

@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 		 */
 		if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
 		   (!fast_retransmit && !chunk->tsn_gap_acked)) {
+			/* If this chunk was sent less then 1 rto ago, do not
+			 * retransmit this chunk, but give the peer time
+			 * to acknowlege it.
+			 */
+			if ((jiffies - chunk->sent_at) < transport->rto)
+				continue;
+
 			/* RFC 2960 6.2.1 Processing a Received SACK
 			 *
 			 * C) Any time a DATA chunk is marked for

@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = {
  * abort chunk.
  */
 void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
-		     const void *payload, size_t paylen)
+		     size_t paylen)
 {
 	sctp_errhdr_t err;
 	__u16 len;

@@ -120,7 +120,6 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
 	len = sizeof(sctp_errhdr_t) + paylen;
 	err.length = htons(len);
 	chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
-	sctp_addto_chunk(chunk, paylen, payload);
 }
 
 /* 3.3.2 Initiation (INIT) (1)

@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data(
 
 	/* Put the tsn back into network byte order.  */
 	payload = htonl(tsn);
-	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
-			sizeof(payload));
+	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+	sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
 
 	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
 	 *

@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
 			goto err_copy;
 	}
 
-	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+	sctp_addto_chunk(retval, paylen, payload);
 
 	if (paylen)
 		kfree(payload);

@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation(
 	struct sctp_paramhdr phdr;
 
 	retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
-					+ sizeof(sctp_chunkhdr_t));
+					+ sizeof(sctp_paramhdr_t));
 	if (!retval)
 		goto end;
 
-	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+					+ sizeof(sctp_paramhdr_t));
 
 	phdr.type = htons(chunk->chunk_hdr->type);
 	phdr.length = chunk->chunk_hdr->length;
-	sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+	sctp_addto_chunk(retval, paylen, payload);
+	sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
 
 end:
 	return retval;

@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
 	if (!retval)
 		goto nodata;
 
-	sctp_init_cause(retval, cause_code, payload, paylen);
+	sctp_init_cause(retval, cause_code, paylen);
+	sctp_addto_chunk(retval, paylen, payload);
 
 nodata:
 	return retval;

@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
 	void *target;
 	void *padding;
 	int chunklen = ntohs(chunk->chunk_hdr->length);
-	int padlen = chunklen % 4;
+	int padlen = WORD_ROUND(chunklen) - chunklen;
 
 	padding = skb_put(chunk->skb, padlen);
 	target = skb_put(chunk->skb, len);

@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
 	return target;
 }
 
+/* Append bytes to the end of a parameter.  Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+	void *target;
+	int chunklen = ntohs(chunk->chunk_hdr->length);
+
+	target = skb_put(chunk->skb, len);
+
+	memcpy(target, data, len);
+
+	/* Adjust the chunk length field.  */
+	chunk->chunk_hdr->length = htons(chunklen + len);
+	chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+	return target;
+}
+
 /* Append bytes from user space to the end of a chunk.  Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.

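The one-line padding fix in sctp_addto_chunk() above is easiest to see with numbers: "chunklen % 4" is the overshoot past the previous word boundary, while WORD_ROUND(chunklen) - chunklen is the pad still needed to reach the next one. A quick user-space check, with WORD_ROUND written out as in the SCTP headers:

#include <stdio.h>

#define WORD_ROUND(s) (((s) + 3) & ~3)	/* round up to a 4-byte boundary */

int main(void)
{
	for (int chunklen = 4; chunklen <= 8; chunklen++)
		printf("chunklen=%d  old padlen=%d  fixed padlen=%d\n",
		       chunklen,
		       chunklen % 4,				/* buggy: 5 -> 1 */
		       WORD_ROUND(chunklen) - chunklen);	/* fixed: 5 -> 3 */
	return 0;
}
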
@@ -1174,25 +1196,36 @@ out:
  */
 void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
 {
+	struct sctp_datamsg *msg;
+	struct sctp_chunk *lchunk;
+	struct sctp_stream *stream;
 	__u16 ssn;
 	__u16 sid;
 
 	if (chunk->has_ssn)
 		return;
 
-	/* This is the last possible instant to assign a SSN. */
-	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
-		ssn = 0;
-	} else {
-		sid = ntohs(chunk->subh.data_hdr->stream);
-		if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-			ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
-		else
-			ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
-	}
+	/* All fragments will be on the same stream */
+	sid = ntohs(chunk->subh.data_hdr->stream);
+	stream = &chunk->asoc->ssnmap->out;
 
-	chunk->subh.data_hdr->ssn = htons(ssn);
-	chunk->has_ssn = 1;
+	/* Now assign the sequence number to the entire message.
+	 * All fragments must have the same stream sequence number.
+	 */
+	msg = chunk->msg;
+	list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+			ssn = 0;
+		} else {
+			if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+				ssn = sctp_ssn_next(stream, sid);
+			else
+				ssn = sctp_ssn_peek(stream, sid);
+		}
+
+		lchunk->subh.data_hdr->ssn = htons(ssn);
+		lchunk->has_ssn = 1;
+	}
 }
 
 /* Helper function to assign a TSN if needed.  This assumes that both

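The rewritten sctp_chunk_assign_ssn() above stamps every fragment of a message with the same stream sequence number and only advances the per-stream counter when it reaches the last fragment. A toy model of the peek/next pair, with one stream and invented fragment counts:

#include <stdio.h>

static unsigned short counter;	/* one stream's outbound SSN counter */

static unsigned short ssn_peek(void) { return counter; }	/* read, don't advance */
static unsigned short ssn_next(void) { return counter++; }	/* read and advance */

int main(void)
{
	for (int msg = 0; msg < 2; msg++)
		for (int frag = 0; frag < 3; frag++) {
			int last = (frag == 2);
			unsigned short ssn = last ? ssn_next() : ssn_peek();
			printf("message %d, fragment %d: ssn=%u\n", msg, frag, ssn);
		}
	return 0;	/* every fragment of a given message prints the same ssn */
}
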
@@ -1466,7 +1499,8 @@ no_hmac:
 			__be32 n = htonl(usecs);
 
 			sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
-					&n, sizeof(n));
+					sizeof(n));
+			sctp_addto_chunk(*errp, sizeof(n), &n);
 			*error = -SCTP_IERROR_STALE_COOKIE;
 		} else
 			*error = -SCTP_IERROR_NOMEM;

@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
 		report.num_missing = htonl(1);
 		report.type = paramtype;
 		sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
-				&report, sizeof(report));
+				sizeof(report));
+		sctp_addto_chunk(*errp, sizeof(report), &report);
 	}
 
 	/* Stop processing this chunk. */

@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
 	*errp = sctp_make_op_error_space(asoc, chunk, 0);
 
 	if (*errp)
-		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
 
 	/* Stop processing this chunk. */
 	return 0;

@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
 	*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
 
 	if (*errp) {
-		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
-				sizeof(error));
-		sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+				sizeof(error) + sizeof(sctp_paramhdr_t));
+		sctp_addto_chunk(*errp, sizeof(error), error);
+		sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
 	}
 
 	return 0;

@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
 	if (!*errp)
 		*errp = sctp_make_op_error_space(asoc, chunk, len);
 
-	if (*errp)
-		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
-				param.v, len);
+	if (*errp) {
+		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+		sctp_addto_chunk(*errp, len, param.v);
+	}
 
 	/* Stop processing this chunk. */
 	return 0;

@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 		*errp = sctp_make_op_error_space(asoc, chunk,
 					ntohs(chunk->chunk_hdr->length));
 
-		if (*errp)
+		if (*errp) {
 			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					param.v,
 					WORD_ROUND(ntohs(param.p->length)));
+			sctp_addto_chunk(*errp,
+					WORD_ROUND(ntohs(param.p->length)),
+					param.v);
+		}
 
 		break;
 	case SCTP_PARAM_ACTION_SKIP:

@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 
 		if (*errp) {
 			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					param.v,
 					WORD_ROUND(ntohs(param.p->length)));
+			sctp_addto_chunk(*errp,
+					WORD_ROUND(ntohs(param.p->length)),
+					param.v);
 		} else {
 			/* If there is no memory for generating the ERROR
 			 * report as specified, an ABORT will be triggered

@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
 	 * VIOLATION error.  We build the ERROR chunk here and let the normal
 	 * error handling code build and send the packet.
 	 */
-	if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+	if (param.v != (void*)chunk->chunk_end) {
 		sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
 		return 0;
 	}

@@ -1013,8 +1013,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
 		break;
 
 	case SCTP_DISPOSITION_VIOLATION:
-		printk(KERN_ERR "sctp protocol violation state %d "
-		       "chunkid %d\n", state, subtype.chunk);
+		if (net_ratelimit())
+			printk(KERN_ERR "sctp protocol violation state %d "
+			       "chunkid %d\n", state, subtype.chunk);
 		break;
 
 	case SCTP_DISPOSITION_NOT_IMPL:

@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 		/* Move the Cumulattive TSN Ack ahead. */
 		sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
 
+		/* purge the fragmentation queue */
+		sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
 		/* Abort any in progress partial delivery. */
 		sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
 		break;

@@ -264,7 +264,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 	struct sctp_chunk *err_chunk;
 	struct sctp_packet *packet;
 	sctp_unrecognized_param_t *unk_param;
-	struct sock *sk;
 	int len;
 
 	/* 6.10 Bundling

@@ -285,16 +284,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 	if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
 		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
-	sk = ep->base.sk;
-	/* If the endpoint is not listening or if the number of associations
-	 * on the TCP-style socket exceed the max backlog, respond with an
-	 * ABORT.
-	 */
-	if (!sctp_sstate(sk, LISTENING) ||
-	    (sctp_style(sk, TCP) &&
-	     sk_acceptq_is_full(sk)))
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
 	/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
 	 * Tag.
 	 */

@@ -590,6 +579,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
 	struct sctp_ulpevent *ev, *ai_ev = NULL;
 	int error = 0;
 	struct sctp_chunk *err_chk_p;
+	struct sock *sk;
 
 	/* If the packet is an OOTB packet which is temporarily on the
 	 * control endpoint, respond with an ABORT.

@@ -605,6 +595,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
+	/* If the endpoint is not listening or if the number of associations
+	 * on the TCP-style socket exceed the max backlog, respond with an
+	 * ABORT.
+	 */
+	sk = ep->base.sk;
+	if (!sctp_sstate(sk, LISTENING) ||
+	    (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
 	/* "Decode" the chunk.  We have no optional parameters so we
 	 * are in good shape.
 	 */

@@ -1032,19 +1031,21 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 	/* This should never happen, but lets log it if so.  */
 	if (unlikely(!link)) {
 		if (from_addr.sa.sa_family == AF_INET6) {
-			printk(KERN_WARNING
-			       "%s association %p could not find address "
-			       NIP6_FMT "\n",
-			       __FUNCTION__,
-			       asoc,
-			       NIP6(from_addr.v6.sin6_addr));
+			if (net_ratelimit())
+				printk(KERN_WARNING
+				       "%s association %p could not find address "
+				       NIP6_FMT "\n",
+				       __FUNCTION__,
+				       asoc,
+				       NIP6(from_addr.v6.sin6_addr));
 		} else {
-			printk(KERN_WARNING
-			       "%s association %p could not find address "
-			       NIPQUAD_FMT "\n",
-			       __FUNCTION__,
-			       asoc,
-			       NIPQUAD(from_addr.v4.sin_addr.s_addr));
+			if (net_ratelimit())
+				printk(KERN_WARNING
+				       "%s association %p could not find address "
+				       NIPQUAD_FMT "\n",
+				       __FUNCTION__,
+				       asoc,
+				       NIPQUAD(from_addr.v4.sin_addr.s_addr));
 		}
 		return SCTP_DISPOSITION_DISCARD;
 	}

@@ -3362,7 +3363,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(sctp_errhdr_t));
 		if (abort) {
-			sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+			sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(abort));
 		}

@@ -3392,7 +3393,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(sctp_errhdr_t));
 		if (abort) {
-			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(abort));
 		}

@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 	 * The function sctp_get_port_local() does duplicate address
 	 * detection.
 	 */
+	addr->v4.sin_port = htons(snum);
 	if ((ret = sctp_get_port_local(sk, addr))) {
 		if (ret == (long) sk) {
 			/* This endpoint has a conflicting address. */

@@ -5202,6 +5203,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 
 		sctp_unhash_endpoint(ep);
+		sk->sk_state = SCTP_SS_CLOSED;
 		return 0;
 	}
 
 	/* Return if we are already listening. */

@@ -5249,6 +5251,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
 
 		sctp_unhash_endpoint(ep);
+		sk->sk_state = SCTP_SS_CLOSED;
 		return 0;
 	}
 
 	if (sctp_sstate(sk, LISTENING))

@@ -659,6 +659,46 @@ done:
 	return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue.  The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+	struct sk_buff *pos, *tmp;
+	struct sctp_ulpevent *event;
+	__u32 tsn;
+
+	if (skb_queue_empty(&ulpq->reasm))
+		return;
+
+	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+		event = sctp_skb2event(pos);
+		tsn = event->tsn;
+
+		/* Since the entire message must be abandoned by the
+		 * sender (item A3 in Section 3.5, RFC 3758), we can
+		 * free all fragments on the list that are less then
+		 * or equal to ctsn_point
+		 */
+		if (TSN_lte(tsn, fwd_tsn)) {
+			__skb_unlink(pos, &ulpq->reasm);
+			sctp_ulpevent_free(event);
+		} else
+			break;
+	}
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an an incoming chunk.
  */

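TSN_lte() in the new function is serial-number arithmetic: the 32-bit difference is interpreted as signed, so the comparison stays correct when the TSN counter wraps. A self-contained demonstration, with the macro written out locally in the style of the kernel's TSN helpers and arbitrary example values:

#include <stdio.h>
#include <stdint.h>

#define TSN_lte(a, b) (((int32_t)((uint32_t)(a) - (uint32_t)(b))) <= 0)

int main(void)
{
	printf("%d\n", TSN_lte(5, 10));			/* 1: plainly lower */
	printf("%d\n", TSN_lte(0xfffffffeu, 3));	/* 1: lower even across wraparound */
	printf("%d\n", TSN_lte(10, 5));			/* 0: higher */
	return 0;
}
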
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;

@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		csid = cevent->stream;
 		cssn = cevent->ssn;
 
-		if (cssn != sctp_ssn_peek(in, csid))
+		/* Have we gone too far?  */
+		if (csid > sid)
 			break;
 
-		/* Found it, so mark in the ssnmap. */
-		sctp_ssn_next(in, csid);
+		/* Have we not gone far enough?  */
+		if (csid < sid)
+			continue;
+
+		/* see if this ssn has been marked by skipping */
+		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+			break;
 
 		__skb_unlink(pos, &ulpq->lobby);
-		if (!event) {
+		if (!event)
 			/* Create a temporary list to collect chunks on.  */
 			event = sctp_skb2event(pos);
-			__skb_queue_tail(&temp, sctp_event2skb(event));
-		} else {
-			/* Attach all gathered skbs to the event.  */
-			__skb_queue_tail(&temp, pos);
-		}
+
+		/* Attach all gathered skbs to the event.  */
+		__skb_queue_tail(&temp, pos);
 	}
 
 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		/* see if we have more ordered that we can deliver */
+		sctp_ulpq_retrieve_ordered(ulpq, event);
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * Forwared TSN chunk to skip over the abandoned ordered data
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
 	struct sctp_stream *in;

@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	/* Go find any other chunks that were waiting for
 	 * ordering and deliver them if needed.
 	 */
-	sctp_ulpq_reap_ordered(ulpq);
+	sctp_ulpq_reap_ordered(ulpq, sid);
 	return;
 }
 