sctp: add subscribe per asoc
The member 'subscribe' should be per asoc, so that the SCTP_EVENT sockopt introduced in the next patch can subscribe to an event from a single asoc only.

Signed-off-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2cc0eeb676
commit a1e3a0590f
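For context on what the socket-wide subscribe bitmap controls, below is a minimal userspace sketch (not part of this commit) of subscribing to SCTP notifications with the existing SCTP_EVENTS sockopt. It assumes the <netinet/sctp.h> header from lksctp-tools, and the helper name subscribe_events() is purely illustrative. After this patch, the subscription set this way is also mirrored into each association's new asoc->subscribe field, which is what the per-asoc SCTP_EVENT sockopt in the next patch will be able to override.

/* Illustrative userspace sketch: enable a few SCTP notifications
 * socket-wide via the long-standing SCTP_EVENTS sockopt.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* struct sctp_event_subscribe, SCTP_EVENTS (lksctp-tools) */

int subscribe_events(int fd)
{
	struct sctp_event_subscribe events;

	memset(&events, 0, sizeof(events));
	events.sctp_data_io_event = 1;		/* per-message SCTP_SNDRCV info */
	events.sctp_send_failure_event = 1;	/* SCTP_SEND_FAILED */
	events.sctp_partial_delivery_event = 1;	/* SCTP_PARTIAL_DELIVERY_EVENT */

	/* Socket-wide subscription; with this patch the kernel copies the
	 * resulting bitmap into every association on the socket.
	 */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
		       &events, sizeof(events)) < 0) {
		perror("setsockopt(SCTP_EVENTS)");
		return -1;
	}
	return 0;
}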
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -2077,6 +2077,8 @@ struct sctp_association {
 
 	int sent_cnt_removable;
 
+	__u16 subscribe;
+
 	__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
 	__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
 };
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -135,6 +135,8 @@ static struct sctp_association *sctp_association_init(
 	 */
 	asoc->max_burst = sp->max_burst;
 
+	asoc->subscribe = sp->subscribe;
+
 	/* initialize association timers */
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -86,11 +86,10 @@ void sctp_datamsg_free(struct sctp_datamsg *msg)
 /* Final destructruction of datamsg memory. */
 static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
 {
+	struct sctp_association *asoc = NULL;
 	struct list_head *pos, *temp;
 	struct sctp_chunk *chunk;
-	struct sctp_sock *sp;
 	struct sctp_ulpevent *ev;
-	struct sctp_association *asoc = NULL;
 	int error = 0, notify;
 
 	/* If we failed, we may need to notify. */
@@ -108,8 +107,7 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
 			else
 				error = asoc->outqueue.error;
 
-			sp = sctp_sk(asoc->base.sk);
-			notify = sctp_ulpevent_type_enabled(sp->subscribe,
+			notify = sctp_ulpevent_type_enabled(asoc->subscribe,
 							    SCTP_SEND_FAILED);
 		}
 
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2307,6 +2307,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 	struct sctp_event_subscribe subscribe;
 	__u8 *sn_type = (__u8 *)&subscribe;
 	struct sctp_sock *sp = sctp_sk(sk);
+	struct sctp_association *asoc;
 	int i;
 
 	if (optlen > sizeof(struct sctp_event_subscribe))
@@ -2319,14 +2320,17 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 		sctp_ulpevent_type_set(&sp->subscribe, SCTP_SN_TYPE_BASE + i,
 				       sn_type[i]);
 
+	list_for_each_entry(asoc, &sp->ep->asocs, asocs)
+		asoc->subscribe = sctp_sk(sk)->subscribe;
+
 	/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
 	 * if there is no data to be sent or retransmit, the stack will
 	 * immediately send up this notification.
 	 */
 	if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_SENDER_DRY_EVENT)) {
-		struct sctp_association *asoc = sctp_id2assoc(sk, 0);
 		struct sctp_ulpevent *event;
 
+		asoc = sctp_id2assoc(sk, 0);
 		if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
 			event = sctp_ulpevent_make_sender_dry_event(asoc,
 					GFP_USER | __GFP_NOWARN);
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -503,7 +503,7 @@ static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
 		sk_incoming_cpu_update(sk);
 	}
 
-	if (!sctp_ulpevent_is_enabled(event, sp->subscribe))
+	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
 		goto out_free;
 
 	if (skb_list)
@@ -992,16 +992,17 @@ static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
 			    __u32 mid, __u16 flags, gfp_t gfp)
 {
 	struct sock *sk = ulpq->asoc->base.sk;
-	struct sctp_sock *sp = sctp_sk(sk);
 	struct sctp_ulpevent *ev = NULL;
 
-	if (!sctp_ulpevent_type_enabled(sp->subscribe,
+	if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
 					SCTP_PARTIAL_DELIVERY_EVENT))
 		return;
 
 	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
 				      sid, mid, flags, gfp);
 	if (ev) {
+		struct sctp_sock *sp = sctp_sk(sk);
+
 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
 
 		if (!sp->data_ready_signalled) {
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -219,7 +219,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		sk_incoming_cpu_update(sk);
 	}
 	/* Check if the user wishes to receive this event. */
-	if (!sctp_ulpevent_is_enabled(event, sp->subscribe))
+	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
 		goto out_free;
 
 	/* If we are in partial delivery mode, post to the lobby until
@@ -1137,7 +1137,7 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 
 	sk = ulpq->asoc->base.sk;
 	sp = sctp_sk(sk);
-	if (sctp_ulpevent_type_enabled(sp->subscribe,
+	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
 				       SCTP_PARTIAL_DELIVERY_EVENT))
 		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
 					      SCTP_PARTIAL_DELIVERY_ABORTED,