dccp ccid-3: Update the RX history records in one place
This patch is a requirement for enabling ECN support later on. With that change in mind, the following preparations are done:
 * renamed handle_loss() to congestion_event(), since it returns true when a congestion event happens (it will eventually also take care of ECN packets);
 * lets tfrc_rx_congestion_event() always update the RX history records, since this routine needs to be called for each non-duplicate packet anyway;
 * made all involved boolean-type functions have return type `bool'.

Updating the RX history records is now only necessary for the packets received up to sending the first feedback. The receiver code again becomes simpler.

Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
commit 88e97a9334 (parent 68c89ee535)
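The standalone sketch below (not kernel code; all types and helper names are simplified stand-ins invented for illustration) shows the calling convention the second bullet describes: the congestion-event routine records every non-duplicate packet in the RX history itself and reports via a bool whether feedback is due, so the caller only branches on the return value and never touches the history.

/*
 * Illustrative sketch only: a "congestion event" helper that both updates
 * the receiver history and signals, through its bool result, whether the
 * caller must send feedback.  Mirrors the pattern of this patch, not its API.
 */
#include <stdbool.h>
#include <stdio.h>

struct rx_hist { unsigned int packets_seen; };

struct packet {
        unsigned long seqno;
        bool lost_gap_before;   /* simulated loss indication */
};

/* Always updates the history; returns true when a (simulated) loss event occurs. */
static bool rx_congestion_event(struct rx_hist *h, const struct packet *pkt)
{
        h->packets_seen++;              /* history update now lives here */
        return pkt->lost_gap_before;    /* "congestion event" indication */
}

/* Caller only branches on the result; no history bookkeeping of its own. */
static void packet_recv(struct rx_hist *h, const struct packet *pkt)
{
        if (rx_congestion_event(h, pkt))
                printf("seq %lu: send feedback (param change)\n", pkt->seqno);
        else
                printf("seq %lu: no feedback needed\n", pkt->seqno);
}

int main(void)
{
        struct rx_hist hist = { 0 };
        struct packet p1 = { 1, false }, p2 = { 3, true };

        packet_recv(&hist, &p1);
        packet_recv(&hist, &p2);
        printf("history holds %u packets\n", hist.packets_seen);
        return 0;
}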
net/dccp/ccids/ccid3.c
@@ -657,41 +657,26 @@ failed:
 static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
         struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
-        enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE;
         const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp;
         const bool is_data_packet = dccp_data_packet(skb);
 
         /*
          * Perform loss detection and handle pending losses
          */
-        if (tfrc_rx_handle_loss(&hcrx->hist, &hcrx->li_hist,
-                                skb, ndp, ccid3_first_li, sk)) {
-                do_feedback = CCID3_FBACK_PARAM_CHANGE;
-                goto done_receiving;
-        }
-
-        if (unlikely(hcrx->feedback == CCID3_FBACK_NONE)) {
-                if (is_data_packet)
-                        do_feedback = CCID3_FBACK_INITIAL;
-                goto update_records;
-        }
-
-        if (tfrc_rx_hist_loss_pending(&hcrx->hist))
-                return; /* done receiving */
-
+        if (tfrc_rx_congestion_event(&hcrx->hist, &hcrx->li_hist,
+                                     skb, ndp, ccid3_first_li, sk))
+                ccid3_hc_rx_send_feedback(sk, skb, CCID3_FBACK_PARAM_CHANGE);
+        /*
+         * Feedback for first non-empty data packet (RFC 3448, 6.3)
+         */
+        else if (unlikely(hcrx->feedback == CCID3_FBACK_NONE && is_data_packet))
+                ccid3_hc_rx_send_feedback(sk, skb, CCID3_FBACK_INITIAL);
         /*
          * Check if the periodic once-per-RTT feedback is due; RFC 4342, 10.3
          */
-        if (is_data_packet &&
+        else if (!tfrc_rx_hist_loss_pending(&hcrx->hist) && is_data_packet &&
             SUB16(dccp_hdr(skb)->dccph_ccval, hcrx->last_counter) > 3)
-                do_feedback = CCID3_FBACK_PERIODIC;
-
-update_records:
-        tfrc_rx_hist_add_packet(&hcrx->hist, skb, ndp);
-
-done_receiving:
-        if (do_feedback)
-                ccid3_hc_rx_send_feedback(sk, skb, do_feedback);
+                ccid3_hc_rx_send_feedback(sk, skb, CCID3_FBACK_PERIODIC);
 }
 
 static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
net/dccp/ccids/lib/loss_interval.c
@@ -140,18 +140,18 @@ static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
  * @sk: Used by @calc_first_li in caller-specific way (subtyping)
  * Updates I_mean and returns 1 if a new interval has in fact been added to @lh.
  */
-int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
+bool tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
                          u32 (*calc_first_li)(struct sock *), struct sock *sk)
 {
         struct tfrc_loss_interval *cur = tfrc_lh_peek(lh), *new;
 
         if (cur != NULL && !tfrc_lh_is_new_loss(cur, tfrc_rx_hist_loss_prev(rh)))
-                return 0;
+                return false;
 
         new = tfrc_lh_demand_next(lh);
         if (unlikely(new == NULL)) {
                 DCCP_CRIT("Cannot allocate/add loss record.");
-                return 0;
+                return false;
         }
 
         new->li_seqno = tfrc_rx_hist_loss_prev(rh)->tfrchrx_seqno;
@@ -169,7 +169,7 @@ int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
 
                 tfrc_lh_calc_i_mean(lh);
         }
-        return 1;
+        return true;
 }
 EXPORT_SYMBOL_GPL(tfrc_lh_interval_add);
 
net/dccp/ccids/lib/loss_interval.h
@@ -67,7 +67,7 @@ static inline u8 tfrc_lh_length(struct tfrc_loss_hist *lh)
 
 struct tfrc_rx_hist;
 
-extern int tfrc_lh_interval_add(struct tfrc_loss_hist *, struct tfrc_rx_hist *,
+extern bool tfrc_lh_interval_add(struct tfrc_loss_hist *, struct tfrc_rx_hist *,
                                 u32 (*first_li)(struct sock *), struct sock *);
 extern void tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *);
 extern void tfrc_lh_cleanup(struct tfrc_loss_hist *lh);
net/dccp/ccids/lib/packet_history.c
@@ -192,10 +192,8 @@ static void __do_track_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n1)
         u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
             s1 = DCCP_SKB_CB(skb)->dccpd_seq;
 
-        if (!dccp_loss_free(s0, s1, n1)) { /* gap between S0 and S1 */
+        if (!dccp_loss_free(s0, s1, n1))   /* gap between S0 and S1 */
                 h->loss_count = 1;
-                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n1);
-        }
 }
 
 static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n2)
@@ -328,13 +326,13 @@ static void __three_after_loss(struct tfrc_rx_hist *h)
 }
 
 /**
- * tfrc_rx_handle_loss - Loss detection and further processing
+ * tfrc_rx_congestion_event - Loss detection and further processing
  * @h: The non-empty RX history object
  * @lh: Loss Intervals database to update
  * @skb: Currently received packet
  * @ndp: The NDP count belonging to @skb
- * @calc_first_li: Caller-dependent computation of first loss interval in @lh
+ * @first_li: Caller-dependent computation of first loss interval in @lh
  * @sk: Used by @calc_first_li (see tfrc_lh_interval_add)
  * Chooses action according to pending loss, updates LI database when a new
  * loss was detected, and does required post-processing. Returns 1 when caller
  * should send feedback, 0 otherwise.
@@ -342,12 +340,12 @@ static void __three_after_loss(struct tfrc_rx_hist *h)
  * records accordingly, the caller should not perform any more RX history
  * operations when loss_count is greater than 0 after calling this function.
  */
-int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
+bool tfrc_rx_congestion_event(struct tfrc_rx_hist *h,
                         struct tfrc_loss_hist *lh,
                         struct sk_buff *skb, const u64 ndp,
-                        u32 (*calc_first_li)(struct sock *), struct sock *sk)
+                        u32 (*first_li)(struct sock *), struct sock *sk)
 {
-        int is_new_loss = 0;
+        bool new_event = false;
 
         if (tfrc_rx_hist_duplicate(h, skb))
                 return 0;
@@ -355,6 +353,7 @@ int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
         if (h->loss_count == 0) {
                 __do_track_loss(h, skb, ndp);
                 tfrc_rx_hist_sample_rtt(h, skb);
+                tfrc_rx_hist_add_packet(h, skb, ndp);
         } else if (h->loss_count == 1) {
                 __one_after_loss(h, skb, ndp);
         } else if (h->loss_count != 2) {
@@ -363,7 +362,7 @@ int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
                 /*
                  * Update Loss Interval database and recycle RX records
                  */
-                is_new_loss = tfrc_lh_interval_add(lh, h, calc_first_li, sk);
+                new_event = tfrc_lh_interval_add(lh, h, first_li, sk);
                 __three_after_loss(h);
         }
 
@@ -378,12 +377,12 @@ int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
         }
 
         /* RFC 3448, 6.1: update I_0, whose growth implies p <= p_prev */
-        if (!is_new_loss)
+        if (!new_event)
                 tfrc_lh_update_i_mean(lh, skb);
 
-        return is_new_loss;
+        return new_event;
 }
-EXPORT_SYMBOL_GPL(tfrc_rx_handle_loss);
+EXPORT_SYMBOL_GPL(tfrc_rx_congestion_event);
 
 /* Compute the sending rate X_recv measured between feedback intervals */
 u32 tfrc_rx_hist_x_recv(struct tfrc_rx_hist *h, const u32 last_x_recv)
net/dccp/ccids/lib/packet_history.h
@@ -186,11 +186,11 @@ extern void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
 extern int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb);
 
 struct tfrc_loss_hist;
-extern int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
+extern bool tfrc_rx_congestion_event(struct tfrc_rx_hist *h,
                                struct tfrc_loss_hist *lh,
                                struct sk_buff *skb, const u64 ndp,
                                u32 (*first_li)(struct sock *sk),
                                struct sock *sk);
 extern void tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h,
                                     const struct sk_buff *skb);
 extern int tfrc_rx_hist_init(struct tfrc_rx_hist *h, struct sock *sk);