author    Vlad Yasevich <vladislav.yasevich@hp.com>  2007-07-13 17:01:19 -0400
committer Vlad Yasevich <vladislav.yasevich@hp.com>  2007-08-29 13:34:33 -0400
commit    ea2dfb3733d53ac98b17756435d1f99e25490357 (patch)
tree      9a70c036bcf1ed57a059efa245cbb63f300db0c5 /net
parent    b07d68b5ca4d55a16fab223d63d5fb36f89ff42f (diff)
SCTP: properly clean up fragment and ordering queues during FWD-TSN.
When we receive a FWD-TSN (meaning the peer has abandoned the data), we need to clean up any partially received messages that may be hanging out on the re-assembly or re-ordering queues. This is a MUST requirement that was not properly done before.

Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
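The flush added below decides which fragments and ordered chunks are stale using the kernel's serial-number comparisons (TSN_lte() for 32-bit TSNs, SSN_lt() for 16-bit stream sequence numbers), which stay correct across wraparound. As a refresher, a minimal standalone sketch of that arithmetic, simplified for illustration rather than lifted from the kernel headers:

    #include <stdint.h>

    /* Serial-number comparisons (RFC 1982 style): "s <= t" holds when
     * s == t or the unsigned difference s - t has its sign bit set.
     * This keeps the test correct when the TSN/SSN space wraps past zero.
     */
    static inline int tsn_lte(uint32_t s, uint32_t t)
    {
            return s == t || ((uint32_t)(s - t) & (1U << 31));
    }

    static inline int ssn_lt(uint16_t s, uint16_t t)
    {
            return ((uint16_t)(s - t) & (1U << 15)) != 0;
    }

With these semantics, a fragment whose TSN sits just below a wrapped cumulative-TSN point still compares as "earlier than or equal" and is eligible for the flush.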
Diffstat (limited to 'net')
-rw-r--r--  net/sctp/sm_sideeffect.c   3
-rw-r--r--  net/sctp/ulpqueue.c       75
2 files changed, 65 insertions, 13 deletions
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d9fad4f6ffc..1907318e70f 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1130,6 +1130,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
/* Move the Cumulative TSN Ack ahead. */
sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
+ /* purge the fragmentation queue */
+ sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
/* Abort any in progress partial delivery. */
sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
break;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 34eb977a204..fa0ba2a5564 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -659,6 +659,46 @@ done:
return retval;
}
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue. The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+ struct sk_buff *pos, *tmp;
+ struct sctp_ulpevent *event;
+ __u32 tsn;
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return;
+
+ skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+ event = sctp_skb2event(pos);
+ tsn = event->tsn;
+
+ /* Since the entire message must be abandoned by the
+ * sender (item A3 in Section 3.5, RFC 3758), we can
+ * free all fragments on the list that are less than
+ * or equal to ctsn_point
+ */
+ if (TSN_lte(tsn, fwd_tsn)) {
+ __skb_unlink(pos, &ulpq->reasm);
+ sctp_ulpevent_free(event);
+ } else
+ break;
+ }
+}
+
/* Helper function to gather skbs that have possibly become
* ordered by an incoming chunk.
*/
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
/* Helper function to gather skbs that have possibly become
* ordered by forward tsn skipping their dependencies.
*/
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
csid = cevent->stream;
cssn = cevent->ssn;
- if (cssn != sctp_ssn_peek(in, csid))
+ /* Have we gone too far? */
+ if (csid > sid)
break;
- /* Found it, so mark in the ssnmap. */
- sctp_ssn_next(in, csid);
+ /* Have we not gone far enough? */
+ if (csid < sid)
+ continue;
+
+ /* see if this ssn has been marked by skipping */
+ if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+ break;
__skb_unlink(pos, &ulpq->lobby);
- if (!event) {
+ if (!event)
/* Create a temporary list to collect chunks on. */
event = sctp_skb2event(pos);
- __skb_queue_tail(&temp, sctp_event2skb(event));
- } else {
- /* Attach all gathered skbs to the event. */
- __skb_queue_tail(&temp, pos);
- }
+
+ /* Attach all gathered skbs to the event. */
+ __skb_queue_tail(&temp, pos);
}
/* Send event to the ULP. 'event' is the sctp_ulpevent for
* the very first SKB on the 'temp' list.
*/
- if (event)
+ if (event) {
+ /* see if we have more ordered data that we can deliver */
+ sctp_ulpq_retrieve_ordered(ulpq, event);
sctp_ulpq_tail_event(ulpq, event);
+ }
}
-/* Skip over an SSN. */
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of a
+ * Forward TSN chunk to skip over abandoned ordered data.
+ */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
/* Go find any other chunks that were waiting for
* ordering and deliver them if needed.
*/
- sctp_ulpq_reap_ordered(ulpq);
+ sctp_ulpq_reap_ordered(ulpq, sid);
return;
}
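For readers who want to poke at the flush logic outside the kernel, here is a hedged userspace sketch of the walk that sctp_ulpq_reasm_flushtsn() performs: the queue stays sorted by TSN, every entry at or below the Forward TSN point is unlinked and freed, and the walk stops at the first survivor. struct frag and queue_flush_tsn() are hypothetical stand-ins for the kernel's skb queue and helpers, not real APIs.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for an skb holding one fragment on ulpq->reasm. */
    struct frag {
            uint32_t tsn;
            struct frag *next;
    };

    /* Serial-number "less than or equal", as in the kernel's TSN_lte(). */
    static int tsn_lte(uint32_t s, uint32_t t)
    {
            return s == t || ((uint32_t)(s - t) & (1U << 31));
    }

    /* Free every fragment whose TSN is at or below fwd_tsn. The queue is
     * sorted by TSN, so the first surviving entry ends the walk, just as
     * the break in sctp_ulpq_reasm_flushtsn() does.
     */
    static void queue_flush_tsn(struct frag **head, uint32_t fwd_tsn)
    {
            while (*head && tsn_lte((*head)->tsn, fwd_tsn)) {
                    struct frag *dead = *head;

                    *head = dead->next;
                    free(dead);     /* sctp_ulpevent_free() in the kernel */
            }
    }

    int main(void)
    {
            uint32_t tsns[] = { 10, 11, 13, 20 };   /* already sorted */
            struct frag *head = NULL, **tail = &head;

            for (size_t i = 0; i < sizeof(tsns) / sizeof(tsns[0]); i++) {
                    struct frag *f = malloc(sizeof(*f));

                    f->tsn = tsns[i];
                    f->next = NULL;
                    *tail = f;
                    tail = &f->next;
            }

            queue_flush_tsn(&head, 13);     /* peer abandoned through TSN 13 */

            for (struct frag *f = head; f; f = f->next)
                    printf("surviving TSN %" PRIu32 "\n", f->tsn);  /* 20 */
            return 0;
    }

The early break is also what the real patch relies on for cost: the flush touches only the abandoned prefix of the queue rather than scanning every queued fragment.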