Diffstat (limited to 'net/sctp')
-rw-r--r--  net/sctp/input.c          | 144
-rw-r--r--  net/sctp/inqueue.c        |   1
-rw-r--r--  net/sctp/sm_sideeffect.c  |  16
-rw-r--r--  net/sctp/sm_statefuns.c   | 140
-rw-r--r--  net/sctp/sm_statetable.c  |  10
-rw-r--r--  net/sctp/socket.c         |  29
-rw-r--r--  net/sctp/ulpqueue.c       |  27
7 files changed, 239 insertions(+), 128 deletions(-)
diff --git a/net/sctp/input.c b/net/sctp/input.c
index d117ebc75cf..1662f9cc869 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -73,6 +73,8 @@ static struct sctp_association *__sctp_lookup_association(
const union sctp_addr *peer,
struct sctp_transport **pt);
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
/* Calculate the SCTP checksum of an SCTP packet. */
static inline int sctp_rcv_checksum(struct sk_buff *skb)
@@ -186,7 +188,6 @@ int sctp_rcv(struct sk_buff *skb)
*/
if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
{
- sock_put(sk);
if (asoc) {
sctp_association_put(asoc);
asoc = NULL;
@@ -197,7 +198,6 @@ int sctp_rcv(struct sk_buff *skb)
sk = sctp_get_ctl_sock();
ep = sctp_sk(sk)->ep;
sctp_endpoint_hold(ep);
- sock_hold(sk);
rcvr = &ep->base;
}
@@ -253,25 +253,18 @@ int sctp_rcv(struct sk_buff *skb)
*/
sctp_bh_lock_sock(sk);
- /* It is possible that the association could have moved to a different
- * socket if it is peeled off. If so, update the sk.
- */
- if (sk != rcvr->sk) {
- sctp_bh_lock_sock(rcvr->sk);
- sctp_bh_unlock_sock(sk);
- sk = rcvr->sk;
- }
-
if (sock_owned_by_user(sk))
- sk_add_backlog(sk, skb);
+ sctp_add_backlog(sk, skb);
else
- sctp_backlog_rcv(sk, skb);
+ sctp_inq_push(&chunk->rcvr->inqueue, chunk);
- /* Release the sock and the sock ref we took in the lookup calls.
- * The asoc/ep ref will be released in sctp_backlog_rcv.
- */
sctp_bh_unlock_sock(sk);
- sock_put(sk);
+
+ /* Release the asoc/ep ref we took in the lookup calls. */
+ if (asoc)
+ sctp_association_put(asoc);
+ else
+ sctp_endpoint_put(ep);
return 0;
@@ -280,8 +273,7 @@ discard_it:
return 0;
discard_release:
- /* Release any structures we may be holding. */
- sock_put(sk);
+ /* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
@@ -290,56 +282,87 @@ discard_release:
goto discard_it;
}
-/* Handle second half of inbound skb processing. If the sock was busy,
- * we may have need to delay processing until later when the sock is
- * released (on the backlog). If not busy, we call this routine
- * directly from the bottom half.
+/* Process the backlog queue of the socket. Every skb on
+ * the backlog holds a ref on an association or endpoint.
+ * We hold this ref throughout the state machine to make
+ * sure that the structure we need is still around.
*/
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
- struct sctp_inq *inqueue = NULL;
+ struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
struct sctp_ep_common *rcvr = NULL;
+ int backloged = 0;
rcvr = chunk->rcvr;
- BUG_TRAP(rcvr->sk == sk);
-
- if (rcvr->dead) {
- sctp_chunk_free(chunk);
- } else {
- inqueue = &chunk->rcvr->inqueue;
- sctp_inq_push(inqueue, chunk);
- }
-
- /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */
- if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
- sctp_association_put(sctp_assoc(rcvr));
- else
- sctp_endpoint_put(sctp_ep(rcvr));
-
+ /* If the rcvr is dead then the association or endpoint
+ * has been deleted and we can safely drop the chunk
+ * and refs that we are holding.
+ */
+ if (rcvr->dead) {
+ sctp_chunk_free(chunk);
+ goto done;
+ }
+
+ if (unlikely(rcvr->sk != sk)) {
+ /* In this case, the association moved from one socket to
+ * another. We are currently sitting on the backlog of the
+ * old socket, so we need to move.
+ * However, since we are here in process context we
+ * need to make sure that the user doesn't own
+ * the new socket when we process the packet.
+ * If the new socket is user-owned, queue the chunk to the
+ * backlog of the new socket without dropping any refs.
+ * Otherwise, we can safely push the chunk on the inqueue.
+ */
+
+ sk = rcvr->sk;
+ sctp_bh_lock_sock(sk);
+
+ if (sock_owned_by_user(sk)) {
+ sk_add_backlog(sk, skb);
+ backloged = 1;
+ } else
+ sctp_inq_push(inqueue, chunk);
+
+ sctp_bh_unlock_sock(sk);
+
+ /* If the chunk was backloged again, don't drop refs */
+ if (backloged)
+ return 0;
+ } else {
+ sctp_inq_push(inqueue, chunk);
+ }
+
+done:
+ /* Release the refs we took in sctp_add_backlog */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_association_put(sctp_assoc(rcvr));
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_put(sctp_ep(rcvr));
+ else
+ BUG();
+
return 0;
}
-void sctp_backlog_migrate(struct sctp_association *assoc,
- struct sock *oldsk, struct sock *newsk)
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
- struct sk_buff *skb;
- struct sctp_chunk *chunk;
+ struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+ struct sctp_ep_common *rcvr = chunk->rcvr;
- skb = oldsk->sk_backlog.head;
- oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
- while (skb != NULL) {
- struct sk_buff *next = skb->next;
-
- chunk = SCTP_INPUT_CB(skb)->chunk;
- skb->next = NULL;
- if (&assoc->base == chunk->rcvr)
- sk_add_backlog(newsk, skb);
- else
- sk_add_backlog(oldsk, skb);
- skb = next;
- }
+ /* Hold the assoc/ep while hanging on the backlog queue.
+ * This way, the structures we need will not disappear from under us.
+ */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_association_hold(sctp_assoc(rcvr));
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_hold(sctp_ep(rcvr));
+ else
+ BUG();
+
+ sk_add_backlog(sk, skb);
}
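
The rewritten sctp_backlog_rcv() has three outcomes: free the chunk if the receiver died, requeue it to the backlog of the new socket when the association was peeled off to a user-owned socket (keeping the refs taken in sctp_add_backlog()), or push it onto the receiver's inqueue. A compilable user-space sketch of that decision, with invented stand-in types rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct sock { bool owned_by_user; };
struct rcvr { struct sock *sk; bool dead; };

/* Returns true when the chunk was requeued and keeps its refs;
 * false means the refs taken when it was backlogged get dropped.
 */
static bool backlog_rcv(struct rcvr *r, struct sock *sk)
{
	if (r->dead)
		return false;          /* free the chunk, drop the refs */

	if (r->sk != sk) {             /* association was peeled off */
		sk = r->sk;            /* lock the new socket instead */
		if (sk->owned_by_user)
			return true;   /* requeue: refs stay held */
	}
	/* push the chunk onto r's inqueue and run the state machine */
	return false;
}

int main(void)
{
	struct sock oldsk = { false }, newsk = { true };
	struct rcvr r = { &newsk, false };

	printf("requeued: %d\n", backlog_rcv(&r, &oldsk)); /* prints 1 */
	return 0;
}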
/* Handle icmp frag needed error. */
@@ -412,7 +435,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
union sctp_addr daddr;
struct sctp_af *af;
struct sock *sk = NULL;
- struct sctp_association *asoc = NULL;
+ struct sctp_association *asoc;
struct sctp_transport *transport = NULL;
*app = NULL; *tpp = NULL;
@@ -453,7 +476,6 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
return sk;
out:
- sock_put(sk);
if (asoc)
sctp_association_put(asoc);
return NULL;
@@ -463,7 +485,6 @@ out:
void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
sctp_bh_unlock_sock(sk);
- sock_put(sk);
if (asoc)
sctp_association_put(asoc);
}
@@ -490,7 +511,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
int type = skb->h.icmph->type;
int code = skb->h.icmph->code;
struct sock *sk;
- struct sctp_association *asoc;
+ struct sctp_association *asoc = NULL;
struct sctp_transport *transport;
struct inet_sock *inet;
char *saveip, *savesctp;
@@ -716,7 +737,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
hit:
sctp_endpoint_hold(ep);
- sock_hold(epb->sk);
read_unlock(&head->lock);
return ep;
}
@@ -818,7 +838,6 @@ static struct sctp_association *__sctp_lookup_association(
hit:
*pt = transport;
sctp_association_hold(asoc);
- sock_hold(epb->sk);
read_unlock(&head->lock);
return asoc;
}
@@ -846,7 +865,6 @@ int sctp_has_association(const union sctp_addr *laddr,
struct sctp_transport *transport;
if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
- sock_put(asoc->base.sk);
sctp_association_put(asoc);
return 1;
}
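
Taken together, the input.c changes replace the old sock_hold()/sock_put() pairs with one rule: the lookup routines pin the association or endpoint they return, and sctp_rcv() (or the backlog machinery) drops that pin once the chunk is queued. A minimal model of the new pairing, with hypothetical refcounting helpers standing in for the kernel's:

#include <stdio.h>

struct refcounted { int refs; const char *name; };

static void hold(struct refcounted *r) { r->refs++; }

static void put(struct refcounted *r)
{
	if (--r->refs == 0)
		printf("%s freed\n", r->name);
}

/* The lookup pins the receiver (asoc or ep); no socket ref is taken. */
static struct refcounted *lookup(struct refcounted *rcvr)
{
	hold(rcvr);
	return rcvr;
}

int main(void)
{
	struct refcounted asoc = { 1, "asoc" };
	struct refcounted *rcvr = lookup(&asoc);

	/* ... queue the chunk to rcvr's inqueue under the socket lock ... */

	put(rcvr);	/* release the lookup ref; no sock_put() to balance */
	put(&asoc);	/* owner drops its ref; "asoc freed" prints here */
	return 0;
}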
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 297b8951463..cf0c767d43a 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -149,6 +149,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
/* This is the first chunk in the packet. */
chunk->singleton = 1;
ch = (sctp_chunkhdr_t *) chunk->skb->data;
+ chunk->data_accepted = 0;
}
chunk->chunk_hdr = ch;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8d1dc24bab4..c5beb2ad7ef 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -498,10 +498,6 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- /* Set sk_err to ECONNRESET on a 1-1 style socket. */
- if (!sctp_style(asoc->base.sk, UDP))
- asoc->base.sk->sk_err = ECONNRESET;
-
/* SEND_FAILED sent later when cleaning up the association. */
asoc->outqueue.error = error;
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
@@ -838,6 +834,15 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
return;
}
+/* Helper function to set sk_err on a 1-1 style socket. */
+static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
+{
+ struct sock *sk = asoc->base.sk;
+
+ if (!sctp_style(sk, UDP))
+ sk->sk_err = error;
+}
+
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
@@ -1458,6 +1463,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
local_cork = 0;
asoc->peer.retran_path = t;
break;
+ case SCTP_CMD_SET_SK_ERR:
+ sctp_cmd_set_sk_err(asoc, cmd->obj.error);
+ break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
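
The new SCTP_CMD_SET_SK_ERR command centralizes the "set sk_err, but only on a 1-1 (TCP-style) socket" rule that used to be hard-coded in sctp_cmd_assoc_failed(); state functions now queue the command with whatever errno fits the failure. A toy model of this command/interpreter pattern, with stand-in types (not the kernel's sctp_cmd_seq_t):

#include <errno.h>
#include <stdio.h>

enum style { STYLE_UDP, STYLE_TCP };
enum verb  { CMD_SET_SK_ERR, CMD_DELETE_TCB };

struct sock { enum style style; int sk_err; };
struct cmd  { enum verb verb; int error; };

static void set_sk_err(struct sock *sk, int error)
{
	if (sk->style != STYLE_UDP)	/* only 1-1 sockets report sk_err */
		sk->sk_err = error;
}

static void interpret(struct sock *sk, const struct cmd *cmds, int n)
{
	for (int i = 0; i < n; i++)
		switch (cmds[i].verb) {
		case CMD_SET_SK_ERR:
			set_sk_err(sk, cmds[i].error);
			break;
		case CMD_DELETE_TCB:
			/* tear down the association */
			break;
		}
}

int main(void)
{
	struct sock sk = { STYLE_TCP, 0 };
	struct cmd cmds[] = {
		{ CMD_SET_SK_ERR, ETIMEDOUT },
		{ CMD_DELETE_TCB, 0 },
	};

	interpret(&sk, cmds, 2);
	printf("sk_err = %d\n", sk.sk_err);	/* 110 (ETIMEDOUT) on Linux */
	return 0;
}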
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 2b9a832b29a..8bc279219a7 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -93,7 +93,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
- __u16 error,
+ __u16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport);
@@ -448,7 +448,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
__u32 init_tag;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
- sctp_disposition_t ret;
+ __u16 error;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -480,11 +480,9 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
- return SCTP_DISPOSITION_DELETE_TCB;
+ return sctp_stop_t1_and_abort(commands, SCTP_ERROR_INV_PARAM,
+ ECONNREFUSED, asoc,
+ chunk->transport);
}
/* Verify the INIT chunk before processing it. */
@@ -511,27 +509,16 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
- SCTP_NULL());
- return SCTP_DISPOSITION_CONSUME;
+ error = SCTP_ERROR_INV_PARAM;
} else {
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
- SCTP_NULL());
- return SCTP_DISPOSITION_NOMEM;
+ error = SCTP_ERROR_NO_RESOURCE;
}
} else {
- ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
- commands);
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
- SCTP_NULL());
- return ret;
+ sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+ error = SCTP_ERROR_INV_PARAM;
}
+ return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
+ asoc, chunk->transport);
}
/* Tag the variable length parameters. Note that we never
@@ -636,8 +623,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
*/
chunk->subh.cookie_hdr =
(struct sctp_signed_cookie *)chunk->skb->data;
- skb_pull(chunk->skb,
- ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t));
+ if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
+ sizeof(sctp_chunkhdr_t)))
+ goto nomem;
/* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint
* "Z" will reply with a COOKIE ACK chunk after building a TCB
@@ -885,6 +873,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
struct sctp_transport *transport = (struct sctp_transport *) arg;
if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -965,7 +955,8 @@ sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
*/
chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data;
paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
- skb_pull(chunk->skb, paylen);
+ if (!pskb_pull(chunk->skb, paylen))
+ goto nomem;
reply = sctp_make_heartbeat_ack(asoc, chunk,
chunk->subh.hb_hdr, paylen);
@@ -1028,6 +1019,12 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
commands);
hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
+ /* Make sure that the length of the parameter is what we expect */
+ if (ntohs(hbinfo->param_hdr.length) !=
+ sizeof(sctp_sender_hb_info_t)) {
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
from_addr = hbinfo->daddr;
link = sctp_assoc_lookup_paddr(asoc, &from_addr);
@@ -1860,8 +1857,9 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
* are in good shape.
*/
chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
- skb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
- sizeof(sctp_chunkhdr_t));
+ if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
+ sizeof(sctp_chunkhdr_t)))
+ goto nomem;
/* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie
* of a duplicate COOKIE ECHO match the Verification Tags of the
@@ -2123,6 +2121,8 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
int attempts = asoc->init_err_counter + 1;
if (attempts > asoc->max_init_attempts) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_STALE_COOKIE));
return SCTP_DISPOSITION_DELETE_TCB;
@@ -2259,6 +2259,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -2303,7 +2304,8 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
- return sctp_stop_t1_and_abort(commands, error, asoc, chunk->transport);
+ return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
+ chunk->transport);
}
/*
@@ -2315,7 +2317,8 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep
void *arg,
sctp_cmd_seq_t *commands)
{
- return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, asoc,
+ return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
+ ENOPROTOOPT, asoc,
(struct sctp_transport *)arg);
}
@@ -2340,7 +2343,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
* This is common code called by several sctp_sf_*_abort() functions above.
*/
static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
- __u16 error,
+ __u16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport)
{
@@ -2350,6 +2353,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
/* CMD_INIT_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(error));
@@ -3333,6 +3337,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3359,6 +3365,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3711,9 +3719,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNREFUSED));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
} else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -4031,6 +4043,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
* TCB. This is a departure from our typical NOMEM handling.
*/
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4172,6 +4186,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
* TCB. This is a departure from our typical NOMEM handling.
*/
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNREFUSED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4540,6 +4556,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
struct sctp_transport *transport = arg;
if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4659,6 +4677,8 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d"
" max_init_attempts: %d\n",
attempts, asoc->max_init_attempts);
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
@@ -4708,6 +4728,8 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
} else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
@@ -4739,6 +4761,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4814,6 +4838,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -4867,6 +4893,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -5151,7 +5179,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
int tmp;
__u32 tsn;
int account_value;
+ struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
struct sock *sk = asoc->base.sk;
+ int rcvbuf_over = 0;
data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5162,10 +5192,16 @@ static int sctp_eat_data(const struct sctp_association *asoc,
/* ASSERT: Now skb->data is really the user data. */
/*
- * if we are established, and we have used up our receive
- * buffer memory, drop the frame
- */
- if (asoc->state == SCTP_STATE_ESTABLISHED) {
+ * If we are established, and we have used up our receive buffer
+ * memory, think about dropping the frame.
+ * Note that we have an opportunity to improve performance here.
+ * If we accept one chunk from an skbuff, we have to keep all the
+ * memory of that skbuff around until the chunk is read into user
+ * space. Therefore, once we accept 1 chunk we may as well accept all
+ * remaining chunks in the skbuff. The data_accepted flag helps us do
+ * that.
+ */
+ if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) {
/*
* If the receive buffer policy is 1, then each
* association can allocate up to sk_rcvbuf bytes
@@ -5176,9 +5212,25 @@ static int sctp_eat_data(const struct sctp_association *asoc,
account_value = atomic_read(&asoc->rmem_alloc);
else
account_value = atomic_read(&sk->sk_rmem_alloc);
-
- if (account_value > sk->sk_rcvbuf)
- return SCTP_IERROR_IGNORE_TSN;
+ if (account_value > sk->sk_rcvbuf) {
+ /*
+ * We need to make forward progress, even when we are
+ * under memory pressure, so we always allow the
+ * next tsn after the ctsn ack point to be accepted.
+ * This lets us avoid deadlocks in which we have to
+ * drop frames that would otherwise let us drain the
+ * receive queue.
+ */
+ if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn)
+ return SCTP_IERROR_IGNORE_TSN;
+
+ /*
+ * We're going to accept the frame but we should renege
+ * to make space for it. This will send us down that
+ * path later in this function.
+ */
+ rcvbuf_over = 1;
+ }
}
/* Process ECN based congestion.
@@ -5226,6 +5278,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
datalen -= sizeof(sctp_data_chunk_t);
deliver = SCTP_CMD_CHUNK_ULP;
+ chunk->data_accepted = 1;
/* Think about partial delivery. */
if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
@@ -5242,7 +5295,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* large spill over.
*/
if (!asoc->rwnd || asoc->rwnd_over ||
- (datalen > asoc->rwnd + asoc->frag_point)) {
+ (datalen > asoc->rwnd + asoc->frag_point) ||
+ rcvbuf_over) {
/* If this is the next TSN, consider reneging to make
* room. Note: Playing nice with a confused sender. A
@@ -5250,8 +5304,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* space and in the future we may want to detect and
* do more drastic reneging.
*/
- if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
- (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
+ if (sctp_tsnmap_has_gap(map) &&
+ (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
deliver = SCTP_CMD_RENEGE;
} else {
@@ -5280,6 +5334,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_DATA));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
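
The sctp_eat_data() changes encode two rules: once one chunk from an skbuff has been accepted, the remaining chunks in that skbuff cost nothing extra, so they are accepted too (data_accepted); and even over the rcvbuf budget, the TSN immediately after the cumulative ack point is admitted, with a forced renege to make room, so the receiver can always drain forward. A self-contained sketch of that verdict (the function name, signature, and verdict names here are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum verdict { ACCEPT, IGNORE_TSN, ACCEPT_AND_RENEGE };

static enum verdict eat_data(uint32_t tsn, uint32_t ctsn,
			     long rmem, long rcvbuf, bool data_accepted)
{
	if (data_accepted)	/* this skbuff is already charged to us */
		return ACCEPT;

	if (rmem > rcvbuf) {
		if (tsn != ctsn + 1)	/* not the next in-order TSN */
			return IGNORE_TSN;
		/* Admit ctsn+1 so the cumulative ack can always advance,
		 * then renege out-of-order data to make room for it.
		 */
		return ACCEPT_AND_RENEGE;
	}
	return ACCEPT;
}

int main(void)
{
	/* Over budget, but the TSN is ctsn+1: accept and renege (2). */
	printf("%d\n", eat_data(101, 100, 1 << 20, 1 << 16, false));
	return 0;
}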
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 75ef1040876..8bcca567615 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -366,9 +366,9 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
/* SCTP_STATE_CLOSED */ \
- {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+ {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_COOKIE_WAIT */ \
- {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+ {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_COOKIE_ECHOED */ \
{.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \
/* SCTP_STATE_ESTABLISHED */ \
@@ -380,7 +380,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
{.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \
/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
- {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+ {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
} /* TYPE_SCTP_ECN_ECNE */
#define TYPE_SCTP_ECN_CWR { \
@@ -401,7 +401,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
- {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+ {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
} /* TYPE_SCTP_ECN_CWR */
#define TYPE_SCTP_SHUTDOWN_COMPLETE { \
@@ -647,7 +647,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
/* SCTP_STATE_EMPTY */ \
{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
/* SCTP_STATE_CLOSED */ \
- {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+ {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
/* SCTP_STATE_COOKIE_WAIT */ \
{.fn = sctp_sf_do_prm_requestheartbeat, \
.name = "sctp_sf_do_prm_requestheartbeat"}, \
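
These table fixes swap sctp_sf_bug for sctp_sf_discard_chunk (or sctp_sf_error_closed) in states where a late ECNE, CWR, or heartbeat primitive can legitimately race against association teardown, so a stray event is now discarded instead of hitting a bug handler. The table itself is a (chunk type x state) array of handlers; a toy row, with made-up names:

#include <stdio.h>

typedef const char *(*sm_fn)(void);

static const char *sf_discard_chunk(void) { return "discarded"; }
static const char *sf_do_ecne(void)       { return "ECNE processed"; }

enum state { ST_CLOSED, ST_ESTABLISHED, ST_NUM_STATES };

/* One row of the (chunk type x state) table, as in sm_statetable.c. */
static const sm_fn ecne_row[ST_NUM_STATES] = {
	[ST_CLOSED]      = sf_discard_chunk,	/* previously a bug handler */
	[ST_ESTABLISHED] = sf_do_ecne,
};

int main(void)
{
	printf("%s\n", ecne_row[ST_CLOSED]());	/* harmless discard */
	return 0;
}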
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b6e4b89539b..174d4d35e95 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1057,6 +1057,7 @@ static int __sctp_connect(struct sock* sk,
inet_sk(sk)->dport = htons(asoc->peer.port);
af = sctp_get_af_specific(to.sa.sa_family);
af->to_sk_daddr(&to, sk);
+ sk->sk_err = 0;
timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
err = sctp_wait_for_connect(asoc, &timeo);
@@ -1228,7 +1229,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
ep = sctp_sk(sk)->ep;
- /* Walk all associations on a socket, not on an endpoint. */
+ /* Walk all associations on an endpoint. */
list_for_each_safe(pos, temp, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1241,13 +1242,13 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
if (sctp_state(asoc, CLOSED)) {
sctp_unhash_established(asoc);
sctp_association_free(asoc);
+ continue;
+ }
+ }
- } else if (sock_flag(sk, SOCK_LINGER) &&
- !sk->sk_lingertime)
- sctp_primitive_ABORT(asoc, NULL);
- else
- sctp_primitive_SHUTDOWN(asoc, NULL);
- } else
+ if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)
+ sctp_primitive_ABORT(asoc, NULL);
+ else
sctp_primitive_SHUTDOWN(asoc, NULL);
}
@@ -5317,6 +5318,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
*/
sctp_release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
+ BUG_ON(sk != asoc->base.sk);
sctp_lock_sock(sk);
*timeo_p = current_timeo;
@@ -5604,12 +5606,14 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
*/
newsp->type = type;
- spin_lock_bh(&oldsk->sk_lock.slock);
- /* Migrate the backlog from oldsk to newsk. */
- sctp_backlog_migrate(assoc, oldsk, newsk);
- /* Migrate the association to the new socket. */
+ /* Mark the new socket "in-use" by the user so that any packets
+ * that may arrive on the association after we've moved it are
+ * queued to the backlog. This prevents a potential race between
+ * backlog processing on the old socket and new-packet processing
+ * on the new socket.
+ */
+ sctp_lock_sock(newsk);
sctp_assoc_migrate(assoc, newsk);
- spin_unlock_bh(&oldsk->sk_lock.slock);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
@@ -5618,6 +5622,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
newsk->sk_shutdown |= RCV_SHUTDOWN;
newsk->sk_state = SCTP_SS_ESTABLISHED;
+ sctp_release_sock(newsk);
}
/* This proto struct describes the ULP interface for SCTP. */
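
With sctp_backlog_migrate() gone, sctp_sock_migrate() no longer splices skbs between backlogs. Instead it locks the new socket "as user-owned" for the duration of the move, so packets racing in during migration are forced onto newsk's backlog, while skbs still sitting on oldsk's backlog are rerouted later by the rcvr->sk != sk check in sctp_backlog_rcv(). A rough pthread model of the idea (the lock helpers are stand-ins, not the kernel's lock_sock()):

#include <pthread.h>
#include <stdio.h>

struct sock  { pthread_mutex_t lock; int owned_by_user; };
struct assoc { struct sock *sk; };

static void lock_sock(struct sock *sk)
{
	pthread_mutex_lock(&sk->lock);
	sk->owned_by_user = 1;	/* input path must backlog, not process */
	pthread_mutex_unlock(&sk->lock);
}

static void release_sock(struct sock *sk)
{
	pthread_mutex_lock(&sk->lock);
	sk->owned_by_user = 0;	/* the kernel would also drain the backlog */
	pthread_mutex_unlock(&sk->lock);
}

static void migrate(struct assoc *a, struct sock *newsk)
{
	lock_sock(newsk);	/* packets racing in get backlogged */
	a->sk = newsk;		/* the actual move */
	release_sock(newsk);
}

int main(void)
{
	struct sock oldsk = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct sock newsk = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct assoc a = { &oldsk };

	migrate(&a, &newsk);
	printf("moved: %d\n", a.sk == &newsk);
	return 0;
}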
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 2080b2d28c9..575e556aeb3 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -279,6 +279,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
struct sk_buff *pos;
+ struct sk_buff *new = NULL;
struct sctp_ulpevent *event;
struct sk_buff *pnext, *last;
struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
@@ -297,11 +298,33 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
*/
if (last)
last->next = pos;
- else
- skb_shinfo(f_frag)->frag_list = pos;
+ else {
+ if (skb_cloned(f_frag)) {
+ /* This is a cloned skb, we can't just modify
+ * the frag_list. We need a new skb to do that.
+ * Instead of calling skb_unshare(), we'll do it
+ * ourselves since we need to delay the free.
+ */
+ new = skb_copy(f_frag, GFP_ATOMIC);
+ if (!new)
+ return NULL; /* try again later */
+
+ new->sk = f_frag->sk;
+
+ skb_shinfo(new)->frag_list = pos;
+ } else
+ skb_shinfo(f_frag)->frag_list = pos;
+ }
/* Remove the first fragment from the reassembly queue. */
__skb_unlink(f_frag, queue);
+
+ /* if we did unshare, then free the old skb and re-assign */
+ if (new) {
+ kfree_skb(f_frag);
+ f_frag = new;
+ }
+
while (pos) {
pnext = pos->next;
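
The ulpqueue change fixes an aliasing bug: skb_shinfo() data is shared between an skb and its clones, so writing frag_list on a cloned f_frag would corrupt the clone's view of the same shared tail. The patch therefore copies first, grafts the fragment chain onto the private copy, and frees the original only after it has been unlinked from the queue. A small refcounted model of that copy-before-write pattern (types invented for illustration):

#include <stdlib.h>

struct buf { int refs; struct buf *frag_list; };

static void buf_put(struct buf *b)
{
	if (--b->refs == 0)
		free(b);
}

/* Graft a fragment chain onto head, copying first if head is shared
 * (the moral equivalent of the skb_cloned()/skb_copy() dance above).
 * Returns the buffer that owns the chain, or NULL to retry later.
 */
static struct buf *graft_frags(struct buf *head, struct buf *frags)
{
	if (head->refs > 1) {			/* shared: don't touch it */
		struct buf *copy = malloc(sizeof(*copy));
		if (!copy)
			return NULL;		/* caller retries later */
		copy->refs = 1;
		copy->frag_list = frags;	/* safe: the copy is private */
		buf_put(head);			/* drop our ref on the original */
		return copy;
	}
	head->frag_list = frags;		/* already private: edit in place */
	return head;
}

int main(void)
{
	struct buf *orig = malloc(sizeof(*orig));
	struct buf frag = { 1, NULL };

	orig->refs = 2;				/* a "clone" also holds a ref */
	struct buf *head = graft_frags(orig, &frag);
	if (head && head != orig)
		buf_put(head);			/* free the private copy */
	buf_put(orig);				/* the clone drops its ref */
	return 0;
}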