Diffstat (limited to 'net/sctp')
-rw-r--r-- | net/sctp/Makefile | 3
-rw-r--r-- | net/sctp/associola.c | 63
-rw-r--r-- | net/sctp/auth.c | 938
-rw-r--r-- | net/sctp/chunk.c | 12
-rw-r--r-- | net/sctp/endpointola.c | 113
-rw-r--r-- | net/sctp/input.c | 65
-rw-r--r-- | net/sctp/inqueue.c | 19
-rw-r--r-- | net/sctp/ipv6.c | 10
-rw-r--r-- | net/sctp/objcnt.c | 2
-rw-r--r-- | net/sctp/output.c | 135
-rw-r--r-- | net/sctp/protocol.c | 47
-rw-r--r-- | net/sctp/sm_make_chunk.c | 338
-rw-r--r-- | net/sctp/sm_sideeffect.c | 5
-rw-r--r-- | net/sctp/sm_statefuns.c | 340
-rw-r--r-- | net/sctp/sm_statetable.c | 33
-rw-r--r-- | net/sctp/socket.c | 399
-rw-r--r-- | net/sctp/sysctl.c | 37
-rw-r--r-- | net/sctp/ulpevent.c | 55
-rw-r--r-- | net/sctp/ulpqueue.c | 1
19 files changed, 2455 insertions, 160 deletions
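
The new net/sctp/auth.c in the diff below derives the association shared key as described in SCTP-AUTH (RFC 4895), Section 6.1: each side's key vector (its RANDOM, CHUNKS and HMAC-ALGO parameters concatenated with padding removed) is compared to the peer's as a number, and the association shared key is the endpoint-pair shared key followed by the numerically smaller vector, then the larger one. What follows is only a minimal user-space sketch of that ordering rule, not the kernel code being added; the names byte_vec, vec_cmp and make_assoc_key are hypothetical.

/*
 * Illustrative sketch of the SCTP-AUTH Section 6.1 key ordering rule.
 * All names are hypothetical; this is not kernel API.
 */
#include <stdlib.h>
#include <string.h>

struct byte_vec {
	size_t len;
	const unsigned char *data;
};

/* Skip leading zero bytes so vectors compare as numbers. */
static void strip_leading_zeros(const struct byte_vec *v,
				const unsigned char **p, size_t *n)
{
	*p = v->data;
	*n = v->len;
	while (*n > 0 && **p == 0) {
		(*p)++;
		(*n)--;
	}
}

/* Compare two byte vectors as numbers; on numeric equality the
 * shorter original vector is treated as the smaller one. */
static int vec_cmp(const struct byte_vec *a, const struct byte_vec *b)
{
	const unsigned char *pa, *pb;
	size_t na, nb;
	int c;

	strip_leading_zeros(a, &pa, &na);
	strip_leading_zeros(b, &pb, &nb);
	if (na != nb)
		return na < nb ? -1 : 1;
	c = memcmp(pa, pb, na);
	if (c)
		return c;
	if (a->len != b->len)
		return a->len < b->len ? -1 : 1;
	return 0;
}

/* Association key = endpoint-pair key || smaller vector || larger vector. */
static unsigned char *make_assoc_key(const struct byte_vec *pair_key,
				     const struct byte_vec *local,
				     const struct byte_vec *peer,
				     size_t *out_len)
{
	const struct byte_vec *first = vec_cmp(local, peer) < 0 ? local : peer;
	const struct byte_vec *last = (first == local) ? peer : local;
	unsigned char *key;

	*out_len = pair_key->len + local->len + peer->len;
	key = malloc(*out_len);
	if (!key)
		return NULL;

	if (pair_key->len)
		memcpy(key, pair_key->data, pair_key->len);
	memcpy(key + pair_key->len, first->data, first->len);
	memcpy(key + pair_key->len + first->len, last->data, last->len);
	return key;
}

The kernel code below applies the same rule with its own types and allocators (see sctp_auth_compare_vectors() and sctp_auth_asoc_set_secret() in auth.c).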
diff --git a/net/sctp/Makefile b/net/sctp/Makefile index 70c828bbe44..1da7204d9b4 100644 --- a/net/sctp/Makefile +++ b/net/sctp/Makefile @@ -9,7 +9,8 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ transport.o chunk.o sm_make_chunk.o ulpevent.o \ inqueue.o outqueue.o ulpqueue.o command.o \ tsnmap.o bind_addr.o socket.o primitive.o \ - output.o input.o debug.o ssnmap.o proc.o crc32c.o + output.o input.o debug.o ssnmap.o proc.o crc32c.o \ + auth.o sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o sctp-$(CONFIG_SYSCTL) += sysctl.o diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 9bad8ba0fed..03158e3665d 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -74,6 +74,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a { struct sctp_sock *sp; int i; + sctp_paramhdr_t *p; + int err; /* Retrieve the SCTP per socket area. */ sp = sctp_sk((struct sock *)sk); @@ -298,6 +300,30 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a asoc->default_timetolive = sp->default_timetolive; asoc->default_rcv_context = sp->default_rcv_context; + /* AUTH related initializations */ + INIT_LIST_HEAD(&asoc->endpoint_shared_keys); + err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); + if (err) + goto fail_init; + + asoc->active_key_id = ep->active_key_id; + asoc->asoc_shared_key = NULL; + + asoc->default_hmac_id = 0; + /* Save the hmacs and chunks list into this association */ + if (ep->auth_hmacs_list) + memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list, + ntohs(ep->auth_hmacs_list->param_hdr.length)); + if (ep->auth_chunk_list) + memcpy(asoc->c.auth_chunks, ep->auth_chunk_list, + ntohs(ep->auth_chunk_list->param_hdr.length)); + + /* Get the AUTH random number for this association */ + p = (sctp_paramhdr_t *)asoc->c.auth_random; + p->type = SCTP_PARAM_RANDOM; + p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH); + get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH); + return asoc; fail_init: @@ -389,6 +415,9 @@ void sctp_association_free(struct sctp_association *asoc) /* Free peer's cached cookie. */ kfree(asoc->peer.cookie); + kfree(asoc->peer.peer_random); + kfree(asoc->peer.peer_chunks); + kfree(asoc->peer.peer_hmacs); /* Release the transport structures. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { @@ -407,6 +436,12 @@ void sctp_association_free(struct sctp_association *asoc) if (asoc->addip_last_asconf) sctp_chunk_free(asoc->addip_last_asconf); + /* AUTH - Free the endpoint shared keys */ + sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); + + /* AUTH - Free the association shared key */ + sctp_auth_key_put(asoc->asoc_shared_key); + sctp_association_put(asoc); } @@ -976,6 +1011,16 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) state = asoc->state; subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); + /* SCTP-AUTH, Section 6.3: + * The receiver has a list of chunk types which it expects + * to be received only after an AUTH-chunk. This list has + * been sent to the peer during the association setup. It + * MUST silently discard these chunks if they are not placed + * after an AUTH chunk in the packet. + */ + if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) + continue; + /* Remember where the last DATA chunk came from so we * know where to send the SACK. 
*/ @@ -1112,6 +1157,24 @@ void sctp_assoc_update(struct sctp_association *asoc, sctp_assoc_set_id(asoc, GFP_ATOMIC); } } + + /* SCTP-AUTH: Save the peer parameters from the new assocaitions + * and also move the association shared keys over + */ + kfree(asoc->peer.peer_random); + asoc->peer.peer_random = new->peer.peer_random; + new->peer.peer_random = NULL; + + kfree(asoc->peer.peer_chunks); + asoc->peer.peer_chunks = new->peer.peer_chunks; + new->peer.peer_chunks = NULL; + + kfree(asoc->peer.peer_hmacs); + asoc->peer.peer_hmacs = new->peer.peer_hmacs; + new->peer.peer_hmacs = NULL; + + sctp_auth_key_put(asoc->asoc_shared_key); + sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); } /* Update the retran path for sending a retransmitted packet. diff --git a/net/sctp/auth.c b/net/sctp/auth.c new file mode 100644 index 00000000000..78181072471 --- /dev/null +++ b/net/sctp/auth.c @@ -0,0 +1,938 @@ +/* SCTP kernel reference Implementation + * (C) Copyright 2007 Hewlett-Packard Development Company, L.P. + * + * This file is part of the SCTP kernel reference Implementation + * + * The SCTP reference implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * The SCTP reference implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, write to + * the Free Software Foundation, 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <lksctp-developers@lists.sourceforge.net> + * + * Or submit a bug report through the following website: + * http://www.sf.net/projects/lksctp + * + * Written or modified by: + * Vlad Yasevich <vladislav.yasevich@hp.com> + * + * Any bugs reported given to us we will try to fix... any fixes shared will + * be incorporated into the next SCTP release. + */ + +#include <linux/types.h> +#include <linux/crypto.h> +#include <linux/scatterlist.h> +#include <net/sctp/sctp.h> +#include <net/sctp/auth.h> + +static struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = { + { + /* id 0 is reserved. 
as all 0 */ + .hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_0, + }, + { + .hmac_id = SCTP_AUTH_HMAC_ID_SHA1, + .hmac_name="hmac(sha1)", + .hmac_len = SCTP_SHA1_SIG_SIZE, + }, + { + /* id 2 is reserved as well */ + .hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_2, + }, + { + .hmac_id = SCTP_AUTH_HMAC_ID_SHA256, + .hmac_name="hmac(sha256)", + .hmac_len = SCTP_SHA256_SIG_SIZE, + } +}; + + +void sctp_auth_key_put(struct sctp_auth_bytes *key) +{ + if (!key) + return; + + if (atomic_dec_and_test(&key->refcnt)) { + kfree(key); + SCTP_DBG_OBJCNT_DEC(keys); + } +} + +/* Create a new key structure of a given length */ +static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp) +{ + struct sctp_auth_bytes *key; + + /* Allocate the shared key */ + key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp); + if (!key) + return NULL; + + key->len = key_len; + atomic_set(&key->refcnt, 1); + SCTP_DBG_OBJCNT_INC(keys); + + return key; +} + +/* Create a new shared key container with a give key id */ +struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp) +{ + struct sctp_shared_key *new; + + /* Allocate the shared key container */ + new = kzalloc(sizeof(struct sctp_shared_key), gfp); + if (!new) + return NULL; + + INIT_LIST_HEAD(&new->key_list); + new->key_id = key_id; + + return new; +} + +/* Free the shared key stucture */ +void sctp_auth_shkey_free(struct sctp_shared_key *sh_key) +{ + BUG_ON(!list_empty(&sh_key->key_list)); + sctp_auth_key_put(sh_key->key); + sh_key->key = NULL; + kfree(sh_key); +} + +/* Destory the entire key list. This is done during the + * associon and endpoint free process. + */ +void sctp_auth_destroy_keys(struct list_head *keys) +{ + struct sctp_shared_key *ep_key; + struct sctp_shared_key *tmp; + + if (list_empty(keys)) + return; + + key_for_each_safe(ep_key, tmp, keys) { + list_del_init(&ep_key->key_list); + sctp_auth_shkey_free(ep_key); + } +} + +/* Compare two byte vectors as numbers. Return values + * are: + * 0 - vectors are equal + * < 0 - vector 1 is smaller then vector2 + * > 0 - vector 1 is greater then vector2 + * + * Algorithm is: + * This is performed by selecting the numerically smaller key vector... + * If the key vectors are equal as numbers but differ in length ... + * the shorter vector is considered smaller + * + * Examples (with small values): + * 000123456789 > 123456789 (first number is longer) + * 000123456789 < 234567891 (second number is larger numerically) + * 123456789 > 2345678 (first number is both larger & longer) + */ +static int sctp_auth_compare_vectors(struct sctp_auth_bytes *vector1, + struct sctp_auth_bytes *vector2) +{ + int diff; + int i; + const __u8 *longer; + + diff = vector1->len - vector2->len; + if (diff) { + longer = (diff > 0) ? vector1->data : vector2->data; + + /* Check to see if the longer number is + * lead-zero padded. If it is not, it + * is automatically larger numerically. + */ + for (i = 0; i < abs(diff); i++ ) { + if (longer[i] != 0) + return diff; + } + } + + /* lengths are the same, compare numbers */ + return memcmp(vector1->data, vector2->data, vector1->len); +} + +/* + * Create a key vector as described in SCTP-AUTH, Section 6.1 + * The RANDOM parameter, the CHUNKS parameter and the HMAC-ALGO + * parameter sent by each endpoint are concatenated as byte vectors. + * These parameters include the parameter type, parameter length, and + * the parameter value, but padding is omitted; all padding MUST be + * removed from this concatenation before proceeding with further + * computation of keys. 
Parameters which were not sent are simply + * omitted from the concatenation process. The resulting two vectors + * are called the two key vectors. + */ +static struct sctp_auth_bytes *sctp_auth_make_key_vector( + sctp_random_param_t *random, + sctp_chunks_param_t *chunks, + sctp_hmac_algo_param_t *hmacs, + gfp_t gfp) +{ + struct sctp_auth_bytes *new; + __u32 len; + __u32 offset = 0; + + len = ntohs(random->param_hdr.length) + ntohs(hmacs->param_hdr.length); + if (chunks) + len += ntohs(chunks->param_hdr.length); + + new = kmalloc(sizeof(struct sctp_auth_bytes) + len, gfp); + if (!new) + return NULL; + + new->len = len; + + memcpy(new->data, random, ntohs(random->param_hdr.length)); + offset += ntohs(random->param_hdr.length); + + if (chunks) { + memcpy(new->data + offset, chunks, + ntohs(chunks->param_hdr.length)); + offset += ntohs(chunks->param_hdr.length); + } + + memcpy(new->data + offset, hmacs, ntohs(hmacs->param_hdr.length)); + + return new; +} + + +/* Make a key vector based on our local parameters */ +struct sctp_auth_bytes *sctp_auth_make_local_vector( + const struct sctp_association *asoc, + gfp_t gfp) +{ + return sctp_auth_make_key_vector( + (sctp_random_param_t*)asoc->c.auth_random, + (sctp_chunks_param_t*)asoc->c.auth_chunks, + (sctp_hmac_algo_param_t*)asoc->c.auth_hmacs, + gfp); +} + +/* Make a key vector based on peer's parameters */ +struct sctp_auth_bytes *sctp_auth_make_peer_vector( + const struct sctp_association *asoc, + gfp_t gfp) +{ + return sctp_auth_make_key_vector(asoc->peer.peer_random, + asoc->peer.peer_chunks, + asoc->peer.peer_hmacs, + gfp); +} + + +/* Set the value of the association shared key base on the parameters + * given. The algorithm is: + * From the endpoint pair shared keys and the key vectors the + * association shared keys are computed. This is performed by selecting + * the numerically smaller key vector and concatenating it to the + * endpoint pair shared key, and then concatenating the numerically + * larger key vector to that. The result of the concatenation is the + * association shared key. + */ +static struct sctp_auth_bytes *sctp_auth_asoc_set_secret( + struct sctp_shared_key *ep_key, + struct sctp_auth_bytes *first_vector, + struct sctp_auth_bytes *last_vector, + gfp_t gfp) +{ + struct sctp_auth_bytes *secret; + __u32 offset = 0; + __u32 auth_len; + + auth_len = first_vector->len + last_vector->len; + if (ep_key->key) + auth_len += ep_key->key->len; + + secret = sctp_auth_create_key(auth_len, gfp); + if (!secret) + return NULL; + + if (ep_key->key) { + memcpy(secret->data, ep_key->key->data, ep_key->key->len); + offset += ep_key->key->len; + } + + memcpy(secret->data + offset, first_vector->data, first_vector->len); + offset += first_vector->len; + + memcpy(secret->data + offset, last_vector->data, last_vector->len); + + return secret; +} + +/* Create an association shared key. Follow the algorithm + * described in SCTP-AUTH, Section 6.1 + */ +static struct sctp_auth_bytes *sctp_auth_asoc_create_secret( + const struct sctp_association *asoc, + struct sctp_shared_key *ep_key, + gfp_t gfp) +{ + struct sctp_auth_bytes *local_key_vector; + struct sctp_auth_bytes *peer_key_vector; + struct sctp_auth_bytes *first_vector, + *last_vector; + struct sctp_auth_bytes *secret = NULL; + int cmp; + + + /* Now we need to build the key vectors + * SCTP-AUTH , Section 6.1 + * The RANDOM parameter, the CHUNKS parameter and the HMAC-ALGO + * parameter sent by each endpoint are concatenated as byte vectors. 
+ * These parameters include the parameter type, parameter length, and + * the parameter value, but padding is omitted; all padding MUST be + * removed from this concatenation before proceeding with further + * computation of keys. Parameters which were not sent are simply + * omitted from the concatenation process. The resulting two vectors + * are called the two key vectors. + */ + + local_key_vector = sctp_auth_make_local_vector(asoc, gfp); + peer_key_vector = sctp_auth_make_peer_vector(asoc, gfp); + + if (!peer_key_vector || !local_key_vector) + goto out; + + /* Figure out the order in wich the key_vectors will be + * added to the endpoint shared key. + * SCTP-AUTH, Section 6.1: + * This is performed by selecting the numerically smaller key + * vector and concatenating it to the endpoint pair shared + * key, and then concatenating the numerically larger key + * vector to that. If the key vectors are equal as numbers + * but differ in length, then the concatenation order is the + * endpoint shared key, followed by the shorter key vector, + * followed by the longer key vector. Otherwise, the key + * vectors are identical, and may be concatenated to the + * endpoint pair key in any order. + */ + cmp = sctp_auth_compare_vectors(local_key_vector, + peer_key_vector); + if (cmp < 0) { + first_vector = local_key_vector; + last_vector = peer_key_vector; + } else { + first_vector = peer_key_vector; + last_vector = local_key_vector; + } + + secret = sctp_auth_asoc_set_secret(ep_key, first_vector, last_vector, + gfp); +out: + kfree(local_key_vector); + kfree(peer_key_vector); + + return secret; +} + +/* + * Populate the association overlay list with the list + * from the endpoint. + */ +int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep, + struct sctp_association *asoc, + gfp_t gfp) +{ + struct sctp_shared_key *sh_key; + struct sctp_shared_key *new; + + BUG_ON(!list_empty(&asoc->endpoint_shared_keys)); + + key_for_each(sh_key, &ep->endpoint_shared_keys) { + new = sctp_auth_shkey_create(sh_key->key_id, gfp); + if (!new) + goto nomem; + + new->key = sh_key->key; + sctp_auth_key_hold(new->key); + list_add(&new->key_list, &asoc->endpoint_shared_keys); + } + + return 0; + +nomem: + sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); + return -ENOMEM; +} + + +/* Public interface to creat the association shared key. + * See code above for the algorithm. + */ +int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) +{ + struct sctp_auth_bytes *secret; + struct sctp_shared_key *ep_key; + + /* If we don't support AUTH, or peer is not capable + * we don't need to do anything. + */ + if (!sctp_auth_enable || !asoc->peer.auth_capable) + return 0; + + /* If the key_id is non-zero and we couldn't find an + * endpoint pair shared key, we can't compute the + * secret. + * For key_id 0, endpoint pair shared key is a NULL key. 
+ */ + ep_key = sctp_auth_get_shkey(asoc, asoc->active_key_id); + BUG_ON(!ep_key); + + secret = sctp_auth_asoc_create_secret(asoc, ep_key, gfp); + if (!secret) + return -ENOMEM; + + sctp_auth_key_put(asoc->asoc_shared_key); + asoc->asoc_shared_key = secret; + + return 0; +} + + +/* Find the endpoint pair shared key based on the key_id */ +struct sctp_shared_key *sctp_auth_get_shkey( + const struct sctp_association *asoc, + __u16 key_id) +{ + struct sctp_shared_key *key = NULL; + + /* First search associations set of endpoint pair shared keys */ + key_for_each(key, &asoc->endpoint_shared_keys) { + if (key->key_id == key_id) + break; + } + + return key; +} + +/* + * Initialize all the possible digest transforms that we can use. Right now + * now, the supported digests are SHA1 and SHA256. We do this here once + * because of the restrictiong that transforms may only be allocated in + * user context. This forces us to pre-allocated all possible transforms + * at the endpoint init time. + */ +int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) +{ + struct crypto_hash *tfm = NULL; + __u16 id; + + /* if the transforms are already allocted, we are done */ + if (!sctp_auth_enable) { + ep->auth_hmacs = NULL; + return 0; + } + + if (ep->auth_hmacs) + return 0; + + /* Allocated the array of pointers to transorms */ + ep->auth_hmacs = kzalloc( + sizeof(struct crypto_hash *) * SCTP_AUTH_NUM_HMACS, + gfp); + if (!ep->auth_hmacs) + return -ENOMEM; + + for (id = 0; id < SCTP_AUTH_NUM_HMACS; id++) { + + /* See is we support the id. Supported IDs have name and + * length fields set, so that we can allocated and use + * them. We can safely just check for name, for without the + * name, we can't allocate the TFM. + */ + if (!sctp_hmac_list[id].hmac_name) + continue; + + /* If this TFM has been allocated, we are all set */ + if (ep->auth_hmacs[id]) + continue; + + /* Allocate the ID */ + tfm = crypto_alloc_hash(sctp_hmac_list[id].hmac_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + goto out_err; + + ep->auth_hmacs[id] = tfm; + } + + return 0; + +out_err: + /* Clean up any successfull allocations */ + sctp_auth_destroy_hmacs(ep->auth_hmacs); + return -ENOMEM; +} + +/* Destroy the hmac tfm array */ +void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[]) +{ + int i; + + if (!auth_hmacs) + return; + + for (i = 0; i < SCTP_AUTH_NUM_HMACS; i++) + { + if (auth_hmacs[i]) + crypto_free_hash(auth_hmacs[i]); + } + kfree(auth_hmacs); +} + + +struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id) +{ + return &sctp_hmac_list[hmac_id]; +} + +/* Get an hmac description information that we can use to build + * the AUTH chunk + */ +struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc) +{ + struct sctp_hmac_algo_param *hmacs; + __u16 n_elt; + __u16 id = 0; + int i; + + /* If we have a default entry, use it */ + if (asoc->default_hmac_id) + return &sctp_hmac_list[asoc->default_hmac_id]; + + /* Since we do not have a default entry, find the first entry + * we support and return that. Do not cache that id. + */ + hmacs = asoc->peer.peer_hmacs; + if (!hmacs) + return NULL; + + n_elt = (ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t)) >> 1; + for (i = 0; i < n_elt; i++) { + id = ntohs(hmacs->hmac_ids[i]); + + /* Check the id is in the supported range */ + if (id > SCTP_AUTH_HMAC_ID_MAX) + continue; + + /* See is we support the id. Supported IDs have name and + * length fields set, so that we can allocated and use + * them. 
We can safely just check for name, for without the + * name, we can't allocate the TFM. + */ + if (!sctp_hmac_list[id].hmac_name) + continue; + + break; + } + + if (id == 0) + return NULL; + + return &sctp_hmac_list[id]; +} + +static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id) +{ + int found = 0; + int i; + + for (i = 0; i < n_elts; i++) { + if (hmac_id == hmacs[i]) { + found = 1; + break; + } + } + + return found; +} + +/* See if the HMAC_ID is one that we claim as supported */ +int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, + __u16 hmac_id) +{ + struct sctp_hmac_algo_param *hmacs; + __u16 n_elt; + + if (!asoc) + return 0; + + hmacs = (struct sctp_hmac_algo_param *)asoc->c.auth_hmacs; + n_elt = (ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t)) >> 1; + + return __sctp_auth_find_hmacid(hmacs->hmac_ids, n_elt, hmac_id); +} + + +/* Cache the default HMAC id. This to follow this text from SCTP-AUTH: + * Section 6.1: + * The receiver of a HMAC-ALGO parameter SHOULD use the first listed + * algorithm it supports. + */ +void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc, + struct sctp_hmac_algo_param *hmacs) +{ + struct sctp_endpoint *ep; + __u16 id; + int i; + int n_params; + + /* if the default id is already set, use it */ + if (asoc->default_hmac_id) + return; + + n_params = (ntohs(hmacs->param_hdr.length) + - sizeof(sctp_paramhdr_t)) >> 1; + ep = asoc->ep; + for (i = 0; i < n_params; i++) { + id = ntohs(hmacs->hmac_ids[i]); + + /* Check the id is in the supported range */ + if (id > SCTP_AUTH_HMAC_ID_MAX) + continue; + + /* If this TFM has been allocated, use this id */ + if (ep->auth_hmacs[id]) { + asoc->default_hmac_id = id; + break; + } + } +} + + +/* Check to see if the given chunk is supposed to be authenticated */ +static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param) +{ + unsigned short len; + int found = 0; + int i; + + if (!param) + return 0; + + len = ntohs(param->param_hdr.length) - sizeof(sctp_paramhdr_t); + + /* SCTP-AUTH, Section 3.2 + * The chunk types for INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH + * chunks MUST NOT be listed in the CHUNKS parameter. However, if + * a CHUNKS parameter is received then the types for INIT, INIT-ACK, + * SHUTDOWN-COMPLETE and AUTH chunks MUST be ignored. + */ + for (i = 0; !found && i < len; i++) { + switch (param->chunks[i]) { + case SCTP_CID_INIT: + case SCTP_CID_INIT_ACK: + case SCTP_CID_SHUTDOWN_COMPLETE: + case SCTP_CID_AUTH: + break; + + default: + if (param->chunks[i] == chunk) + found = 1; + break; + } + } + + return found; +} + +/* Check if peer requested that this chunk is authenticated */ +int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc) +{ + if (!sctp_auth_enable || !asoc || !asoc->peer.auth_capable) + return 0; + + return __sctp_auth_cid(chunk, asoc->peer.peer_chunks); +} + +/* Check if we requested that peer authenticate this chunk. */ +int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc) +{ + if (!sctp_auth_enable || !asoc) + return 0; + + return __sctp_auth_cid(chunk, + (struct sctp_chunks_param *)asoc->c.auth_chunks); +} + +/* SCTP-AUTH: Section 6.2: + * The sender MUST calculate the MAC as described in RFC2104 [2] using + * the hash function H as described by the MAC Identifier and the shared + * association key K based on the endpoint pair shared key described by + * the shared key identifier. 
The 'data' used for the computation of + * the AUTH-chunk is given by the AUTH chunk with its HMAC field set to + * zero (as shown in Figure 6) followed by all chunks that are placed + * after the AUTH chunk in the SCTP packet. + */ +void sctp_auth_calculate_hmac(const struct sctp_association *asoc, + struct sk_buff *skb, + struct sctp_auth_chunk *auth, + gfp_t gfp) +{ + struct scatterlist sg; + struct hash_desc desc; + struct sctp_auth_bytes *asoc_key; + __u16 key_id, hmac_id; + __u8 *digest; + unsigned char *end; + int free_key = 0; + + /* Extract the info we need: + * - hmac id + * - key id + */ + key_id = ntohs(auth->auth_hdr.shkey_id); + hmac_id = ntohs(auth->auth_hdr.hmac_id); + + if (key_id == asoc->active_key_id) + asoc_key = asoc->asoc_shared_key; + else { + struct sctp_shared_key *ep_key; + + ep_key = sctp_auth_get_shkey(asoc, key_id); + if (!ep_key) + return; + + asoc_key = sctp_auth_asoc_create_secret(asoc, ep_key, gfp); + if (!asoc_key) + return; + + free_key = 1; + } + + /* set up scatter list */ + end = skb_tail_pointer(skb); + sg.page = virt_to_page(auth); + sg.offset = (unsigned long)(auth) % PAGE_SIZE; + sg.length = end - (unsigned char *)auth; + + desc.tfm = asoc->ep->auth_hmacs[hmac_id]; + desc.flags = 0; + + digest = auth->auth_hdr.hmac; + if (crypto_hash_setkey(desc.tfm, &asoc_key->data[0], asoc_key->len)) + goto free; + + crypto_hash_digest(&desc, &sg, sg.length, digest); + +free: + if (free_key) + sctp_auth_key_put(asoc_key); +} + +/* API Helpers */ + +/* Add a chunk to the endpoint authenticated chunk list */ +int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id) +{ + struct sctp_chunks_param *p = ep->auth_chunk_list; + __u16 nchunks; + __u16 param_len; + + /* If this chunk is already specified, we are done */ + if (__sctp_auth_cid(chunk_id, p)) + return 0; + + /* Check if we can add this chunk to the array */ + param_len = ntohs(p->param_hdr.length); + nchunks = param_len - sizeof(sctp_paramhdr_t); + if (nchunks == SCTP_NUM_CHUNK_TYPES) + return -EINVAL; + + p->chunks[nchunks] = chunk_id; + p->param_hdr.length = htons(param_len + 1); + return 0; +} + +/* Add hmac identifires to the endpoint list of supported hmac ids */ +int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep, + struct sctp_hmacalgo *hmacs) +{ + int has_sha1 = 0; + __u16 id; + int i; + + /* Scan the list looking for unsupported id. Also make sure that + * SHA1 is specified. + */ + for (i = 0; i < hmacs->shmac_num_idents; i++) { + id = hmacs->shmac_idents[i]; + + if (SCTP_AUTH_HMAC_ID_SHA1 == id) + has_sha1 = 1; + + if (!sctp_hmac_list[id].hmac_name) + return -EOPNOTSUPP; + } + + if (!has_sha1) + return -EINVAL; + + memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0], + hmacs->shmac_num_idents * sizeof(__u16)); + ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) + + hmacs->shmac_num_idents * sizeof(__u16)); + return 0; +} + +/* Set a new shared key on either endpoint or association. If the + * the key with a same ID already exists, replace the key (remove the + * old key and add a new one). 
+ */ +int sctp_auth_set_key(struct sctp_endpoint *ep, + struct sctp_association *asoc, + struct sctp_authkey *auth_key) +{ + struct sctp_shared_key *cur_key = NULL; + struct sctp_auth_bytes *key; + struct list_head *sh_keys; + int replace = 0; + + /* Try to find the given key id to see if + * we are doing a replace, or adding a new key + */ + if (asoc) + sh_keys = &asoc->endpoint_shared_keys; + else + sh_keys = &ep->endpoint_shared_keys; + + key_for_each(cur_key, sh_keys) { + if (cur_key->key_id == auth_key->sca_keynumber) { + replace = 1; + break; + } + } + + /* If we are not replacing a key id, we need to allocate + * a shared key. + */ + if (!replace) { + cur_key = sctp_auth_shkey_create(auth_key->sca_keynumber, + GFP_KERNEL); + if (!cur_key) + return -ENOMEM; + } + + /* Create a new key data based on the info passed in */ + key = sctp_auth_create_key(auth_key->sca_keylen, GFP_KERNEL); + if (!key) + goto nomem; + + memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylen); + + /* If we are replacing, remove the old keys data from the + * key id. If we are adding new key id, add it to the + * list. + */ + if (replace) + sctp_auth_key_put(cur_key->key); + else + list_add(&cur_key->key_list, sh_keys); + + cur_key->key = key; + sctp_auth_key_hold(key); + + return 0; +nomem: + if (!replace) + sctp_auth_shkey_free(cur_key); + + return -ENOMEM; +} + +int sctp_auth_set_active_key(struct sctp_endpoint *ep, + struct sctp_association *asoc, + __u16 key_id) +{ + struct sctp_shared_key *key; + struct list_head *sh_keys; + int found = 0; + + /* The key identifier MUST correst to an existing key */ + if (asoc) + sh_keys = &asoc->endpoint_shared_keys; + else + sh_keys = &ep->endpoint_shared_keys; + + key_for_each(key, sh_keys) { + if (key->key_id == key_id) { + found = 1; + break; + } + } + + if (!found) + return -EINVAL; + + if (asoc) { + asoc->active_key_id = key_id; + sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); + } else + ep->active_key_id = key_id; + + return 0; +} + +int sctp_auth_del_key_id(struct sctp_endpoint *ep, + struct sctp_association *asoc, + __u16 key_id) +{ + struct sctp_shared_key *key; + struct list_head *sh_keys; + int found = 0; + + /* The key identifier MUST NOT be the current active key + * The key identifier MUST correst to an existing key + */ + if (asoc) { + if (asoc->active_key_id == key_id) + return -EINVAL; + + sh_keys = &asoc->endpoint_shared_keys; + } else { + if (ep->active_key_id == key_id) + return -EINVAL; + + sh_keys = &ep->endpoint_shared_keys; + } + + key_for_each(key, sh_keys) { + if (key->key_id == key_id) { + found = 1; + break; + } + } + + if (!found) + return -EINVAL; + + /* Delete the shared key */ + list_del_init(&key->key_list); + sctp_auth_shkey_free(key); + + return 0; +} diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 77fb7b06a9c..619d0f2dee5 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -194,6 +194,18 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, max = asoc->frag_point; + /* If the the peer requested that we authenticate DATA chunks + * we need to accound for bundling of the AUTH chunks along with + * DATA. 
+ */ + if (sctp_auth_send_cid(SCTP_CID_DATA, asoc)) { + struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc); + + if (hmac_desc) + max -= WORD_ROUND(sizeof(sctp_auth_chunk_t) + + hmac_desc->hmac_len); + } + whole = 0; first_len = max; diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 8f485a0d14b..2d2d81ef4a6 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -69,12 +69,56 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, struct sock *sk, gfp_t gfp) { + struct sctp_hmac_algo_param *auth_hmacs = NULL; + struct sctp_chunks_param *auth_chunks = NULL; + struct sctp_shared_key *null_key; + int err; + memset(ep, 0, sizeof(struct sctp_endpoint)); ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp); if (!ep->digest) return NULL; + if (sctp_auth_enable) { + /* Allocate space for HMACS and CHUNKS authentication + * variables. There are arrays that we encode directly + * into parameters to make the rest of the operations easier. + */ + auth_hmacs = kzalloc(sizeof(sctp_hmac_algo_param_t) + + sizeof(__u16) * SCTP_AUTH_NUM_HMACS, gfp); + if (!auth_hmacs) + goto nomem; + + auth_chunks = kzalloc(sizeof(sctp_chunks_param_t) + + SCTP_NUM_CHUNK_TYPES, gfp); + if (!auth_chunks) + goto nomem; + + /* Initialize the HMACS parameter. + * SCTP-AUTH: Section 3.3 + * Every endpoint supporting SCTP chunk authentication MUST + * support the HMAC based on the SHA-1 algorithm. + */ + auth_hmacs->param_hdr.type = SCTP_PARAM_HMAC_ALGO; + auth_hmacs->param_hdr.length = + htons(sizeof(sctp_paramhdr_t) + 2); + auth_hmacs->hmac_ids[0] = htons(SCTP_AUTH_HMAC_ID_SHA1); + + /* Initialize the CHUNKS parameter */ + auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS; + + /* If the Add-IP functionality is enabled, we must + * authenticate, ASCONF and ASCONF-ACK chunks + */ + if (sctp_addip_enable) { + auth_chunks->chunks[0] = SCTP_CID_ASCONF; + auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK; + auth_chunks->param_hdr.length = + htons(sizeof(sctp_paramhdr_t) + 2); + } + } + /* Initialize the base structure. */ /* What type of endpoint are we? */ ep->base.type = SCTP_EP_TYPE_SOCKET; @@ -102,6 +146,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, /* Use SCTP specific send buffer space queues. */ ep->sndbuf_policy = sctp_sndbuf_policy; + sk->sk_write_space = sctp_write_space; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); @@ -113,7 +158,36 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, ep->last_key = ep->current_key = 0; ep->key_changed_at = jiffies; + /* SCTP-AUTH extensions*/ + INIT_LIST_HEAD(&ep->endpoint_shared_keys); + null_key = sctp_auth_shkey_create(0, GFP_KERNEL); + if (!null_key) + goto nomem; + + list_add(&null_key->key_list, &ep->endpoint_shared_keys); + + /* Allocate and initialize transorms arrays for suported HMACs. */ + err = sctp_auth_init_hmacs(ep, gfp); + if (err) + goto nomem_hmacs; + + /* Add the null key to the endpoint shared keys list and + * set the hmcas and chunks pointers. + */ + ep->auth_hmacs_list = auth_hmacs; + ep->auth_chunk_list = auth_chunks; + return ep; + +nomem_hmacs: + sctp_auth_destroy_keys(&ep->endpoint_shared_keys); +nomem: + /* Free all allocations */ + kfree(auth_hmacs); + kfree(auth_chunks); + kfree(ep->digest); + return NULL; + } /* Create a sctp_endpoint with all that boring stuff initialized. 
@@ -186,6 +260,16 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) /* Free the digest buffer */ kfree(ep->digest); + /* SCTP-AUTH: Free up AUTH releated data such as shared keys + * chunks and hmacs arrays that were allocated + */ + sctp_auth_destroy_keys(&ep->endpoint_shared_keys); + kfree(ep->auth_hmacs_list); + kfree(ep->auth_chunk_list); + + /* AUTH - Free any allocated HMAC transform containers */ + sctp_auth_destroy_hmacs(ep->auth_hmacs); + /* Cleanup. */ sctp_inq_free(&ep->base.inqueue); sctp_bind_addr_free(&ep->base.bind_addr); @@ -316,6 +400,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work) sctp_subtype_t subtype; sctp_state_t state; int error = 0; + int first_time = 1; /* is this the first time through the looop */ if (ep->base.dead) return; @@ -327,6 +412,29 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work) while (NULL != (chunk = sctp_inq_pop(inqueue))) { subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); + /* If the first chunk in the packet is AUTH, do special + * processing specified in Section 6.3 of SCTP-AUTH spec + */ + if (first_time && (subtype.chunk == SCTP_CID_AUTH)) { + struct sctp_chunkhdr *next_hdr; + + next_hdr = sctp_inq_peek(inqueue); + if (!next_hdr) + goto normal; + + /* If the next chunk is COOKIE-ECHO, skip the AUTH + * chunk while saving a pointer to it so we can do + * Authentication later (during cookie-echo + * processing). + */ + if (next_hdr->type == SCTP_CID_COOKIE_ECHO) { + chunk->auth_chunk = skb_clone(chunk->skb, + GFP_ATOMIC); + chunk->auth = 1; + continue; + } + } +normal: /* We might have grown an association since last we * looked, so try again. * @@ -342,6 +450,8 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work) } state = asoc ? asoc->state : SCTP_STATE_CLOSED; + if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) + continue; /* Remember where the last DATA chunk came from so we * know where to send the SACK. @@ -365,5 +475,8 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work) */ if (!sctp_sk(sk)->ep) break; + + if (first_time) + first_time = 0; } } diff --git a/net/sctp/input.c b/net/sctp/input.c index f9a0c9276e3..86503e7fa21 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -911,15 +911,6 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb, ch = (sctp_chunkhdr_t *) skb->data; - /* If this is INIT/INIT-ACK look inside the chunk too. */ - switch (ch->type) { - case SCTP_CID_INIT: - case SCTP_CID_INIT_ACK: - break; - default: - return NULL; - } - /* The code below will attempt to walk the chunk and extract * parameter information. Before we do that, we need to verify * that the chunk length doesn't cause overflow. Otherwise, we'll @@ -964,6 +955,60 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb, return NULL; } +/* SCTP-AUTH, Section 6.3: +* If the receiver does not find a STCB for a packet containing an AUTH +* chunk as the first chunk and not a COOKIE-ECHO chunk as the second +* chunk, it MUST use the chunks after the AUTH chunk to look up an existing +* association. +* +* This means that any chunks that can help us identify the association need +* to be looked at to find this assocation. +* +* TODO: The only chunk currently defined that can do that is ASCONF, but we +* don't support that functionality yet. 
+*/ +static struct sctp_association *__sctp_rcv_auth_lookup(struct sk_buff *skb, + const union sctp_addr *paddr, + const union sctp_addr *laddr, + struct sctp_transport **transportp) +{ + /* XXX - walk through the chunks looking for something that can + * help us find the association. INIT, and INIT-ACK are not permitted. + * That leaves ASCONF, but we don't support that yet. + */ + return NULL; +} + +/* + * There are circumstances when we need to look inside the SCTP packet + * for information to help us find the association. Examples + * include looking inside of INIT/INIT-ACK chunks or after the AUTH + * chunks. + */ +static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb, + const union sctp_addr *paddr, + const union sctp_addr *laddr, + struct sctp_transport **transportp) +{ + sctp_chunkhdr_t *ch; + + ch = (sctp_chunkhdr_t *) skb->data; + + /* If this is INIT/INIT-ACK look inside the chunk too. */ + switch (ch->type) { + case SCTP_CID_INIT: + case SCTP_CID_INIT_ACK: + return __sctp_rcv_init_lookup(skb, laddr, transportp); + break; + + case SCTP_CID_AUTH: + return __sctp_rcv_auth_lookup(skb, paddr, laddr, transportp); + break; + } + + return NULL; +} + /* Lookup an association for an inbound skb. */ static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, const union sctp_addr *paddr, @@ -979,7 +1024,7 @@ static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, * parameters within the INIT or INIT-ACK. */ if (!asoc) - asoc = __sctp_rcv_init_lookup(skb, laddr, transportp); + asoc = __sctp_rcv_lookup_harder(skb, paddr, laddr, transportp); return asoc; } diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index e4ea7fdf36e..f10fe7fbf24 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -100,6 +100,25 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) q->immediate.func(&q->immediate); } +/* Peek at the next chunk on the inqeue. */ +struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue) +{ + struct sctp_chunk *chunk; + sctp_chunkhdr_t *ch = NULL; + + chunk = queue->in_progress; + /* If there is no more chunks in this packet, say so */ + if (chunk->singleton || + chunk->end_of_packet || + chunk->pdiscard) + return NULL; + + ch = (sctp_chunkhdr_t *)chunk->chunk_end; + + return ch; +} + + /* Extract a chunk from an SCTP inqueue. 
* * WARNING: If you need to put the chunk on another queue, you need to diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 670fd2740b8..9de3ddaa276 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -493,7 +493,7 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1, if (addr1->sa.sa_family != addr2->sa.sa_family) { if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET6 && - IPV6_ADDR_MAPPED == ipv6_addr_type(&addr2->v6.sin6_addr)) { + ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) { if (addr2->v6.sin6_port == addr1->v4.sin_port && addr2->v6.sin6_addr.s6_addr32[3] == addr1->v4.sin_addr.s_addr) @@ -501,7 +501,7 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1, } if (addr2->sa.sa_family == AF_INET && addr1->sa.sa_family == AF_INET6 && - IPV6_ADDR_MAPPED == ipv6_addr_type(&addr1->v6.sin6_addr)) { + ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) { if (addr1->v6.sin6_port == addr2->v4.sin_port && addr1->v6.sin6_addr.s6_addr32[3] == addr2->v4.sin_addr.s_addr) @@ -631,7 +631,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, struct ipv6_pinfo *newnp, *np = inet6_sk(sk); struct sctp6_sock *newsctp6sk; - newsk = sk_alloc(PF_INET6, GFP_KERNEL, sk->sk_prot, 1); + newsk = sk_alloc(sk->sk_net, PF_INET6, GFP_KERNEL, sk->sk_prot, 1); if (!newsk) goto out; @@ -855,7 +855,7 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) if (type & IPV6_ADDR_LINKLOCAL) { if (!addr->v6.sin6_scope_id) return 0; - dev = dev_get_by_index(addr->v6.sin6_scope_id); + dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id); if (!dev) return 0; if (!ipv6_chk_addr(&addr->v6.sin6_addr, dev, 0)) { @@ -886,7 +886,7 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr) if (type & IPV6_ADDR_LINKLOCAL) { if (!addr->v6.sin6_scope_id) return 0; - dev = dev_get_by_index(addr->v6.sin6_scope_id); + dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id); if (!dev) return 0; dev_put(dev); diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c index fcfb9d806de..2cf6ad6ff8c 100644 --- a/net/sctp/objcnt.c +++ b/net/sctp/objcnt.c @@ -58,6 +58,7 @@ SCTP_DBG_OBJCNT(chunk); SCTP_DBG_OBJCNT(addr); SCTP_DBG_OBJCNT(ssnmap); SCTP_DBG_OBJCNT(datamsg); +SCTP_DBG_OBJCNT(keys); /* An array to make it easy to pretty print the debug information * to the proc fs. @@ -73,6 +74,7 @@ static sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = { SCTP_DBG_OBJCNT_ENTRY(addr), SCTP_DBG_OBJCNT_ENTRY(ssnmap), SCTP_DBG_OBJCNT_ENTRY(datamsg), + SCTP_DBG_OBJCNT_ENTRY(keys), }; /* Callback from procfs to read out objcount information. diff --git a/net/sctp/output.c b/net/sctp/output.c index d85543def75..847639d542c 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -79,7 +79,10 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, packet->vtag = vtag; packet->has_cookie_echo = 0; packet->has_sack = 0; + packet->has_auth = 0; + packet->has_data = 0; packet->ipfragok = 0; + packet->auth = NULL; if (ecn_capable && sctp_packet_empty(packet)) { chunk = sctp_get_ecne_prepend(packet->transport->asoc); @@ -121,8 +124,11 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, packet->vtag = 0; packet->has_cookie_echo = 0; packet->has_sack = 0; + packet->has_auth = 0; + packet->has_data = 0; packet->ipfragok = 0; packet->malloced = 0; + packet->auth = NULL; return packet; } @@ -181,6 +187,39 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, return retval; } +/* Try to bundle an auth chunk into the packet. 
*/ +static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt, + struct sctp_chunk *chunk) +{ + struct sctp_association *asoc = pkt->transport->asoc; + struct sctp_chunk *auth; + sctp_xmit_t retval = SCTP_XMIT_OK; + + /* if we don't have an association, we can't do authentication */ + if (!asoc) + return retval; + + /* See if this is an auth chunk we are bundling or if + * auth is already bundled. + */ + if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->auth) + return retval; + + /* if the peer did not request this chunk to be authenticated, + * don't do it + */ + if (!chunk->auth) + return retval; + + auth = sctp_make_auth(asoc); + if (!auth) + return retval; + + retval = sctp_packet_append_chunk(pkt, auth); + + return retval; +} + /* Try to bundle a SACK with the packet. */ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt, struct sctp_chunk *chunk) @@ -227,12 +266,17 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, packet, chunk); - retval = sctp_packet_bundle_sack(packet, chunk); - psize = packet->size; + /* Try to bundle AUTH chunk */ + retval = sctp_packet_bundle_auth(packet, chunk); + if (retval != SCTP_XMIT_OK) + goto finish; + /* Try to bundle SACK chunk */ + retval = sctp_packet_bundle_sack(packet, chunk); if (retval != SCTP_XMIT_OK) goto finish; + psize = packet->size; pmtu = ((packet->transport->asoc) ? (packet->transport->asoc->pathmtu) : (packet->transport->pathmtu)); @@ -241,10 +285,16 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, /* Decide if we need to fragment or resubmit later. */ if (too_big) { - /* Both control chunks and data chunks with TSNs are - * non-fragmentable. + /* It's OK to fragmet at IP level if any one of the following + * is true: + * 1. The packet is empty (meaning this chunk is greater + * the MTU) + * 2. The chunk we are adding is a control chunk + * 3. The packet doesn't have any data in it yet and data + * requires authentication. */ - if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk)) { + if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) || + (!packet->has_data && chunk->auth)) { /* We no longer do re-fragmentation. * Just fragment at the IP layer, if we * actually hit this condition @@ -266,16 +316,31 @@ append: /* DATA is a special case since we must examine both rwnd and cwnd * before we send DATA. */ - if (sctp_chunk_is_data(chunk)) { + switch (chunk->chunk_hdr->type) { + case SCTP_CID_DATA: retval = sctp_packet_append_data(packet, chunk); /* Disallow SACK bundling after DATA. */ packet->has_sack = 1; + /* Disallow AUTH bundling after DATA */ + packet->has_auth = 1; + /* Let it be knows that packet has DATA in it */ + packet->has_data = 1; if (SCTP_XMIT_OK != retval) goto finish; - } else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type) + break; + case SCTP_CID_COOKIE_ECHO: packet->has_cookie_echo = 1; - else if (SCTP_CID_SACK == chunk->chunk_hdr->type) + break; + + case SCTP_CID_SACK: packet->has_sack = 1; + break; + + case SCTP_CID_AUTH: + packet->has_auth = 1; + packet->auth = chunk; + break; + } /* It is OK to send this chunk. */ list_add_tail(&chunk->list, &packet->chunk_list); @@ -303,6 +368,8 @@ int sctp_packet_transmit(struct sctp_packet *packet) int padding; /* How much padding do we need? 
*/ __u8 has_data = 0; struct dst_entry *dst = tp->dst; + unsigned char *auth = NULL; /* pointer to auth in skb data */ + __u32 cksum_buf_len = sizeof(struct sctphdr); SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); @@ -356,16 +423,6 @@ int sctp_packet_transmit(struct sctp_packet *packet) sh->vtag = htonl(packet->vtag); sh->checksum = 0; - /* 2) Calculate the Adler-32 checksum of the whole packet, - * including the SCTP common header and all the - * chunks. - * - * Note: Adler-32 is no longer applicable, as has been replaced - * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. - */ - if (!(dst->dev->features & NETIF_F_NO_CSUM)) - crc32 = sctp_start_cksum((__u8 *)sh, sizeof(struct sctphdr)); - /** * 6.10 Bundling * @@ -416,14 +473,16 @@ int sctp_packet_transmit(struct sctp_packet *packet) if (padding) memset(skb_put(chunk->skb, padding), 0, padding); - if (dst->dev->features & NETIF_F_NO_CSUM) - memcpy(skb_put(nskb, chunk->skb->len), + /* if this is the auth chunk that we are adding, + * store pointer where it will be added and put + * the auth into the packet. + */ + if (chunk == packet->auth) + auth = skb_tail_pointer(nskb); + + cksum_buf_len += chunk->skb->len; + memcpy(skb_put(nskb, chunk->skb->len), chunk->skb->data, chunk->skb->len); - else - crc32 = sctp_update_copy_cksum(skb_put(nskb, - chunk->skb->len), - chunk->skb->data, - chunk->skb->len, crc32); SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n", "*** Chunk", chunk, @@ -445,9 +504,31 @@ int sctp_packet_transmit(struct sctp_packet *packet) sctp_chunk_free(chunk); } - /* Perform final transformation on checksum. */ - if (!(dst->dev->features & NETIF_F_NO_CSUM)) + /* SCTP-AUTH, Section 6.2 + * The sender MUST calculate the MAC as described in RFC2104 [2] + * using the hash function H as described by the MAC Identifier and + * the shared association key K based on the endpoint pair shared key + * described by the shared key identifier. The 'data' used for the + * computation of the AUTH-chunk is given by the AUTH chunk with its + * HMAC field set to zero (as shown in Figure 6) followed by all + * chunks that are placed after the AUTH chunk in the SCTP packet. + */ + if (auth) + sctp_auth_calculate_hmac(asoc, nskb, + (struct sctp_auth_chunk *)auth, + GFP_ATOMIC); + + /* 2) Calculate the Adler-32 checksum of the whole packet, + * including the SCTP common header and all the + * chunks. + * + * Note: Adler-32 is no longer applicable, as has been replaced + * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. + */ + if (!(dst->dev->features & NETIF_F_NO_CSUM)) { + crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); crc32 = sctp_end_cksum(crc32); + } /* 3) Put the resultant value into the checksum field in the * common header, and leave the rest of the bits unchanged. diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 3d036cdfae4..81b26c5ffd4 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -51,6 +51,8 @@ #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/seq_file.h> +#include <linux/bootmem.h> +#include <net/net_namespace.h> #include <net/protocol.h> #include <net/ip.h> #include <net/ipv6.h> @@ -82,6 +84,10 @@ static struct sctp_af *sctp_af_v6_specific; struct kmem_cache *sctp_chunk_cachep __read_mostly; struct kmem_cache *sctp_bucket_cachep __read_mostly; +int sysctl_sctp_mem[3]; +int sysctl_sctp_rmem[3]; +int sysctl_sctp_wmem[3]; + /* Return the address of the control sock. 
*/ struct sock *sctp_get_ctl_sock(void) { @@ -93,7 +99,7 @@ static __init int sctp_proc_init(void) { if (!proc_net_sctp) { struct proc_dir_entry *ent; - ent = proc_mkdir("net/sctp", NULL); + ent = proc_mkdir("sctp", init_net.proc_net); if (ent) { ent->owner = THIS_MODULE; proc_net_sctp = ent; @@ -126,7 +132,7 @@ static void sctp_proc_exit(void) if (proc_net_sctp) { proc_net_sctp = NULL; - remove_proc_entry("net/sctp", NULL); + remove_proc_entry("sctp", init_net.proc_net); } } @@ -173,7 +179,7 @@ static void sctp_get_local_addr_list(void) struct sctp_af *af; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { __list_for_each(pos, &sctp_address_families) { af = list_entry(pos, struct sctp_af, list); af->copy_addrlist(&sctp_local_addr_list, dev); @@ -546,7 +552,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk, { struct inet_sock *inet = inet_sk(sk); struct inet_sock *newinet; - struct sock *newsk = sk_alloc(PF_INET, GFP_KERNEL, sk->sk_prot, 1); + struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL, sk->sk_prot, 1); if (!newsk) goto out; @@ -987,6 +993,8 @@ SCTP_STATIC __init int sctp_init(void) int i; int status = -EINVAL; unsigned long goal; + unsigned long limit; + int max_share; int order; /* SCTP_DEBUG sanity check. */ @@ -1077,6 +1085,31 @@ SCTP_STATIC __init int sctp_init(void) /* Initialize handle used for association ids. */ idr_init(&sctp_assocs_id); + /* Set the pressure threshold to be a fraction of global memory that + * is up to 1/2 at 256 MB, decreasing toward zero with the amount of + * memory, with a floor of 128 pages. + * Note this initalizes the data in sctpv6_prot too + * Unabashedly stolen from tcp_init + */ + limit = min(num_physpages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); + limit = (limit * (num_physpages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); + limit = max(limit, 128UL); + sysctl_sctp_mem[0] = limit / 4 * 3; + sysctl_sctp_mem[1] = limit; + sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; + + /* Set per-socket limits to no more than 1/128 the pressure threshold*/ + limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); + max_share = min(4UL*1024*1024, limit); + + sysctl_sctp_rmem[0] = PAGE_SIZE; /* give each asoc 1 page min */ + sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1)); + sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); + + sysctl_sctp_wmem[0] = SK_STREAM_MEM_QUANTUM; + sysctl_sctp_wmem[1] = 16*1024; + sysctl_sctp_wmem[2] = max(64*1024, max_share); + /* Size and allocate the association hash table. * The methodology is similar to that of the tcp hash tables. */ @@ -1139,9 +1172,6 @@ SCTP_STATIC __init int sctp_init(void) sctp_port_hashtable[i].chain = NULL; } - spin_lock_init(&sctp_port_alloc_lock); - sctp_port_rover = sysctl_local_port_range[0] - 1; - printk(KERN_INFO "SCTP: Hash tables configured " "(established %d bind %d)\n", sctp_assoc_hashsize, sctp_port_hashsize); @@ -1152,6 +1182,9 @@ SCTP_STATIC __init int sctp_init(void) /* Enable PR-SCTP by default. */ sctp_prsctp_enable = 1; + /* Disable AUTH by default. 
*/ + sctp_auth_enable = 0; + sctp_sysctl_register(); INIT_LIST_HEAD(&sctp_address_families); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 23ae37ec871..f983a369d4e 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -179,6 +179,11 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, sctp_supported_addrs_param_t sat; __be16 types[2]; sctp_adaptation_ind_param_t aiparam; + sctp_supported_ext_param_t ext_param; + int num_ext = 0; + __u8 extensions[3]; + sctp_paramhdr_t *auth_chunks = NULL, + *auth_hmacs = NULL; /* RFC 2960 3.3.2 Initiation (INIT) (1) * @@ -202,11 +207,52 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types); chunksize += sizeof(ecap_param); - if (sctp_prsctp_enable) + if (sctp_prsctp_enable) { chunksize += sizeof(prsctp_param); + extensions[num_ext] = SCTP_CID_FWD_TSN; + num_ext += 1; + } + /* ADDIP: Section 4.2.7: + * An implementation supporting this extension [ADDIP] MUST list + * the ASCONF,the ASCONF-ACK, and the AUTH chunks in its INIT and + * INIT-ACK parameters. + */ + if (sctp_addip_enable) { + extensions[num_ext] = SCTP_CID_ASCONF; + extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; + num_ext += 2; + } + chunksize += sizeof(aiparam); chunksize += vparam_len; + /* Account for AUTH related parameters */ + if (sctp_auth_enable) { + /* Add random parameter length*/ + chunksize += sizeof(asoc->c.auth_random); + + /* Add HMACS parameter length if any were defined */ + auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; + if (auth_hmacs->length) + chunksize += ntohs(auth_hmacs->length); + else + auth_hmacs = NULL; + + /* Add CHUNKS parameter length */ + auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; + if (auth_chunks->length) + chunksize += ntohs(auth_chunks->length); + else + auth_hmacs = NULL; + + extensions[num_ext] = SCTP_CID_AUTH; + num_ext += 1; + } + + /* If we have any extensions to report, account for that */ + if (num_ext) + chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; + /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 3: An INIT chunk MUST NOT contain more than one Host @@ -241,12 +287,38 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, sctp_addto_chunk(retval, num_types * sizeof(__u16), &types); sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); + + /* Add the supported extensions paramter. 
Be nice and add this + * fist before addiding the parameters for the extensions themselves + */ + if (num_ext) { + ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; + ext_param.param_hdr.length = + htons(sizeof(sctp_supported_ext_param_t) + num_ext); + sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), + &ext_param); + sctp_addto_chunk(retval, num_ext, extensions); + } + if (sctp_prsctp_enable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); + aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; aiparam.param_hdr.length = htons(sizeof(aiparam)); aiparam.adaptation_ind = htonl(sp->adaptation_ind); sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); + + /* Add SCTP-AUTH chunks to the parameter list */ + if (sctp_auth_enable) { + sctp_addto_chunk(retval, sizeof(asoc->c.auth_random), + asoc->c.auth_random); + if (auth_hmacs) + sctp_addto_chunk(retval, ntohs(auth_hmacs->length), + auth_hmacs); + if (auth_chunks) + sctp_addto_chunk(retval, ntohs(auth_chunks->length), + auth_chunks); + } nodata: kfree(addrs.v); return retval; @@ -264,6 +336,12 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, int cookie_len; size_t chunksize; sctp_adaptation_ind_param_t aiparam; + sctp_supported_ext_param_t ext_param; + int num_ext = 0; + __u8 extensions[3]; + sctp_paramhdr_t *auth_chunks = NULL, + *auth_hmacs = NULL, + *auth_random = NULL; retval = NULL; @@ -294,11 +372,41 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, chunksize += sizeof(ecap_param); /* Tell peer that we'll do PR-SCTP only if peer advertised. */ - if (asoc->peer.prsctp_capable) + if (asoc->peer.prsctp_capable) { chunksize += sizeof(prsctp_param); + extensions[num_ext] = SCTP_CID_FWD_TSN; + num_ext += 1; + } + + if (sctp_addip_enable) { + extensions[num_ext] = SCTP_CID_ASCONF; + extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; + num_ext += 2; + } + chunksize += sizeof(ext_param) + num_ext; chunksize += sizeof(aiparam); + if (asoc->peer.auth_capable) { + auth_random = (sctp_paramhdr_t *)asoc->c.auth_random; + chunksize += ntohs(auth_random->length); + + auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; + if (auth_hmacs->length) + chunksize += ntohs(auth_hmacs->length); + else + auth_hmacs = NULL; + + auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; + if (auth_chunks->length) + chunksize += ntohs(auth_chunks->length); + else + auth_chunks = NULL; + + extensions[num_ext] = SCTP_CID_AUTH; + num_ext += 1; + } + /* Now allocate and fill out the chunk. 
*/ retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); if (!retval) @@ -314,6 +422,14 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, sctp_addto_chunk(retval, cookie_len, cookie); if (asoc->peer.ecn_capable) sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); + if (num_ext) { + ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; + ext_param.param_hdr.length = + htons(sizeof(sctp_supported_ext_param_t) + num_ext); + sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), + &ext_param); + sctp_addto_chunk(retval, num_ext, extensions); + } if (asoc->peer.prsctp_capable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); @@ -322,6 +438,17 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, aiparam.adaptation_ind = htonl(sctp_sk(asoc->base.sk)->adaptation_ind); sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); + if (asoc->peer.auth_capable) { + sctp_addto_chunk(retval, ntohs(auth_random->length), + auth_random); + if (auth_hmacs) + sctp_addto_chunk(retval, ntohs(auth_hmacs->length), + auth_hmacs); + if (auth_chunks) + sctp_addto_chunk(retval, ntohs(auth_chunks->length), + auth_chunks); + } + /* We need to remove the const qualifier at this point. */ retval->asoc = (struct sctp_association *) asoc; @@ -839,6 +966,26 @@ err_chunk: return retval; } +/* Append bytes to the end of a parameter. Will panic if chunk is not big + * enough. + */ +static void *sctp_addto_param(struct sctp_chunk *chunk, int len, + const void *data) +{ + void *target; + int chunklen = ntohs(chunk->chunk_hdr->length); + + target = skb_put(chunk->skb, len); + + memcpy(target, data, len); + + /* Adjust the chunk length field. */ + chunk->chunk_hdr->length = htons(chunklen + len); + chunk->chunk_end = skb_tail_pointer(chunk->skb); + + return target; +} + /* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */ struct sctp_chunk *sctp_make_abort_violation( const struct sctp_association *asoc, @@ -964,6 +1111,41 @@ nodata: return retval; } +struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc) +{ + struct sctp_chunk *retval; + struct sctp_hmac *hmac_desc; + struct sctp_authhdr auth_hdr; + __u8 *hmac; + + /* Get the first hmac that the peer told us to use */ + hmac_desc = sctp_auth_asoc_get_hmac(asoc); + if (unlikely(!hmac_desc)) + return NULL; + + retval = sctp_make_chunk(asoc, SCTP_CID_AUTH, 0, + hmac_desc->hmac_len + sizeof(sctp_authhdr_t)); + if (!retval) + return NULL; + + auth_hdr.hmac_id = htons(hmac_desc->hmac_id); + auth_hdr.shkey_id = htons(asoc->active_key_id); + + retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(sctp_authhdr_t), + &auth_hdr); + + hmac = skb_put(retval->skb, hmac_desc->hmac_len); + memset(hmac, 0, hmac_desc->hmac_len); + + /* Adjust the chunk header to include the empty MAC */ + retval->chunk_hdr->length = + htons(ntohs(retval->chunk_hdr->length) + hmac_desc->hmac_len); + retval->chunk_end = skb_tail_pointer(retval->skb); + + return retval; +} + + /******************************************************************** * 2nd Level Abstractions ********************************************************************/ @@ -1078,6 +1260,10 @@ struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, retval->chunk_hdr = chunk_hdr; retval->chunk_end = ((__u8 *)chunk_hdr) + sizeof(struct sctp_chunkhdr); + /* Determine if the chunk needs to be authenticated */ + if (sctp_auth_send_cid(type, asoc)) + retval->auth = 1; + /* Set the skb to the belonging sock for accounting. 
*/ skb->sk = sk; @@ -1146,25 +1332,6 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) return target; } -/* Append bytes to the end of a parameter. Will panic if chunk is not big - * enough. - */ -void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data) -{ - void *target; - int chunklen = ntohs(chunk->chunk_hdr->length); - - target = skb_put(chunk->skb, len); - - memcpy(target, data, len); - - /* Adjust the chunk length field. */ - chunk->chunk_hdr->length = htons(chunklen + len); - chunk->chunk_end = skb_tail_pointer(chunk->skb); - - return target; -} - /* Append bytes from user space to the end of a chunk. Will panic if * chunk is not big enough. * Returns a kernel err value. @@ -1663,6 +1830,35 @@ static int sctp_process_hn_param(const struct sctp_association *asoc, return 0; } +static void sctp_process_ext_param(struct sctp_association *asoc, + union sctp_params param) +{ + __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); + int i; + + for (i = 0; i < num_ext; i++) { + switch (param.ext->chunks[i]) { + case SCTP_CID_FWD_TSN: + if (sctp_prsctp_enable && + !asoc->peer.prsctp_capable) + asoc->peer.prsctp_capable = 1; + break; + case SCTP_CID_AUTH: + /* if the peer reports AUTH, assume that he + * supports AUTH. + */ + asoc->peer.auth_capable = 1; + break; + case SCTP_CID_ASCONF: + case SCTP_CID_ASCONF_ACK: + asoc->peer.addip_capable = 1; + break; + default: + break; + } + } +} + /* RFC 3.2.1 & the Implementers Guide 2.2. * * The Parameter Types are encoded such that the @@ -1779,15 +1975,52 @@ static int sctp_verify_param(const struct sctp_association *asoc, case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: case SCTP_PARAM_ECN_CAPABLE: case SCTP_PARAM_ADAPTATION_LAYER_IND: + case SCTP_PARAM_SUPPORTED_EXT: break; case SCTP_PARAM_HOST_NAME_ADDRESS: /* Tell the peer, we won't support this param. */ return sctp_process_hn_param(asoc, param, chunk, err_chunk); + case SCTP_PARAM_FWD_TSN_SUPPORT: if (sctp_prsctp_enable) break; + goto fallthrough; + + case SCTP_PARAM_RANDOM: + if (!sctp_auth_enable) + goto fallthrough; + + /* SCTP-AUTH: Secion 6.1 + * If the random number is not 32 byte long the association + * MUST be aborted. The ABORT chunk SHOULD contain the error + * cause 'Protocol Violation'. + */ + if (SCTP_AUTH_RANDOM_LENGTH != + ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) + return sctp_process_inv_paramlength(asoc, param.p, + chunk, err_chunk); + break; + + case SCTP_PARAM_CHUNKS: + if (!sctp_auth_enable) + goto fallthrough; + + /* SCTP-AUTH: Section 3.2 + * The CHUNKS parameter MUST be included once in the INIT or + * INIT-ACK chunk if the sender wants to receive authenticated + * chunks. Its maximum length is 260 bytes. + */ + if (260 < ntohs(param.p->length)) + return sctp_process_inv_paramlength(asoc, param.p, + chunk, err_chunk); + break; + + case SCTP_PARAM_HMAC_ALGO: + if (!sctp_auth_enable) + break; /* Fall Through */ +fallthrough: default: SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n", ntohs(param.p->type), cid); @@ -1892,13 +2125,29 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, } /* Process the initialization parameters. */ - sctp_walk_params(param, peer_init, init_hdr.params) { if (!sctp_process_param(asoc, param, peer_addr, gfp)) goto clean_up; } + /* AUTH: After processing the parameters, make sure that we + * have all the required info to potentially do authentications. 
+ */ + if (asoc->peer.auth_capable && (!asoc->peer.peer_random || + !asoc->peer.peer_hmacs)) + asoc->peer.auth_capable = 0; + + + /* If the peer claims support for ADD-IP without support + * for AUTH, disable support for ADD-IP. + */ + if (asoc->peer.addip_capable && !asoc->peer.auth_capable) { + asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP | + SCTP_PARAM_DEL_IP | + SCTP_PARAM_SET_PRIMARY); + } + /* Walk list of transports, removing transports in the UNKNOWN state. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); @@ -2128,12 +2377,57 @@ static int sctp_process_param(struct sctp_association *asoc, asoc->peer.adaptation_ind = param.aind->adaptation_ind; break; + case SCTP_PARAM_SUPPORTED_EXT: + sctp_process_ext_param(asoc, param); + break; + case SCTP_PARAM_FWD_TSN_SUPPORT: if (sctp_prsctp_enable) { asoc->peer.prsctp_capable = 1; break; } /* Fall Through */ + goto fall_through; + + case SCTP_PARAM_RANDOM: + if (!sctp_auth_enable) + goto fall_through; + + /* Save peer's random parameter */ + asoc->peer.peer_random = kmemdup(param.p, + ntohs(param.p->length), gfp); + if (!asoc->peer.peer_random) { + retval = 0; + break; + } + break; + + case SCTP_PARAM_HMAC_ALGO: + if (!sctp_auth_enable) + goto fall_through; + + /* Save peer's HMAC list */ + asoc->peer.peer_hmacs = kmemdup(param.p, + ntohs(param.p->length), gfp); + if (!asoc->peer.peer_hmacs) { + retval = 0; + break; + } + + /* Set the default HMAC the peer requested*/ + sctp_auth_asoc_set_default_hmac(asoc, param.hmac_algo); + break; + + case SCTP_PARAM_CHUNKS: + if (!sctp_auth_enable) + goto fall_through; + + asoc->peer.peer_chunks = kmemdup(param.p, + ntohs(param.p->length), gfp); + if (!asoc->peer.peer_chunks) + retval = 0; + break; +fall_through: default: /* Any unrecognized parameters should have been caught * and handled by sctp_verify_param() which should be diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 8d789008349..bbdc938da86 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1524,6 +1524,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, sctp_cmd_adaptation_ind(commands, asoc); break; + case SCTP_CMD_ASSOC_SHKEY: + error = sctp_auth_asoc_init_active_key(asoc, + GFP_ATOMIC); + break; + default: printk(KERN_WARNING "Impossible command: %u, %p\n", cmd->verb, cmd->obj.ptr); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index a583d67cab6..f01b408508f 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -138,6 +138,11 @@ static sctp_disposition_t sctp_sf_violation_chunk( void *arg, sctp_cmd_seq_t *commands); +static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + struct sctp_chunk *chunk); + /* Small helper function that checks if the chunk length * is of the appropriate length. The 'required_length' argument * is set to be the size of a specific chunk we are testing. @@ -495,8 +500,6 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, &err_chunk)) { - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); - /* This chunk contains fatal error. It is to be discarded. * Send an ABORT, with causes if there is any. 
*/ @@ -521,6 +524,22 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); error = SCTP_ERROR_INV_PARAM; } + + /* SCTP-AUTH, Section 6.3: + * It should be noted that if the receiver wants to tear + * down an association in an authenticated way only, the + * handling of malformed packets should not result in + * tearing down the association. + * + * This means that if we only want to abort associations + * in an authenticated way (i.e AUTH+ABORT), then we + * can't destory this association just becuase the packet + * was malformed. + */ + if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) + return sctp_sf_pdiscard(ep, asoc, type, arg, commands); + + SCTP_INC_STATS(SCTP_MIB_ABORTEDS); return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc, chunk->transport); } @@ -549,6 +568,11 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_COOKIE_ECHOED)); + /* SCTP-AUTH: genereate the assocition shared keys so that + * we can potentially signe the COOKIE-ECHO. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL()); + /* 5.1 C) "A" shall then send the State Cookie received in the * INIT ACK chunk in a COOKIE ECHO chunk, ... */ @@ -686,6 +710,44 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, peer_init, GFP_ATOMIC)) goto nomem_init; + /* SCTP-AUTH: Now that we've populate required fields in + * sctp_process_init, set up the assocaition shared keys as + * necessary so that we can potentially authenticate the ACK + */ + error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC); + if (error) + goto nomem_init; + + /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo + * is supposed to be authenticated and we have to do delayed + * authentication. We've just recreated the association using + * the information in the cookie and now it's much easier to + * do the authentication. + */ + if (chunk->auth_chunk) { + struct sctp_chunk auth; + sctp_ierror_t ret; + + /* set-up our fake chunk so that we can process it */ + auth.skb = chunk->auth_chunk; + auth.asoc = chunk->asoc; + auth.sctp_hdr = chunk->sctp_hdr; + auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk, + sizeof(sctp_chunkhdr_t)); + skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t)); + auth.transport = chunk->transport; + + ret = sctp_sf_authenticate(ep, new_asoc, type, &auth); + + /* We can now safely free the auth_chunk clone */ + kfree_skb(chunk->auth_chunk); + + if (ret != SCTP_IERROR_NO_ERROR) { + sctp_association_free(new_asoc); + return sctp_sf_pdiscard(ep, asoc, type, arg, commands); + } + } + repl = sctp_make_cookie_ack(new_asoc, chunk); if (!repl) goto nomem_init; @@ -1247,6 +1309,26 @@ static void sctp_tietags_populate(struct sctp_association *new_asoc, new_asoc->c.initial_tsn = asoc->c.initial_tsn; } +static void sctp_auth_params_populate(struct sctp_association *new_asoc, + const struct sctp_association *asoc) +{ + /* Only perform this if AUTH extension is enabled */ + if (!sctp_auth_enable) + return; + + /* We need to provide the same parameter information as + * was in the original INIT. This means that we need to copy + * the HMACS, CHUNKS, and RANDOM parameter from the original + * assocaition. 
+ */ + memcpy(new_asoc->c.auth_random, asoc->c.auth_random, + sizeof(asoc->c.auth_random)); + memcpy(new_asoc->c.auth_hmacs, asoc->c.auth_hmacs, + sizeof(asoc->c.auth_hmacs)); + memcpy(new_asoc->c.auth_chunks, asoc->c.auth_chunks, + sizeof(asoc->c.auth_chunks)); +} + /* * Compare vtag/tietag values to determine unexpected COOKIE-ECHO * handling action. @@ -1404,6 +1486,8 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( sctp_tietags_populate(new_asoc, asoc); + sctp_auth_params_populate(new_asoc, asoc); + /* B) "Z" shall respond immediately with an INIT ACK chunk. */ /* If there are errors need to be reported for unknown parameters, @@ -3618,6 +3702,169 @@ gen_shutdown: } /* + * SCTP-AUTH Section 6.3 Receving authenticated chukns + * + * The receiver MUST use the HMAC algorithm indicated in the HMAC + * Identifier field. If this algorithm was not specified by the + * receiver in the HMAC-ALGO parameter in the INIT or INIT-ACK chunk + * during association setup, the AUTH chunk and all chunks after it MUST + * be discarded and an ERROR chunk SHOULD be sent with the error cause + * defined in Section 4.1. + * + * If an endpoint with no shared key receives a Shared Key Identifier + * other than 0, it MUST silently discard all authenticated chunks. If + * the endpoint has at least one endpoint pair shared key for the peer, + * it MUST use the key specified by the Shared Key Identifier if a + * key has been configured for that Shared Key Identifier. If no + * endpoint pair shared key has been configured for that Shared Key + * Identifier, all authenticated chunks MUST be silently discarded. + * + * Verification Tag: 8.5 Verification Tag [Normal verification] + * + * The return value is the disposition of the chunk. + */ +static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + struct sctp_chunk *chunk) +{ + struct sctp_authhdr *auth_hdr; + struct sctp_hmac *hmac; + unsigned int sig_len; + __u16 key_id; + __u8 *save_digest; + __u8 *digest; + + /* Pull in the auth header, so we can do some more verification */ + auth_hdr = (struct sctp_authhdr *)chunk->skb->data; + chunk->subh.auth_hdr = auth_hdr; + skb_pull(chunk->skb, sizeof(struct sctp_authhdr)); + + /* Make sure that we suport the HMAC algorithm from the auth + * chunk. + */ + if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id)) + return SCTP_IERROR_AUTH_BAD_HMAC; + + /* Make sure that the provided shared key identifier has been + * configured + */ + key_id = ntohs(auth_hdr->shkey_id); + if (key_id != asoc->active_key_id && !sctp_auth_get_shkey(asoc, key_id)) + return SCTP_IERROR_AUTH_BAD_KEYID; + + + /* Make sure that the length of the signature matches what + * we expect. + */ + sig_len = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_auth_chunk_t); + hmac = sctp_auth_get_hmac(ntohs(auth_hdr->hmac_id)); + if (sig_len != hmac->hmac_len) + return SCTP_IERROR_PROTO_VIOLATION; + + /* Now that we've done validation checks, we can compute and + * verify the hmac. The steps involved are: + * 1. Save the digest from the chunk. + * 2. Zero out the digest in the chunk. + * 3. Compute the new digest + * 4. Compare saved and new digests. 
+ */ + digest = auth_hdr->hmac; + skb_pull(chunk->skb, sig_len); + + save_digest = kmemdup(digest, sig_len, GFP_ATOMIC); + if (!save_digest) + goto nomem; + + memset(digest, 0, sig_len); + + sctp_auth_calculate_hmac(asoc, chunk->skb, + (struct sctp_auth_chunk *)chunk->chunk_hdr, + GFP_ATOMIC); + + /* Discard the packet if the digests do not match */ + if (memcmp(save_digest, digest, sig_len)) { + kfree(save_digest); + return SCTP_IERROR_BAD_SIG; + } + + kfree(save_digest); + chunk->auth = 1; + + return SCTP_IERROR_NO_ERROR; +nomem: + return SCTP_IERROR_NOMEM; +} + +sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_authhdr *auth_hdr; + struct sctp_chunk *chunk = arg; + struct sctp_chunk *err_chunk; + sctp_ierror_t error; + + if (!sctp_vtag_verify(chunk, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, + SCTP_NULL()); + return sctp_sf_pdiscard(ep, asoc, type, arg, commands); + } + + /* Make sure that the AUTH chunk has valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk))) + return sctp_sf_violation_chunklen(ep, asoc, type, arg, + commands); + + auth_hdr = (struct sctp_authhdr *)chunk->skb->data; + error = sctp_sf_authenticate(ep, asoc, type, chunk); + switch (error) { + case SCTP_IERROR_AUTH_BAD_HMAC: + /* Generate the ERROR chunk and discard the rest + * of the packet + */ + err_chunk = sctp_make_op_error(asoc, chunk, + SCTP_ERROR_UNSUP_HMAC, + &auth_hdr->hmac_id, + sizeof(__u16)); + if (err_chunk) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(err_chunk)); + } + /* Fall Through */ + case SCTP_IERROR_AUTH_BAD_KEYID: + case SCTP_IERROR_BAD_SIG: + return sctp_sf_pdiscard(ep, asoc, type, arg, commands); + break; + case SCTP_IERROR_PROTO_VIOLATION: + return sctp_sf_violation_chunklen(ep, asoc, type, arg, + commands); + break; + case SCTP_IERROR_NOMEM: + return SCTP_DISPOSITION_NOMEM; + default: + break; + } + + if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) { + struct sctp_ulpevent *ev; + + ev = sctp_ulpevent_make_authkey(asoc, ntohs(auth_hdr->shkey_id), + SCTP_AUTH_NEWKEY, GFP_ATOMIC); + + if (!ev) + return -ENOMEM; + + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, + SCTP_ULPEVENT(ev)); + } + + return SCTP_DISPOSITION_CONSUME; +} + +/* * Process an unknown chunk. * * Section: 3.2. Also, 2.1 in the implementor's guide. @@ -3822,6 +4069,20 @@ static sctp_disposition_t sctp_sf_abort_violation( if (!abort) goto nomem; + /* SCTP-AUTH, Section 6.3: + * It should be noted that if the receiver wants to tear + * down an association in an authenticated way only, the + * handling of malformed packets should not result in + * tearing down the association. + * + * This means that if we only want to abort associations + * in an authenticated way (i.e AUTH+ABORT), then we + * can't destory this association just becuase the packet + * was malformed. 
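The authentication path above saves the transmitted digest, zeroes the digest field, recomputes the HMAC over the chunk, and compares the two. Here is a userspace illustration of the same verify-by-recompute pattern, written against OpenSSL's one-shot HMAC() with SHA-1 and a single raw key; the kernel instead derives an association shared key and uses its own crypto layer, so names and key handling here are assumptions:

    #include <string.h>
    #include <openssl/evp.h>
    #include <openssl/hmac.h>

    /*
     * 'msg' covers the bytes the HMAC is computed over; 'digest' points at
     * the HMAC field inside 'msg'.  Returns 1 only on a match.
     */
    static int verify_hmac_sha1(unsigned char *msg, size_t msg_len,
                                unsigned char *digest, size_t digest_len,
                                const unsigned char *key, int key_len)
    {
        unsigned char saved[EVP_MAX_MD_SIZE];
        unsigned char calc[EVP_MAX_MD_SIZE];
        unsigned int calc_len = 0;

        if (digest_len > sizeof(saved))
            return 0;

        /* 1. save the transmitted digest, 2. zero it in place */
        memcpy(saved, digest, digest_len);
        memset(digest, 0, digest_len);

        /* 3. recompute over the message with the digest field zeroed */
        HMAC(EVP_sha1(), key, key_len, msg, msg_len, calc, &calc_len);

        /* 4. accept only when lengths and digests both match */
        return calc_len == digest_len && memcmp(saved, calc, digest_len) == 0;
    }

A real receiver would likely prefer a constant-time comparison; plain memcmp() simply mirrors the kernel hunk above.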
+ */ + if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) + goto discard; + if (asoc) { sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); @@ -3859,6 +4120,7 @@ static sctp_disposition_t sctp_sf_abort_violation( SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); } +discard: sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands); SCTP_INC_STATS(SCTP_MIB_ABORTEDS); @@ -5428,10 +5690,8 @@ static int sctp_eat_data(const struct sctp_association *asoc, sctp_verb_t deliver; int tmp; __u32 tsn; - int account_value; struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; struct sock *sk = asoc->base.sk; - int rcvbuf_over = 0; data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); @@ -5441,48 +5701,6 @@ static int sctp_eat_data(const struct sctp_association *asoc, /* ASSERT: Now skb->data is really the user data. */ - /* - * If we are established, and we have used up our receive buffer - * memory, think about droping the frame. - * Note that we have an opportunity to improve performance here. - * If we accept one chunk from an skbuff, we have to keep all the - * memory of that skbuff around until the chunk is read into user - * space. Therefore, once we accept 1 chunk we may as well accept all - * remaining chunks in the skbuff. The data_accepted flag helps us do - * that. - */ - if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) { - /* - * If the receive buffer policy is 1, then each - * association can allocate up to sk_rcvbuf bytes - * otherwise, all the associations in aggregate - * may allocate up to sk_rcvbuf bytes - */ - if (asoc->ep->rcvbuf_policy) - account_value = atomic_read(&asoc->rmem_alloc); - else - account_value = atomic_read(&sk->sk_rmem_alloc); - if (account_value > sk->sk_rcvbuf) { - /* - * We need to make forward progress, even when we are - * under memory pressure, so we always allow the - * next tsn after the ctsn ack point to be accepted. - * This lets us avoid deadlocks in which we have to - * drop frames that would otherwise let us drain the - * receive queue. - */ - if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn) - return SCTP_IERROR_IGNORE_TSN; - - /* - * We're going to accept the frame but we should renege - * to make space for it. This will send us down that - * path later in this function. - */ - rcvbuf_over = 1; - } - } - /* Process ECN based congestion. * * Since the chunk structure is reused for all chunks within @@ -5542,18 +5760,9 @@ static int sctp_eat_data(const struct sctp_association *asoc, * seems a bit troublesome in that frag_point varies based on * PMTU. In cases, such as loopback, this might be a rather * large spill over. - * NOTE: If we have a full receive buffer here, we only renege if - * our receiver can still make progress without the tsn being - * received. We do this because in the event that the associations - * receive queue is empty we are filling a leading gap, and since - * reneging moves the gap to the end of the tsn stream, we are likely - * to stall again very shortly. Avoiding the renege when we fill a - * leading gap is a good heuristic for avoiding such steady state - * stalls. - */ - if (!asoc->rwnd || asoc->rwnd_over || - (datalen > asoc->rwnd + asoc->frag_point) || - (rcvbuf_over && (!skb_queue_len(&sk->sk_receive_queue)))) { + */ + if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || + (datalen > asoc->rwnd + asoc->frag_point))) { /* If this is the next TSN, consider reneging to make * room. 
Note: Playing nice with a confused sender. A @@ -5574,6 +5783,21 @@ static int sctp_eat_data(const struct sctp_association *asoc, } /* + * Also try to renege to limit our memory usage in the event that + * we are under memory pressure + * If we can't renege, don't worry about it, the sk_stream_rmem_schedule + * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our + * memory usage too much + */ + if (*sk->sk_prot_creator->memory_pressure) { + if (sctp_tsnmap_has_gap(map) && + (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { + SCTP_DEBUG_PRINTK("Under Pressure! Reneging for tsn:%u\n", tsn); + deliver = SCTP_CMD_RENEGE; + } + } + + /* * Section 3.3.10.9 No User Data (9) * * Cause of error diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index ddb0ba3974b..a93a4bc8f68 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -523,6 +523,34 @@ static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUN TYPE_SCTP_FWD_TSN, }; /*state_fn_t prsctp_chunk_event_table[][] */ +#define TYPE_SCTP_AUTH { \ + /* SCTP_STATE_EMPTY */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ +} /* TYPE_SCTP_AUTH */ + +/* The primary index for this table is the chunk type. + * The secondary index for this table is the state. + */ +static const sctp_sm_table_entry_t auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { + TYPE_SCTP_AUTH, +}; /*state_fn_t auth_chunk_event_table[][] */ + static const sctp_sm_table_entry_t chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { /* SCTP_STATE_EMPTY */ @@ -976,5 +1004,10 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid, return &addip_chunk_event_table[1][state]; } + if (sctp_auth_enable) { + if (cid == SCTP_CID_AUTH) + return &auth_chunk_event_table[0][state]; + } + return &chunk_event_table_unknown[state]; } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 772fbfb4bfd..9c6a4b5f626 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -107,23 +107,42 @@ static void sctp_sock_migrate(struct sock *, struct sock *, struct sctp_association *, sctp_socket_type_t); static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; +extern struct kmem_cache *sctp_bucket_cachep; +extern int sysctl_sctp_mem[3]; +extern int sysctl_sctp_rmem[3]; +extern int sysctl_sctp_wmem[3]; + +static int sctp_memory_pressure; +static atomic_t sctp_memory_allocated; +static atomic_t sctp_sockets_allocated; + +static void sctp_enter_memory_pressure(void) +{ + sctp_memory_pressure = 1; +} + + /* Get the sndbuf space available at the time on the association. 
*/ static inline int sctp_wspace(struct sctp_association *asoc) { - struct sock *sk = asoc->base.sk; - int amt = 0; + int amt; - if (asoc->ep->sndbuf_policy) { - /* make sure that no association uses more than sk_sndbuf */ - amt = sk->sk_sndbuf - asoc->sndbuf_used; + if (asoc->ep->sndbuf_policy) + amt = asoc->sndbuf_used; + else + amt = atomic_read(&asoc->base.sk->sk_wmem_alloc); + + if (amt >= asoc->base.sk->sk_sndbuf) { + if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) + amt = 0; + else { + amt = sk_stream_wspace(asoc->base.sk); + if (amt < 0) + amt = 0; + } } else { - /* do socket level accounting */ - amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); + amt = asoc->base.sk->sk_sndbuf - amt; } - - if (amt < 0) - amt = 0; - return amt; } @@ -155,6 +174,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) sizeof(struct sctp_chunk); atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); + sk_charge_skb(sk, chunk->skb); } /* Verify that this is a valid address. */ @@ -2926,6 +2946,164 @@ static int sctp_setsockopt_maxburst(struct sock *sk, return 0; } +/* + * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) + * + * This set option adds a chunk type that the user is requesting to be + * received only in an authenticated way. Changes to the list of chunks + * will only effect future associations on the socket. + */ +static int sctp_setsockopt_auth_chunk(struct sock *sk, + char __user *optval, + int optlen) +{ + struct sctp_authchunk val; + + if (optlen != sizeof(struct sctp_authchunk)) + return -EINVAL; + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + + switch (val.sauth_chunk) { + case SCTP_CID_INIT: + case SCTP_CID_INIT_ACK: + case SCTP_CID_SHUTDOWN_COMPLETE: + case SCTP_CID_AUTH: + return -EINVAL; + } + + /* add this chunk id to the endpoint */ + return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk); +} + +/* + * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) + * + * This option gets or sets the list of HMAC algorithms that the local + * endpoint requires the peer to use. + */ +static int sctp_setsockopt_hmac_ident(struct sock *sk, + char __user *optval, + int optlen) +{ + struct sctp_hmacalgo *hmacs; + int err; + + if (optlen < sizeof(struct sctp_hmacalgo)) + return -EINVAL; + + hmacs = kmalloc(optlen, GFP_KERNEL); + if (!hmacs) + return -ENOMEM; + + if (copy_from_user(hmacs, optval, optlen)) { + err = -EFAULT; + goto out; + } + + if (hmacs->shmac_num_idents == 0 || + hmacs->shmac_num_idents > SCTP_AUTH_NUM_HMACS) { + err = -EINVAL; + goto out; + } + + err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs); +out: + kfree(hmacs); + return err; +} + +/* + * 7.1.20. Set a shared key (SCTP_AUTH_KEY) + * + * This option will set a shared secret key which is used to build an + * association shared key. + */ +static int sctp_setsockopt_auth_key(struct sock *sk, + char __user *optval, + int optlen) +{ + struct sctp_authkey *authkey; + struct sctp_association *asoc; + int ret; + + if (optlen <= sizeof(struct sctp_authkey)) + return -EINVAL; + + authkey = kmalloc(optlen, GFP_KERNEL); + if (!authkey) + return -ENOMEM; + + if (copy_from_user(authkey, optval, optlen)) { + ret = -EFAULT; + goto out; + } + + asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); + if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { + ret = -EINVAL; + goto out; + } + + ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); +out: + kfree(authkey); + return ret; +} + +/* + * 7.1.21. 
Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) + * + * This option will get or set the active shared key to be used to build + * the association shared key. + */ +static int sctp_setsockopt_active_key(struct sock *sk, + char __user *optval, + int optlen) +{ + struct sctp_authkeyid val; + struct sctp_association *asoc; + + if (optlen != sizeof(struct sctp_authkeyid)) + return -EINVAL; + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + + asoc = sctp_id2assoc(sk, val.scact_assoc_id); + if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc, + val.scact_keynumber); +} + +/* + * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) + * + * This set option will delete a shared secret key from use. + */ +static int sctp_setsockopt_del_key(struct sock *sk, + char __user *optval, + int optlen) +{ + struct sctp_authkeyid val; + struct sctp_association *asoc; + + if (optlen != sizeof(struct sctp_authkeyid)) + return -EINVAL; + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + + asoc = sctp_id2assoc(sk, val.scact_assoc_id); + if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc, + val.scact_keynumber); + +} + + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -3049,6 +3227,21 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_MAX_BURST: retval = sctp_setsockopt_maxburst(sk, optval, optlen); break; + case SCTP_AUTH_CHUNK: + retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); + break; + case SCTP_HMAC_IDENT: + retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); + break; + case SCTP_AUTH_KEY: + retval = sctp_setsockopt_auth_key(sk, optval, optlen); + break; + case SCTP_AUTH_ACTIVE_KEY: + retval = sctp_setsockopt_active_key(sk, optval, optlen); + break; + case SCTP_AUTH_DELETE_KEY: + retval = sctp_setsockopt_del_key(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -3293,6 +3486,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) sp->hmac = NULL; SCTP_DBG_OBJCNT_INC(sock); + atomic_inc(&sctp_sockets_allocated); return 0; } @@ -3306,7 +3500,7 @@ SCTP_STATIC int sctp_destroy_sock(struct sock *sk) /* Release our hold on the endpoint. 
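Taken together, the setsockopt() handlers above give applications control over SCTP-AUTH. A hedged userspace sketch of the usual setup sequence (require DATA to be authenticated, install a shared key, make it active) follows; it assumes a <netinet/sctp.h> that already exports these options and structures from this series, net.sctp.auth_enable set to 1, and purely illustrative key material:

    #include <string.h>
    #include <stdlib.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    static int setup_auth(int sd)
    {
        struct sctp_authchunk ac;
        struct sctp_authkeyid active;
        struct sctp_authkey *key;
        const unsigned char secret[] = "example-shared-secret"; /* illustrative only */
        int ret;

        /* Ask the peer to send DATA chunks only inside AUTH chunks */
        memset(&ac, 0, sizeof(ac));
        ac.sauth_chunk = 0;                    /* DATA chunk type, RFC 4960 */
        if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_CHUNK, &ac, sizeof(ac)) < 0)
            return -1;

        /* Install endpoint-pair shared key number 1 on the endpoint */
        key = calloc(1, sizeof(*key) + sizeof(secret));
        if (!key)
            return -1;
        key->sca_assoc_id = 0;                 /* applies to future associations */
        key->sca_keynumber = 1;
        key->sca_keylength = sizeof(secret);
        memcpy(key->sca_key, secret, sizeof(secret));
        ret = setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_KEY, key,
                         sizeof(*key) + sizeof(secret));
        free(key);
        if (ret < 0)
            return -1;

        /* Make key 1 the active key used to derive association shared keys */
        memset(&active, 0, sizeof(active));
        active.scact_assoc_id = 0;
        active.scact_keynumber = 1;
        return setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
                          &active, sizeof(active));
    }

SCTP_AUTH_DELETE_KEY takes the same struct sctp_authkeyid to remove a previously installed key.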
*/ ep = sctp_sk(sk)->ep; sctp_endpoint_free(ep); - + atomic_dec(&sctp_sockets_allocated); return 0; } @@ -4819,6 +5013,118 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len, return -ENOTSUPP; } +static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_hmac_algo_param *hmacs; + __u16 param_len; + + hmacs = sctp_sk(sk)->ep->auth_hmacs_list; + param_len = ntohs(hmacs->param_hdr.length); + + if (len < param_len) + return -EINVAL; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, hmacs->hmac_ids, len)) + return -EFAULT; + + return 0; +} + +static int sctp_getsockopt_active_key(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_authkeyid val; + struct sctp_association *asoc; + + if (len < sizeof(struct sctp_authkeyid)) + return -EINVAL; + if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) + return -EFAULT; + + asoc = sctp_id2assoc(sk, val.scact_assoc_id); + if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + if (asoc) + val.scact_keynumber = asoc->active_key_id; + else + val.scact_keynumber = sctp_sk(sk)->ep->active_key_id; + + return 0; +} + +static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_authchunks val; + struct sctp_association *asoc; + struct sctp_chunks_param *ch; + char __user *to; + + if (len <= sizeof(struct sctp_authchunks)) + return -EINVAL; + + if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) + return -EFAULT; + + to = val.gauth_chunks; + asoc = sctp_id2assoc(sk, val.gauth_assoc_id); + if (!asoc) + return -EINVAL; + + ch = asoc->peer.peer_chunks; + + /* See if the user provided enough room for all the data */ + if (len < ntohs(ch->param_hdr.length)) + return -EINVAL; + + len = ntohs(ch->param_hdr.length); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(to, ch->chunks, len)) + return -EFAULT; + + return 0; +} + +static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_authchunks val; + struct sctp_association *asoc; + struct sctp_chunks_param *ch; + char __user *to; + + if (len <= sizeof(struct sctp_authchunks)) + return -EINVAL; + + if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) + return -EFAULT; + + to = val.gauth_chunks; + asoc = sctp_id2assoc(sk, val.gauth_assoc_id); + if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + if (asoc) + ch = (struct sctp_chunks_param*)asoc->c.auth_chunks; + else + ch = sctp_sk(sk)->ep->auth_chunk_list; + + if (len < ntohs(ch->param_hdr.length)) + return -EINVAL; + + len = ntohs(ch->param_hdr.length); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(to, ch->chunks, len)) + return -EFAULT; + + return 0; +} + SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -4942,6 +5248,25 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, case SCTP_MAX_BURST: retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); break; + case SCTP_AUTH_KEY: + case SCTP_AUTH_CHUNK: + case SCTP_AUTH_DELETE_KEY: + retval = -EOPNOTSUPP; + break; + case SCTP_HMAC_IDENT: + retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); + break; + case SCTP_AUTH_ACTIVE_KEY: + retval = sctp_getsockopt_active_key(sk, len, optval, optlen); + break; + case 
SCTP_PEER_AUTH_CHUNKS: + retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, + optlen); + break; + case SCTP_LOCAL_AUTH_CHUNKS: + retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, + optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -4989,22 +5314,14 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) sctp_local_bh_disable(); if (snum == 0) { - /* Search for an available port. - * - * 'sctp_port_rover' was the last port assigned, so - * we start to search from 'sctp_port_rover + - * 1'. What we do is first check if port 'rover' is - * already in the hash table; if not, we use that; if - * it is, we try next. - */ - int low = sysctl_local_port_range[0]; - int high = sysctl_local_port_range[1]; - int remaining = (high - low) + 1; - int rover; - int index; - - sctp_spin_lock(&sctp_port_alloc_lock); - rover = sctp_port_rover; + /* Search for an available port. */ + int low, high, remaining, index; + unsigned int rover; + + inet_get_local_port_range(&low, &high); + remaining = (high - low) + 1; + rover = net_random() % remaining + low; + do { rover++; if ((rover < low) || (rover > high)) @@ -5019,8 +5336,6 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) next: sctp_spin_unlock(&head->lock); } while (--remaining > 0); - sctp_port_rover = rover; - sctp_spin_unlock(&sctp_port_alloc_lock); /* Exhausted local port range during search? */ ret = 1; @@ -5720,6 +6035,12 @@ static void sctp_wfree(struct sk_buff *skb) atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); + /* + * This undoes what is done via sk_charge_skb + */ + sk->sk_wmem_queued -= skb->truesize; + sk->sk_forward_alloc += skb->truesize; + sock_wfree(skb); __sctp_write_space(asoc); @@ -5737,6 +6058,11 @@ void sctp_sock_rfree(struct sk_buff *skb) struct sctp_ulpevent *event = sctp_skb2event(skb); atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); + + /* + * Mimic the behavior of sk_stream_rfree + */ + sk->sk_forward_alloc += event->rmem_len; } @@ -6126,6 +6452,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, sctp_release_sock(newsk); } + /* This proto struct describes the ULP interface for SCTP. 
*/ struct proto sctp_prot = { .name = "SCTP", @@ -6148,6 +6475,12 @@ struct proto sctp_prot = { .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp_sock), + .sysctl_mem = sysctl_sctp_mem, + .sysctl_rmem = sysctl_sctp_rmem, + .sysctl_wmem = sysctl_sctp_wmem, + .memory_pressure = &sctp_memory_pressure, + .enter_memory_pressure = sctp_enter_memory_pressure, + .memory_allocated = &sctp_memory_allocated, }; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) @@ -6172,5 +6505,11 @@ struct proto sctpv6_prot = { .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp6_sock), + .sysctl_mem = sysctl_sctp_mem, + .sysctl_rmem = sysctl_sctp_rmem, + .sysctl_wmem = sysctl_sctp_wmem, + .memory_pressure = &sctp_memory_pressure, + .enter_memory_pressure = sctp_enter_memory_pressure, + .memory_allocated = &sctp_memory_allocated, }; #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index e2c679baf91..0669778e433 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -52,6 +52,10 @@ static int int_max = INT_MAX; static long sack_timer_min = 1; static long sack_timer_max = 500; +extern int sysctl_sctp_mem[3]; +extern int sysctl_sctp_rmem[3]; +extern int sysctl_sctp_wmem[3]; + static ctl_table sctp_table[] = { { .ctl_name = NET_SCTP_RTO_INITIAL, @@ -226,6 +230,39 @@ static ctl_table sctp_table[] = { .extra1 = &sack_timer_min, .extra2 = &sack_timer_max, }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "sctp_mem", + .data = &sysctl_sctp_mem, + .maxlen = sizeof(sysctl_sctp_mem), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "sctp_rmem", + .data = &sysctl_sctp_rmem, + .maxlen = sizeof(sysctl_sctp_rmem), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "sctp_wmem", + .data = &sysctl_sctp_wmem, + .maxlen = sizeof(sysctl_sctp_wmem), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "auth_enable", + .data = &sctp_auth_enable, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + .strategy = &sysctl_intvec + }, { .ctl_name = 0 } }; diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index bfecb353ab3..2c17c7efad4 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -685,6 +685,24 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, struct sctp_ulpevent *event = NULL; struct sk_buff *skb; size_t padding, len; + int rx_count; + + /* + * check to see if we need to make space for this + * new skb, expand the rcvbuffer if needed, or drop + * the frame + */ + if (asoc->ep->rcvbuf_policy) + rx_count = atomic_read(&asoc->rmem_alloc); + else + rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); + + if (rx_count >= asoc->base.sk->sk_rcvbuf) { + + if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) || + (!sk_stream_rmem_schedule(asoc->base.sk, chunk->skb))) + goto fail; + } /* Clone the original skb, sharing the data. 
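The sysctl table above also exposes the new AUTH switch; sctp_init() leaves sctp_auth_enable off by default, so it has to be turned on explicitly. A trivial way to do that from C (the shell equivalent is writing 1 to /proc/sys/net/sctp/auth_enable); the proc path is inferred from the table registration and may differ on other configurations:

    #include <stdio.h>

    /* Returns 0 on success, -1 if the sysctl is missing (older kernel). */
    static int sctp_enable_auth_sysctl(void)
    {
        FILE *f = fopen("/proc/sys/net/sctp/auth_enable", "w");

        if (!f)
            return -1;
        fputs("1\n", f);
        return fclose(f) ? -1 : 0;
    }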
*/ skb = skb_clone(chunk->skb, gfp); @@ -795,6 +813,43 @@ fail: return NULL; } +struct sctp_ulpevent *sctp_ulpevent_make_authkey( + const struct sctp_association *asoc, __u16 key_id, + __u32 indication, gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sctp_authkey_event *ak; + struct sk_buff *skb; + + event = sctp_ulpevent_new(sizeof(struct sctp_authkey_event), + MSG_NOTIFICATION, gfp); + if (!event) + goto fail; + + skb = sctp_event2skb(event); + ak = (struct sctp_authkey_event *) + skb_put(skb, sizeof(struct sctp_authkey_event)); + + ak->auth_type = SCTP_AUTHENTICATION_EVENT; + ak->auth_flags = 0; + ak->auth_length = sizeof(struct sctp_authkey_event); + + ak->auth_keynumber = key_id; + ak->auth_altkeynumber = 0; + ak->auth_indication = indication; + + /* + * The association id field, holds the identifier for the association. + */ + sctp_ulpevent_set_owner(event, asoc); + ak->auth_assoc_id = sctp_assoc2id(asoc); + + return event; +fail: + return NULL; +} + + /* Return the notification type, assuming this is a notification * event. */ diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index fa0ba2a5564..b9370956b18 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -1027,6 +1027,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, sctp_ulpq_partial_delivery(ulpq, chunk, gfp); } + sk_stream_mem_reclaim(asoc->base.sk); return; } |
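For completeness, a hedged sketch of how an application would consume the SCTP_AUTHENTICATION_EVENT notification built by sctp_ulpevent_make_authkey() above. It assumes a <netinet/sctp.h> carrying struct sctp_authkey_event from this series and that the application has subscribed to authentication events; buf and msg_flags would come from a recvmsg() call on the SCTP socket:

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/socket.h>
    #include <netinet/sctp.h>

    static void handle_notification(const char *buf, size_t len, int msg_flags)
    {
        const struct sctp_authkey_event *ak;

        /* Only notification messages large enough to hold the event count */
        if (!(msg_flags & MSG_NOTIFICATION) ||
            len < sizeof(struct sctp_authkey_event))
            return;

        ak = (const struct sctp_authkey_event *)buf;
        if (ak->auth_type != SCTP_AUTHENTICATION_EVENT)
            return;

        /* The kernel raises SCTP_AUTH_NEWKEY when the peer switches keys */
        if (ak->auth_indication == SCTP_AUTH_NEWKEY)
            printf("assoc %d now uses shared key %u\n",
                   (int)ak->auth_assoc_id, (unsigned)ak->auth_keynumber);
    }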