Diffstat (limited to 'net')
-rw-r--r--   net/dccp/ipv4.c              6
-rw-r--r--   net/dccp/ipv6.c              6
-rw-r--r--   net/dccp/minisocks.c         2
-rw-r--r--   net/ipv4/Kconfig            16
-rw-r--r--   net/ipv4/tcp.c             137
-rw-r--r--   net/ipv4/tcp_input.c         8
-rw-r--r--   net/ipv4/tcp_ipv4.c        673
-rw-r--r--   net/ipv4/tcp_minisocks.c    64
-rw-r--r--   net/ipv4/tcp_output.c      111
-rw-r--r--   net/ipv6/tcp_ipv6.c        568
10 files changed, 1531 insertions, 60 deletions
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 34d6d197c3b..35985334dae 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -509,7 +509,7 @@ out:
return err;
}
-static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
+static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
int err;
struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
@@ -724,7 +724,7 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
reset:
- dccp_v4_ctl_send_reset(skb);
+ dccp_v4_ctl_send_reset(sk, skb);
discard:
kfree_skb(skb);
return 0;
@@ -913,7 +913,7 @@ no_dccp_socket:
if (dh->dccph_type != DCCP_PKT_RESET) {
DCCP_SKB_CB(skb)->dccpd_reset_code =
DCCP_RESET_CODE_NO_CONNECTION;
- dccp_v4_ctl_send_reset(skb);
+ dccp_v4_ctl_send_reset(sk, skb);
}
discard_it:
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index fc326173c21..e0a0607862e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -310,7 +310,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req)
kfree_skb(inet6_rsk(req)->pktopts);
}
-static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
+static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
@@ -805,7 +805,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
reset:
- dccp_v6_ctl_send_reset(skb);
+ dccp_v6_ctl_send_reset(sk, skb);
discard:
if (opt_skb != NULL)
__kfree_skb(opt_skb);
@@ -902,7 +902,7 @@ no_dccp_socket:
if (dh->dccph_type != DCCP_PKT_RESET) {
DCCP_SKB_CB(skb)->dccpd_reset_code =
DCCP_RESET_CODE_NO_CONNECTION;
- dccp_v6_ctl_send_reset(skb);
+ dccp_v6_ctl_send_reset(sk, skb);
}
discard_it:
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 0c49733f5be..3975048d809 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -246,7 +246,7 @@ listen_overflow:
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
- req->rsk_ops->send_reset(skb);
+ req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req, prev);
goto out;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index bc298bcc344..39e0cb76358 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -618,5 +618,21 @@ config DEFAULT_TCP_CONG
default "reno" if DEFAULT_RENO
default "cubic"
+config TCP_MD5SIG
+ bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ select CRYPTO
+ select CRYPTO_MD5
+ ---help---
+ RFC2385 specifies a method of giving MD5 protection to TCP sessions.
+ Its main (only?) use is to protect BGP sessions between core routers
+ on the Internet.
+
+ If unsure, say N.
+
+config TCP_MD5SIG_DEBUG
+ bool "TCP: MD5 Signature Option debugging"
+ depends on TCP_MD5SIG
+
source "net/ipv4/ipvs/Kconfig"
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c05e8edaf54..dadef867a3b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -258,6 +258,7 @@
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
+#include <linux/crypto.h>
#include <net/icmp.h>
#include <net/tcp.h>
@@ -1942,6 +1943,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
}
break;
+#ifdef CONFIG_TCP_MD5SIG
+ case TCP_MD5SIG:
+ /* Read the IP->Key mappings from userspace */
+ err = tp->af_specific->md5_parse(sk, optval, optlen);
+ break;
+#endif
+
default:
err = -ENOPROTOOPT;
break;
@@ -2231,6 +2239,135 @@ out:
}
EXPORT_SYMBOL(tcp_tso_segment);
+#ifdef CONFIG_TCP_MD5SIG
+static unsigned long tcp_md5sig_users;
+static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
+
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
+ if (p) {
+ if (p->md5_desc.tfm)
+ crypto_free_hash(p->md5_desc.tfm);
+ kfree(p);
+ p = NULL;
+ }
+ }
+ free_percpu(pool);
+}
+
+void tcp_free_md5sig_pool(void)
+{
+ struct tcp_md5sig_pool **pool = NULL;
+
+ spin_lock(&tcp_md5sig_pool_lock);
+ if (--tcp_md5sig_users == 0) {
+ pool = tcp_md5sig_pool;
+ tcp_md5sig_pool = NULL;
+ }
+ spin_unlock(&tcp_md5sig_pool_lock);
+ if (pool)
+ __tcp_free_md5sig_pool(pool);
+}
+
+EXPORT_SYMBOL(tcp_free_md5sig_pool);
+
+struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+{
+ int cpu;
+ struct tcp_md5sig_pool **pool;
+
+ pool = alloc_percpu(struct tcp_md5sig_pool *);
+ if (!pool)
+ return NULL;
+
+ for_each_possible_cpu(cpu) {
+ struct tcp_md5sig_pool *p;
+ struct crypto_hash *hash;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ goto out_free;
+ *per_cpu_ptr(pool, cpu) = p;
+
+ hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (!hash || IS_ERR(hash))
+ goto out_free;
+
+ p->md5_desc.tfm = hash;
+ }
+ return pool;
+out_free:
+ __tcp_free_md5sig_pool(pool);
+ return NULL;
+}
+
+struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
+{
+ struct tcp_md5sig_pool **pool;
+ int alloc = 0;
+
+retry:
+ spin_lock(&tcp_md5sig_pool_lock);
+ pool = tcp_md5sig_pool;
+ if (tcp_md5sig_users++ == 0) {
+ alloc = 1;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ } else if (!pool) {
+ tcp_md5sig_users--;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ cpu_relax();
+ goto retry;
+ } else
+ spin_unlock(&tcp_md5sig_pool_lock);
+
+ if (alloc) {
+ /* we cannot hold spinlock here because this may sleep. */
+ struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
+ spin_lock(&tcp_md5sig_pool_lock);
+ if (!p) {
+ tcp_md5sig_users--;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ return NULL;
+ }
+ pool = tcp_md5sig_pool;
+ if (pool) {
+ /* oops, it has already been assigned. */
+ spin_unlock(&tcp_md5sig_pool_lock);
+ __tcp_free_md5sig_pool(p);
+ } else {
+ tcp_md5sig_pool = pool = p;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ }
+ }
+ return pool;
+}
+
+EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+
+struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+{
+ struct tcp_md5sig_pool **p;
+ spin_lock(&tcp_md5sig_pool_lock);
+ p = tcp_md5sig_pool;
+ if (p)
+ tcp_md5sig_users++;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ return (p ? *per_cpu_ptr(p, cpu) : NULL);
+}
+
+EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+
+void __tcp_put_md5sig_pool(void) {
+ __tcp_free_md5sig_pool(tcp_md5sig_pool);
+}
+
+EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+#endif
+
extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;
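
The pool management above follows a lazy-allocation pattern: the user count is bumped under tcp_md5sig_pool_lock, but the per-cpu allocation itself may sleep, so it runs with the lock dropped and the result is re-checked under the lock in case another caller raced in first. A simplified, single-resource userland analogue of the same shape (names and sizes are illustrative only, not part of the patch):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pool_users;
static void *pool;

void *pool_get(void)
{
	void *p;

	pthread_mutex_lock(&pool_lock);
	p = pool;
	pool_users++;
	pthread_mutex_unlock(&pool_lock);
	if (p)
		return p;

	p = malloc(4096);		/* may block: done without the lock */

	pthread_mutex_lock(&pool_lock);
	if (!p) {
		pool_users--;		/* allocation failed, drop the ref */
	} else if (pool) {
		free(p);		/* somebody else set it up first */
		p = pool;
	} else {
		pool = p;
	}
	pthread_mutex_unlock(&pool_lock);
	return p;
}

void pool_put(void)
{
	void *p = NULL;

	pthread_mutex_lock(&pool_lock);
	if (--pool_users == 0) {	/* last user tears the pool down */
		p = pool;
		pool = NULL;
	}
	pthread_mutex_unlock(&pool_lock);
	free(p);
}

In the patch itself, tcp_alloc_md5sig_pool() and tcp_free_md5sig_pool() are the refcounted pair used when keys are added or removed, while the __tcp_get_md5sig_pool()/tcp_put_md5sig_pool() accessors bracket the actual hashing in tcp_v{4,6}_do_calc_md5_hash().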
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4a8c96cdec7..6ab3423674b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2677,6 +2677,14 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
opt_rx->sack_ok) {
TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
}
+#ifdef CONFIG_TCP_MD5SIG
+ case TCPOPT_MD5SIG:
+ /*
+ * The MD5 Hash has already been
+ * checked (see tcp_v{4,6}_do_rcv()).
+ */
+ break;
+#endif
};
ptr+=opsize-2;
length-=opsize;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0ad0904bf56..8c8e8112f98 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -78,6 +78,9 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
@@ -89,6 +92,13 @@ static struct socket *tcp_socket;
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr);
+static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+ __be32 saddr, __be32 daddr, struct tcphdr *th,
+ int protocol, int tcplen);
+#endif
+
struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
.lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
.lhash_users = ATOMIC_INIT(0),
@@ -526,11 +536,19 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
* Exception: precedence violation. We do not implement it in any case.
*/
-static void tcp_v4_send_reset(struct sk_buff *skb)
+static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
struct tcphdr *th = skb->h.th;
- struct tcphdr rth;
+ struct {
+ struct tcphdr th;
+#ifdef CONFIG_TCP_MD5SIG
+ u32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
+#endif
+ } rep;
struct ip_reply_arg arg;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *key;
+#endif
/* Never send a reset in response to a reset. */
if (th->rst)
@@ -540,29 +558,50 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
return;
/* Swap the send and the receive. */
- memset(&rth, 0, sizeof(struct tcphdr));
- rth.dest = th->source;
- rth.source = th->dest;
- rth.doff = sizeof(struct tcphdr) / 4;
- rth.rst = 1;
+ memset(&rep, 0, sizeof(rep));
+ rep.th.dest = th->source;
+ rep.th.source = th->dest;
+ rep.th.doff = sizeof(struct tcphdr) / 4;
+ rep.th.rst = 1;
if (th->ack) {
- rth.seq = th->ack_seq;
+ rep.th.seq = th->ack_seq;
} else {
- rth.ack = 1;
- rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
- skb->len - (th->doff << 2));
+ rep.th.ack = 1;
+ rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
+ skb->len - (th->doff << 2));
}
memset(&arg, 0, sizeof arg);
- arg.iov[0].iov_base = (unsigned char *)&rth;
- arg.iov[0].iov_len = sizeof rth;
+ arg.iov[0].iov_base = (unsigned char *)&rep;
+ arg.iov[0].iov_len = sizeof(rep.th);
+
+#ifdef CONFIG_TCP_MD5SIG
+ key = sk ? tcp_v4_md5_do_lookup(sk, skb->nh.iph->daddr) : NULL;
+ if (key) {
+ rep.opt[0] = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) |
+ TCPOLEN_MD5SIG);
+ /* Update length and the length the header thinks exists */
+ arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
+ rep.th.doff = arg.iov[0].iov_len / 4;
+
+ tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
+ key,
+ skb->nh.iph->daddr,
+ skb->nh.iph->saddr,
+ &rep.th, IPPROTO_TCP,
+ arg.iov[0].iov_len);
+ }
+#endif
+
arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
skb->nh.iph->saddr, /*XXX*/
sizeof(struct tcphdr), IPPROTO_TCP, 0);
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
- ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
+ ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
@@ -572,15 +611,24 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
outside socket context is ugly, certainly. What can I do?
*/
-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
+ struct sk_buff *skb, u32 seq, u32 ack,
u32 win, u32 ts)
{
struct tcphdr *th = skb->h.th;
struct {
struct tcphdr th;
- u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2];
+ u32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
+#ifdef CONFIG_TCP_MD5SIG
+ + (TCPOLEN_MD5SIG_ALIGNED >> 2)
+#endif
+ ];
} rep;
struct ip_reply_arg arg;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *key;
+ struct tcp_md5sig_key tw_key;
+#endif
memset(&rep.th, 0, sizeof(struct tcphdr));
memset(&arg, 0, sizeof arg);
@@ -588,12 +636,12 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
arg.iov[0].iov_base = (unsigned char *)&rep;
arg.iov[0].iov_len = sizeof(rep.th);
if (ts) {
- rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) |
- TCPOLEN_TIMESTAMP);
- rep.tsopt[1] = htonl(tcp_time_stamp);
- rep.tsopt[2] = htonl(ts);
- arg.iov[0].iov_len = sizeof(rep);
+ rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
+ rep.opt[1] = htonl(tcp_time_stamp);
+ rep.opt[2] = htonl(ts);
+ arg.iov[0].iov_len = TCPOLEN_TSTAMP_ALIGNED;
}
/* Swap the send and the receive. */
@@ -605,6 +653,44 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
rep.th.ack = 1;
rep.th.window = htons(win);
+#ifdef CONFIG_TCP_MD5SIG
+ /*
+ * The SKB holds an incoming packet, but may not have a valid ->sk
+ * pointer. This is especially the case when we're dealing with a
+ * TIME_WAIT ack, because the sk structure is long gone, and only
+ * the tcp_timewait_sock remains. So the md5 key is stashed in that
+ * structure, and we use it in preference. I believe that (twsk ||
+ * skb->sk) holds true, but we program defensively.
+ */
+ if (!twsk && skb->sk) {
+ key = tcp_v4_md5_do_lookup(skb->sk, skb->nh.iph->daddr);
+ } else if (twsk && twsk->tw_md5_keylen) {
+ tw_key.key = twsk->tw_md5_key;
+ tw_key.keylen = twsk->tw_md5_keylen;
+ key = &tw_key;
+ } else {
+ key = NULL;
+ }
+
+ if (key) {
+ int offset = (ts) ? 3 : 0;
+
+ rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) |
+ TCPOLEN_MD5SIG);
+ arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
+ rep.th.doff = arg.iov[0].iov_len/4;
+
+ tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
+ key,
+ skb->nh.iph->daddr,
+ skb->nh.iph->saddr,
+ &rep.th, IPPROTO_TCP,
+ arg.iov[0].iov_len);
+ }
+#endif
+
arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
skb->nh.iph->saddr, /*XXX*/
arg.iov[0].iov_len, IPPROTO_TCP, 0);
@@ -618,9 +704,9 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
struct inet_timewait_sock *tw = inet_twsk(sk);
- const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+ struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
- tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);
inet_twsk_put(tw);
@@ -628,7 +714,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
- tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
+ tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
+ tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
req->ts_recent);
}
@@ -714,6 +801,461 @@ static struct ip_options *tcp_v4_save_options(struct sock *sk,
return dopt;
}
+#ifdef CONFIG_TCP_MD5SIG
+/*
+ * RFC2385 MD5 checksumming requires a mapping of
+ * IP address->MD5 Key.
+ * We need to maintain these in the sk structure.
+ */
+
+/* Find the Key structure for an address. */
+static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int i;
+
+ if (!tp->md5sig_info || !tp->md5sig_info->entries4)
+ return NULL;
+ for (i = 0; i < tp->md5sig_info->entries4; i++) {
+ if (tp->md5sig_info->keys4[i].addr == addr)
+ return (struct tcp_md5sig_key *)&tp->md5sig_info->keys4[i];
+ }
+ return NULL;
+}
+
+struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+ struct sock *addr_sk)
+{
+ return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_lookup);
+
+struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
+ struct request_sock *req)
+{
+ return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
+}
+
+/* This can be called on a newly created socket, from other files */
+int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
+ u8 *newkey, u8 newkeylen)
+{
+ /* Add Key to the list */
+ struct tcp4_md5sig_key *key;
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp4_md5sig_key *keys;
+
+ key = (struct tcp4_md5sig_key *) tcp_v4_md5_do_lookup(sk, addr);
+ if (key) {
+ /* Pre-existing entry - just update that one. */
+ kfree (key->key);
+ key->key = newkey;
+ key->keylen = newkeylen;
+ } else {
+ if (!tp->md5sig_info) {
+ tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
+ if (!tp->md5sig_info) {
+ kfree(newkey);
+ return -ENOMEM;
+ }
+ }
+ if (tcp_alloc_md5sig_pool() == NULL) {
+ kfree(newkey);
+ return -ENOMEM;
+ }
+ if (tp->md5sig_info->alloced4 == tp->md5sig_info->entries4) {
+ keys = kmalloc((sizeof(struct tcp4_md5sig_key) *
+ (tp->md5sig_info->entries4 + 1)), GFP_ATOMIC);
+ if (!keys) {
+ kfree(newkey);
+ tcp_free_md5sig_pool();
+ return -ENOMEM;
+ }
+
+ if (tp->md5sig_info->entries4)
+ memcpy(keys, tp->md5sig_info->keys4,
+ (sizeof (struct tcp4_md5sig_key) *
+ tp->md5sig_info->entries4));
+
+ /* Free old key list, and reference new one */
+ if (tp->md5sig_info->keys4)
+ kfree(tp->md5sig_info->keys4);
+ tp->md5sig_info->keys4 = keys;
+ tp->md5sig_info->alloced4++;
+ }
+ tp->md5sig_info->entries4++;
+ tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].addr = addr;
+ tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].key = newkey;
+ tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].keylen = newkeylen;
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_do_add);
+
+static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
+ u8 *newkey, u8 newkeylen)
+{
+ return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
+ newkey, newkeylen);
+}
+
+int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int i;
+
+ for (i = 0; i < tp->md5sig_info->entries4; i++) {
+ if (tp->md5sig_info->keys4[i].addr == addr) {
+ /* Free the key */
+ kfree(tp->md5sig_info->keys4[i].key);
+ tp->md5sig_info->entries4--;
+
+ if (tp->md5sig_info->entries4 == 0) {
+ kfree(tp->md5sig_info->keys4);
+ tp->md5sig_info->keys4 = NULL;
+ } else {
+ /* Need to do some manipulation */
+ if (tp->md5sig_info->entries4 != i)
+ memcpy(&tp->md5sig_info->keys4[i],
+ &tp->md5sig_info->keys4[i+1],
+ (tp->md5sig_info->entries4 - i)
+ * sizeof (struct tcp4_md5sig_key));
+ }
+ tcp_free_md5sig_pool();
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_do_del);
+
+static void tcp_v4_clear_md5_list (struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* Free each key, then the set of keys,
+ * the crypto element, and then decrement our
+ * hold on the last resort crypto.
+ */
+ if (tp->md5sig_info->entries4) {
+ int i;
+ for (i = 0; i < tp->md5sig_info->entries4; i++)
+ kfree(tp->md5sig_info->keys4[i].key);
+ tp->md5sig_info->entries4 = 0;
+ tcp_free_md5sig_pool();
+ }
+ if (tp->md5sig_info->keys4) {
+ kfree(tp->md5sig_info->keys4);
+ tp->md5sig_info->keys4 = NULL;
+ tp->md5sig_info->alloced4 = 0;
+ }
+}
+
+static int tcp_v4_parse_md5_keys (struct sock *sk, char __user *optval,
+ int optlen)
+{
+ struct tcp_md5sig cmd;
+ struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
+ u8 *newkey;
+
+ if (optlen < sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user (&cmd, optval, sizeof(cmd)))
+ return -EFAULT;
+
+ if (sin->sin_family != AF_INET)
+ return -EINVAL;
+
+ if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
+ if (!tcp_sk(sk)->md5sig_info)
+ return -ENOENT;
+ return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
+ }
+
+ if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
+ return -EINVAL;
+
+ if (!tcp_sk(sk)->md5sig_info) {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_md5sig_info *p;
+
+ p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
+ if (!p)
+ return -EINVAL;
+
+ tp->md5sig_info = p;
+
+ }
+
+ newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL);
+ if (!newkey)
+ return -ENOMEM;
+ memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen);
+ return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
+ newkey, cmd.tcpm_keylen);
+}
+
+static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+ __be32 saddr, __be32 daddr,
+ struct tcphdr *th, int protocol,
+ int tcplen)
+{
+ struct scatterlist sg[4];
+ __u16 data_len;
+ int block = 0;
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ int i;
+#endif
+ __u16 old_checksum;
+ struct tcp_md5sig_pool *hp;
+ struct tcp4_pseudohdr *bp;
+ struct hash_desc *desc;
+ int err;
+ unsigned int nbytes = 0;
+
+ /*
+ * Okay, so RFC2385 is turned on for this connection,
+ * so we need to generate the MD5 hash for the packet now.
+ */
+
+ hp = tcp_get_md5sig_pool();
+ if (!hp)
+ goto clear_hash_noput;
+
+ bp = &hp->md5_blk.ip4;
+ desc = &hp->md5_desc;
+
+ /*
+ * 1. the TCP pseudo-header (in the order: source IP address,
+ * destination IP address, zero-padded protocol number, and
+ * segment length)
+ */
+ bp->saddr = saddr;
+ bp->daddr = daddr;
+ bp->pad = 0;
+ bp->protocol = protocol;
+ bp->len = htons(tcplen);
+ sg_set_buf(&sg[block++], bp, sizeof(*bp));
+ nbytes += sizeof(*bp);
+
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ printk("Calcuating hash for: ");
+ for (i = 0; i < sizeof (*bp); i++)
+ printk ("%02x ", (unsigned int)((unsigned char *)bp)[i]);
+ printk(" ");
+#endif
+
+ /* 2. the TCP header, excluding options, and assuming a
+ * checksum of zero.
+ */
+ old_checksum = th->check;
+ th->check = 0;
+ sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
+ nbytes += sizeof(struct tcphdr);
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ for (i = 0; i < sizeof (struct tcphdr); i++)
+ printk (" %02x", (unsigned int)((unsigned char *)th)[i]);
+#endif
+ /* 3. the TCP segment data (if any) */
+ data_len = tcplen - (th->doff << 2);
+ if (data_len > 0) {
+ unsigned char *data = (unsigned char *)th + (th->doff << 2);
+ sg_set_buf(&sg[block++], data, data_len);
+ nbytes += data_len;
+ }
+
+ /* 4. an independently-specified key or password, known to both
+ * TCPs and presumably connection-specific
+ */
+ sg_set_buf(&sg[block++], key->key, key->keylen);
+ nbytes += key->keylen;
+
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ printk (" and password: ");
+ for (i = 0; i < key->keylen; i++)
+ printk ("%02x ", (unsigned int)key->key[i]);
+#endif
+
+ /* Now store the Hash into the packet */
+ err = crypto_hash_init(desc);
+ if (err)
+ goto clear_hash;
+ err = crypto_hash_update(desc, sg, nbytes);
+ if (err)
+ goto clear_hash;
+ err = crypto_hash_final(desc, md5_hash);
+ if (err)
+ goto clear_hash;
+
+ /* Reset header, and free up the crypto */
+ tcp_put_md5sig_pool();
+ th->check = old_checksum;
+
+out:
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ printk(" result:");
+ for (i = 0; i < 16; i++)
+ printk (" %02x", (unsigned int)(((u8*)md5_hash)[i]));
+ printk("\n");
+#endif
+ return 0;
+clear_hash:
+ tcp_put_md5sig_pool();
+clear_hash_noput:
+ memset(md5_hash, 0, 16);
+ goto out;
+}
+
+int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+ struct sock *sk,
+ struct dst_entry *dst,
+ struct request_sock *req,
+ struct tcphdr *th, int protocol,
+ int tcplen)
+{
+ __be32 saddr, daddr;
+
+ if (sk) {
+ saddr = inet_sk(sk)->saddr;
+ daddr = inet_sk(sk)->daddr;
+ } else {
+ struct rtable *rt = (struct rtable *)dst;
+ BUG_ON(!rt);
+ saddr = rt->rt_src;
+ daddr = rt->rt_dst;
+ }
+ return tcp_v4_do_calc_md5_hash(md5_hash, key,
+ saddr, daddr,
+ th, protocol, tcplen);
+}
+
+EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
+
+static int tcp_v4_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
+{
+ /*
+ * This gets called for each TCP segment that arrives
+ * so we want to be efficient.
+ * We have 3 drop cases:
+ * o No MD5 hash and one expected.
+ * o MD5 hash and we're not expecting one.
+ * o MD5 hash and it's wrong.
+ */
+ __u8 *hash_location = NULL;
+ struct tcp_md5sig_key *hash_expected;
+ struct iphdr *iph = skb->nh.iph;
+ struct tcphdr *th = skb->h.th;
+ int length = (th->doff << 2) - sizeof (struct tcphdr);
+ int genhash;
+ unsigned char *ptr;
+ unsigned char newhash[16];
+
+ hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
+
+ /*
+ * If the TCP option length is less than the TCP_MD5SIG
+ * option length, then we can shortcut
+ */
+ if (length < TCPOLEN_MD5SIG) {
+ if (hash_expected)
+ return 1;
+ else
+ return 0;
+ }
+
+ /* Okay, we can't shortcut - we have to grub through the options */
+ ptr = (unsigned char *)(th + 1);
+ while (length > 0) {
+ int opcode = *ptr++;
+ int opsize;
+
+ switch (opcode) {
+ case TCPOPT_EOL:
+ goto done_opts;
+ case TCPOPT_NOP:
+ length--;
+ continue;
+ default:
+ opsize = *ptr++;
+ if (opsize < 2)
+ goto done_opts;
+ if (opsize > length)
+ goto done_opts;
+
+ if (opcode == TCPOPT_MD5SIG) {
+ hash_location = ptr;
+ goto done_opts;
+ }
+ }
+ ptr += opsize-2;
+ length -= opsize;
+ }
+done_opts:
+ /* We've parsed the options - do we have a hash? */
+ if (!hash_expected && !hash_location)
+ return 0;
+
+ if (hash_expected && !hash_location) {
+ if (net_ratelimit()) {
+ printk(KERN_INFO "MD5 Hash NOT expected but found "
+ "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
+ NIPQUAD (iph->saddr), ntohs(th->source),
+ NIPQUAD (iph->daddr), ntohs(th->dest));
+ }
+ return 1;
+ }
+
+ if (!hash_expected && hash_location) {
+ if (net_ratelimit()) {
+ printk(KERN_INFO "MD5 Hash NOT expected but found "
+ "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
+ NIPQUAD (iph->saddr), ntohs(th->source),
+ NIPQUAD (iph->daddr), ntohs(th->dest));
+ }
+ return 1;
+ }
+
+ /* Okay, so this is hash_expected and hash_location -
+ * so we need to calculate the checksum.
+ */
+ genhash = tcp_v4_do_calc_md5_hash(newhash,
+ hash_expected,
+ iph->saddr, iph->daddr,
+ th, sk->sk_protocol,
+ skb->len);
+
+ if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+ if (net_ratelimit()) {
+ printk(KERN_INFO "MD5 Hash failed for "
+ "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
+ NIPQUAD (iph->saddr), ntohs(th->source),
+ NIPQUAD (iph->daddr), ntohs(th->dest),
+ genhash ? " tcp_v4_calc_md5_hash failed" : "");
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ do {
+ int i;
+ printk("Received: ");
+ for (i = 0; i < 16; i++)
+ printk("%02x ", 0xff & (int)hash_location[i]);
+ printk("\n");
+ printk("Calculated: ");
+ for (i = 0; i < 16; i++)
+ printk("%02x ", 0xff & (int)newhash[i]);
+ printk("\n");
+ } while(0);
+#endif
+ }
+ return 1;
+ }
+ return 0;
+}
+
+#endif
+
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct tcp_request_sock),
@@ -723,9 +1265,16 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.send_reset = tcp_v4_send_reset,
};
+struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+#ifdef CONFIG_TCP_MD5SIG
+ .md5_lookup = tcp_v4_reqsk_md5_lookup,
+#endif
+};
+
static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp_timewait_sock),
.twsk_unique = tcp_twsk_unique,
+ .twsk_destructor= tcp_twsk_destructor,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -773,6 +1322,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (!req)
goto drop;
+#ifdef CONFIG_TCP_MD5SIG
+ tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
+#endif
+
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = 536;
tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
@@ -891,6 +1444,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *key;
+#endif
if (sk_acceptq_is_full(sk))
goto exit_overflow;
@@ -925,6 +1481,24 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
tcp_initialize_rcv_mss(newsk);
+#ifdef CONFIG_TCP_MD5SIG
+ /* Copy over the MD5 key from the original socket */
+ if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
+ /*
+ * We're using one, so create a matching key
+ * on the newsk structure. If we fail to get
+ * memory, then we end up not copying the key
+ * across. Shucks.
+ */
+ char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
+ if (newkey) {
+ memcpy(newkey, key->key, key->keylen);
+ tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
+ newkey, key->keylen);
+ }
+ }
+#endif
+
__inet_hash(&tcp_hashinfo, newsk, 0);
__inet_inherit_port(&tcp_hashinfo, sk, newsk);
@@ -1000,10 +1574,24 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
*/
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
+ struct sock *rsk;
+#ifdef CONFIG_TCP_MD5SIG
+ /*
+ * We really want to reject the packet as early as possible
+ * if:
+ * o We're expecting an MD5'd packet and there is no MD5 TCP option
+ * o There is an MD5 option and we're not expecting one
+ */
+ if (tcp_v4_inbound_md5_hash (sk, skb))
+ goto discard;
+#endif
+
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
TCP_CHECK_TIMER(sk);
- if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
+ if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+ rsk = sk;
goto reset;
+ }
TCP_CHECK_TIMER(sk);
return 0;
}
@@ -1017,20 +1605,24 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
goto discard;
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb))
+ if (tcp_child_process(sk, nsk, skb)) {
+ rsk = nsk;
goto reset;
+ }
return 0;
}
}
TCP_CHECK_TIMER(sk);
- if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
+ if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+ rsk = sk;
goto reset;
+ }
TCP_CHECK_TIMER(sk);
return 0;
reset:
- tcp_v4_send_reset(skb);
+ tcp_v4_send_reset(rsk, skb);
discard:
kfree_skb(skb);
/* Be careful here. If this function gets more complicated and
@@ -1139,7 +1731,7 @@ no_tcp_socket:
bad_packet:
TCP_INC_STATS_BH(TCP_MIB_INERRS);
} else {
- tcp_v4_send_reset(skb);
+ tcp_v4_send_reset(NULL, skb);
}
discard_it:
@@ -1262,6 +1854,15 @@ struct inet_connection_sock_af_ops ipv4_specific = {
#endif
};
+struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
+#ifdef CONFIG_TCP_MD5SIG
+ .md5_lookup = tcp_v4_md5_lookup,
+ .calc_md5_hash = tcp_v4_calc_md5_hash,
+ .md5_add = tcp_v4_md5_add_func,
+ .md5_parse = tcp_v4_parse_md5_keys,
+#endif
+};
+
/* NOTE: A lot of things set to zero explicitly by call to
* sk_alloc() so need not be done here.
*/
@@ -1301,6 +1902,9 @@ static int tcp_v4_init_sock(struct sock *sk)
icsk->icsk_af_ops = &ipv4_specific;
icsk->icsk_sync_mss = tcp_sync_mss;
+#ifdef CONFIG_TCP_MD5SIG
+ tp->af_specific = &tcp_sock_ipv4_specific;
+#endif
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -1324,6 +1928,15 @@ int tcp_v4_destroy_sock(struct sock *sk)
/* Cleans up our, hopefully empty, out_of_order_queue. */
__skb_queue_purge(&tp->out_of_order_queue);
+#ifdef CONFIG_TCP_MD5SIG
+ /* Clean up the MD5 key list, if any */
+ if (tp->md5sig_info) {
+ tcp_v4_clear_md5_list(sk);
+ kfree(tp->md5sig_info);
+ tp->md5sig_info = NULL;
+ }
+#endif
+
#ifdef CONFIG_NET_DMA
/* Cleans up our sk_async_wait_queue */
__skb_queue_purge(&sk->sk_async_wait_queue);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0163d982690..ac55d8892cf 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -306,6 +306,28 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tw->tw_ipv6only = np->ipv6only;
}
#endif
+
+#ifdef CONFIG_TCP_MD5SIG
+ /*
+ * The timewait bucket does not have the key DB from the
+ * sock structure. We just make a quick copy of the
+ * md5 key being used (if indeed we are using one)
+ * so the timewait ack generating code has the key.
+ */
+ do {
+ struct tcp_md5sig_key *key;
+ memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
+ tcptw->tw_md5_keylen = 0;
+ key = tp->af_specific->md5_lookup(sk, sk);
+ if (key != NULL) {
+ memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
+ tcptw->tw_md5_keylen = key->keylen;
+ if (tcp_alloc_md5sig_pool() == NULL)
+ BUG();
+ }
+ } while(0);
+#endif
+
/* Linkage updates. */
__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
@@ -337,6 +359,17 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tcp_done(sk);
}
+void tcp_twsk_destructor(struct sock *sk)
+{
+ struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+#ifdef CONFIG_TCP_MD5SIG
+ if (twsk->tw_md5_keylen)
+ tcp_put_md5sig_pool();
+#endif
+}
+
+EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
+
/* This is not only more efficient than what we used to do, it eliminates
* a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
*
@@ -435,6 +468,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->rx_opt.ts_recent_stamp = 0;
newtp->tcp_header_len = sizeof(struct tcphdr);
}
+#ifdef CONFIG_TCP_MD5SIG
+ newtp->md5sig_info = NULL; /*XXX*/
+ if (newtp->af_specific->md5_lookup(sk, newsk))
+ newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
@@ -617,6 +655,30 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
req, NULL);
if (child == NULL)
goto listen_overflow;
+#ifdef CONFIG_TCP_MD5SIG
+ else {
+ /* Copy over the MD5 key from the original socket */
+ struct tcp_md5sig_key *key;
+ struct tcp_sock *tp = tcp_sk(sk);
+ key = tp->af_specific->md5_lookup(sk, child);
+ if (key != NULL) {
+ /*
+ * We're using one, so create a matching key on the
+ * newsk structure. If we fail to get memory then we
+ * end up not copying the key across. Shucks.
+ */
+ char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
+ if (newkey) {
+ if (!tcp_alloc_md5sig_pool())
+ BUG();
+ memcpy(newkey, key->key, key->keylen);
+ tp->af_specific->md5_add(child, child,
+ newkey,
+ key->keylen);
+ }
+ }
+ }
+#endif
inet_csk_reqsk_queue_unlink(sk, req, prev);
inet_csk_reqsk_queue_removed(sk, req);
@@ -633,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
embryonic_reset:
NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
if (!(flg & TCP_FLAG_RST))
- req->rsk_ops->send_reset(skb);
+ req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req, prev);
return NULL;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6a8581ab9a2..32c1a972fa3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -270,7 +270,7 @@ static u16 tcp_select_window(struct sock *sk)
}
static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
- __u32 tstamp)
+ __u32 tstamp, __u8 **md5_hash)
{
if (tp->rx_opt.tstamp_ok) {
*ptr++ = htonl((TCPOPT_NOP << 24) |
@@ -298,16 +298,29 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
tp->rx_opt.eff_sacks--;
}
}
+#ifdef CONFIG_TCP_MD5SIG
+ if (md5_hash) {
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) |
+ TCPOLEN_MD5SIG);
+ *md5_hash = (__u8 *)ptr;
+ }
+#endif
}
/* Construct a tcp options header for a SYN or SYN_ACK packet.
* If this is ever changed make sure to change the definition of
* MAX_SYN_SIZE to match the new maximum number of options that you
* can generate.
+ *
+ * Note that with the RFC2385 TCP option, we make room for the
+ * 16 byte MD5 hash. This will be filled in later, so the pointer for the
+ * location to be filled is passed back up.
*/
static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
int offer_wscale, int wscale, __u32 tstamp,
- __u32 ts_recent)
+ __u32 ts_recent, __u8 **md5_hash)
{
/* We always get an MSS option.
* The option bytes which will be seen in normal data
@@ -346,6 +359,20 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
(TCPOPT_WINDOW << 16) |
(TCPOLEN_WINDOW << 8) |
(wscale));
+#ifdef CONFIG_TCP_MD5SIG
+ /*
+ * If MD5 is enabled, then we set the option, and include the size
+ * (always 18). The actual MD5 hash is added just before the
+ * packet is sent.
+ */
+ if (md5_hash) {
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) |
+ TCPOLEN_MD5SIG);
+ *md5_hash = (__u8 *) ptr;
+ }
+#endif
}
/* This routine actually transmits TCP packets queued in by
@@ -366,6 +393,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
struct tcp_sock *tp;
struct tcp_skb_cb *tcb;
int tcp_header_size;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *md5;
+ __u8 *md5_hash_location;
+#endif
struct tcphdr *th;
int sysctl_flags;
int err;
@@ -424,6 +455,16 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
+#ifdef CONFIG_TCP_MD5SIG
+ /*
+ * Are we doing MD5 on this segment? If so - make
+ * room for it.
+ */
+ md5 = tp->af_specific->md5_lookup(sk, sk);
+ if (md5)
+ tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
th = (struct tcphdr *) skb_push(skb, tcp_header_size);
skb->h.th = th;
@@ -460,13 +501,34 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
(sysctl_flags & SYSCTL_FLAG_WSCALE),
tp->rx_opt.rcv_wscale,
tcb->when,
- tp->rx_opt.ts_recent);
+ tp->rx_opt.ts_recent,
+
+#ifdef CONFIG_TCP_MD5SIG
+ md5 ? &md5_hash_location :
+#endif
+ NULL);
} else {
tcp_build_and_update_options((__be32 *)(th + 1),
- tp, tcb->when);
+ tp, tcb->when,
+#ifdef CONFIG_TCP_MD5SIG
+ md5 ? &md5_hash_location :
+#endif
+ NULL);
TCP_ECN_send(sk, tp, skb, tcp_header_size);
}
+#ifdef CONFIG_TCP_MD5SIG
+ /* Calculate the MD5 hash, as we have all we need now */
+ if (md5) {
+ tp->af_specific->calc_md5_hash(md5_hash_location,
+ md5,
+ sk, NULL, NULL,
+ skb->h.th,
+ sk->sk_protocol,
+ skb->len);
+ }
+#endif
+
icsk->icsk_af_ops->send_check(sk, skb->len, skb);
if (likely(tcb->flags & TCPCB_FLAG_ACK))
@@ -840,6 +902,11 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
(tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
+#ifdef CONFIG_TCP_MD5SIG
+ if (tp->af_specific->md5_lookup(sk, sk))
+ mss_now -= TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
xmit_size_goal = mss_now;
if (doing_tso) {
@@ -2033,6 +2100,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct tcphdr *th;
int tcp_header_size;
struct sk_buff *skb;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *md5;
+ __u8 *md5_hash_location;
+#endif
skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
if (skb == NULL)
@@ -2048,6 +2119,13 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
(ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
/* SACK_PERM is in the place of NOP NOP of TS */
((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
+
+#ifdef CONFIG_TCP_MD5SIG
+ /* Are we doing MD5 on this segment? If so - make room for it */
+ md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
+ if (md5)
+ tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
+#endif
skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
memset(th, 0, sizeof(struct tcphdr));
@@ -2085,11 +2163,29 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
TCP_SKB_CB(skb)->when,
- req->ts_recent);
+ req->ts_recent,
+ (
+#ifdef CONFIG_TCP_MD5SIG
+ md5 ? &md5_hash_location :
+#endif
+ NULL)
+ );
skb->csum = 0;
th->doff = (tcp_header_size >> 2);
TCP_INC_STATS(TCP_MIB_OUTSEGS);
+
+#ifdef CONFIG_TCP_MD5SIG
+ /* Okay, we have all we need - do the md5 hash if needed */
+ if (md5) {
+ tp->af_specific->calc_md5_hash(md5_hash_location,
+ md5,
+ NULL, dst, req,
+ skb->h.th, sk->sk_protocol,
+ skb->len);
+ }
+#endif
+
return skb;
}
@@ -2108,6 +2204,11 @@ static void tcp_connect_init(struct sock *sk)
tp->tcp_header_len = sizeof(struct tcphdr) +
(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
+#ifdef CONFIG_TCP_MD5SIG
+ if (tp->af_specific->md5_lookup(sk, sk) != NULL)
+ tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
/* If user gave his TCP_MAXSEG, record it to clamp */
if (tp->rx_opt.user_mss)
tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
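
Throughout the patch the option is emitted as the same aligned prefix: two NOPs, then kind TCPOPT_MD5SIG (19) and length TCPOLEN_MD5SIG (18), followed by the 16-byte digest, for TCPOLEN_MD5SIG_ALIGNED = 20 bytes reserved in the header. A small standalone check of the byte order produced by the htonl() expression used above (the constant values shown are the ones the patch relies on):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define TCPOPT_NOP	1
#define TCPOPT_MD5SIG	19	/* option kind assigned by RFC2385 */
#define TCPOLEN_MD5SIG	18	/* kind + length + 16-byte digest */

int main(void)
{
	uint32_t word = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			      (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
	const uint8_t *b = (const uint8_t *)&word;

	/* Prints 01 01 13 12: NOP, NOP, kind 19, length 18.  The 16 hash
	 * bytes follow, giving the 20 aligned bytes reserved by
	 * TCPOLEN_MD5SIG_ALIGNED in tcp_syn_build_options() and friends. */
	printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;
}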
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9a88395a762..663d1d23801 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -66,10 +66,13 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
/* Socket used for sending RSTs and ACKs */
static struct socket *tcp6_socket;
-static void tcp_v6_send_reset(struct sk_buff *skb);
+static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void tcp_v6_send_check(struct sock *sk, int len,
struct sk_buff *skb);
@@ -78,6 +81,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
+static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
+static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
@@ -208,6 +213,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
icsk->icsk_af_ops = &ipv6_mapped;
sk->sk_backlog_rcv = tcp_v4_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+ tp->af_specific = &tcp_sock_ipv6_mapped_specific;
+#endif
err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
@@ -215,6 +223,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
icsk->icsk_ext_hdr_len = exthdrlen;
icsk->icsk_af_ops = &ipv6_specific;
sk->sk_backlog_rcv = tcp_v6_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+ tp->af_specific = &tcp_sock_ipv6_specific;
+#endif
goto failure;
} else {
ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
@@ -518,6 +529,396 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
kfree_skb(inet6_rsk(req)->pktopts);
}
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
+ struct in6_addr *addr)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int i;
+
+ BUG_ON(tp == NULL);
+
+ if (!tp->md5sig_info || !tp->md5sig_info->entries6)
+ return NULL;
+
+ for (i = 0; i < tp->md5sig_info->entries6; i++) {
+ if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
+ return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
+ }
+ return NULL;
+}
+
+static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
+ struct sock *addr_sk)
+{
+ return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
+}
+
+static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
+ struct request_sock *req)
+{
+ return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
+}
+
+static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
+ char *newkey, u8 newkeylen)
+{
+ /* Add key to the list */
+ struct tcp6_md5sig_key *key;
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp6_md5sig_key *keys;
+
+ key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
+ if (key) {
+ /* modify existing entry - just update that one */
+ kfree(key->key);
+ key->key = newkey;
+ key->keylen = newkeylen;
+ } else {
+ /* reallocate new list if current one is full. */
+ if (!tp->md5sig_info) {
+ tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
+ if (!tp->md5sig_info) {
+ kfree(newkey);
+ return -ENOMEM;
+ }
+ }
+ tcp_alloc_md5sig_pool();
+ if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
+ keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
+ (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
+
+ if (!keys) {
+ tcp_free_md5sig_pool();
+ kfree(newkey);
+ return -ENOMEM;
+ }
+
+ if (tp->md5sig_info->entries6)
+ memmove(keys, tp->md5sig_info->keys6,
+ (sizeof (tp->md5sig_info->keys6[0]) *
+ tp->md5sig_info->entries6));
+
+ kfree(tp->md5sig_info->keys6);
+ tp->md5sig_info->keys6 = keys;
+ tp->md5sig_info->alloced6++;
+ }
+
+ ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
+ peer);
+ tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
+ tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
+
+ tp->md5sig_info->entries6++;
+ }
+ return 0;
+}
+
+static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
+ u8 *newkey, __u8 newkeylen)
+{
+ return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
+ newkey, newkeylen);
+}
+
+static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int i;
+
+ for (i = 0; i < tp->md5sig_info->entries6; i++) {
+ if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
+ /* Free the key */
+ kfree(tp->md5sig_info->keys6[i].key);
+ tp->md5sig_info->entries6--;
+
+ if (tp->md5sig_info->entries6 == 0) {
+ kfree(tp->md5sig_info->keys6);
+ tp->md5sig_info->keys6 = NULL;
+
+ tcp_free_md5sig_pool();
+
+ return 0;
+ } else {
+ /* shrink the database */
+ if (tp->md5sig_info->entries6 != i)
+ memmove(&tp->md5sig_info->keys6[i],
+ &tp->md5sig_info->keys6[i+1],
+ (tp->md5sig_info->entries6 - i)
+ * sizeof (tp->md5sig_info->keys6[0]));
+ }
+ }
+ }
+ return -ENOENT;
+}
+
+static void tcp_v6_clear_md5_list (struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int i;
+
+ if (tp->md5sig_info->entries6) {
+ for (i = 0; i < tp->md5sig_info->entries6; i++)
+ kfree(tp->md5sig_info->keys6[i].key);
+ tp->md5sig_info->entries6 = 0;
+ tcp_free_md5sig_pool();
+ }
+
+ kfree(tp->md5sig_info->keys6);
+ tp->md5sig_info->keys6 = NULL;
+ tp->md5sig_info->alloced6 = 0;
+
+ if (tp->md5sig_info->entries4) {
+ for (i = 0; i < tp->md5sig_info->entries4; i++)
+ kfree(tp->md5sig_info->keys4[i].key);
+ tp->md5sig_info->entries4 = 0;
+ tcp_free_md5sig_pool();
+ }
+
+ kfree(tp->md5sig_info->keys4);
+ tp->md5sig_info->keys4 = NULL;
+ tp->md5sig_info->alloced4 = 0;
+}
+
+static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
+ int optlen)
+{
+ struct tcp_md5sig cmd;
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
+ u8 *newkey;
+
+ if (optlen < sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd, optval, sizeof(cmd)))
+ return -EFAULT;
+
+ if (sin6->sin6_family != AF_INET6)
+ return -EINVAL;
+
+ if (!cmd.tcpm_keylen) {
+ if (!tcp_sk(sk)->md5sig_info)
+ return -ENOENT;
+ if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
+ return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
+ return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
+ }
+
+ if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
+ return -EINVAL;
+
+ if (!tcp_sk(sk)->md5sig_info) {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_md5sig_info *p;
+
+ p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ tp->md5sig_info = p;
+ }
+
+ newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL);
+ if (!newkey)
+ return -ENOMEM;
+ memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen);
+ if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) {
+ return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
+ newkey, cmd.tcpm_keylen);
+ }
+ return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
+}
+
+static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr,
+ struct tcphdr *th, int protocol,
+ int tcplen)
+{
+ struct scatterlist sg[4];
+ __u16 data_len;
+ int block = 0;
+ __u16 cksum;
+ struct tcp_md5sig_pool *hp;
+ struct tcp6_pseudohdr *bp;
+ struct hash_desc *desc;
+ int err;
+ unsigned int nbytes = 0;
+
+ hp = tcp_get_md5sig_pool();
+ if (!hp) {
+ printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
+ goto clear_hash_noput;
+ }
+ bp = &hp->md5_blk.ip6;
+ desc = &hp->md5_desc;
+
+ /* 1. TCP pseudo-header (RFC2460) */
+ ipv6_addr_copy(&bp->saddr, saddr);
+ ipv6_addr_copy(&bp->daddr, daddr);
+ bp->len = htonl(tcplen);
+ bp->protocol = htonl(protocol);
+
+ sg_set_buf(&sg[block++], bp, sizeof(*bp));
+ nbytes += sizeof(*bp);
+
+ /* 2. TCP header, excluding options */
+ cksum = th->check;
+ th->check = 0;
+ sg_set_buf(&sg[block++], th, sizeof(*th));
+ nbytes += sizeof(*th);
+
+ /* 3. TCP segment data (if any) */
+ data_len = tcplen - (th->doff << 2);
+ if (data_len > 0) {
+ u8 *data = (u8 *)th + (th->doff << 2);
+ sg_set_buf(&sg[block++], data, data_len);
+ nbytes += data_len;
+ }
+
+ /* 4. shared key */
+ sg_set_buf(&sg[block++], key->key, key->keylen);
+ nbytes += key->keylen;
+
+ /* Now store the hash into the packet */
+ err = crypto_hash_init(desc);
+ if (err) {
+ printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
+ goto clear_hash;
+ }
+ err = crypto_hash_update(desc, sg, nbytes);
+ if (err) {
+ printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
+ goto clear_hash;
+ }
+ err = crypto_hash_final(desc, md5_hash);
+ if (err) {
+ printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
+ goto clear_hash;
+ }
+
+ /* Reset header, and free up the crypto */
+ tcp_put_md5sig_pool();
+ th->check = cksum;
+out:
+ return 0;
+clear_hash:
+ tcp_put_md5sig_pool();
+clear_hash_noput:
+ memset(md5_hash, 0, 16);
+ goto out;
+}
+
+static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+ struct sock *sk,
+ struct dst_entry *dst,
+ struct request_sock *req,
+ struct tcphdr *th, int protocol,
+ int tcplen)
+{
+ struct in6_addr *saddr, *daddr;
+
+ if (sk) {
+ saddr = &inet6_sk(sk)->saddr;
+ daddr = &inet6_sk(sk)->daddr;
+ } else {
+ saddr = &inet6_rsk(req)->loc_addr;
+ daddr = &inet6_rsk(req)->rmt_addr;
+ }
+ return tcp_v6_do_calc_md5_hash(md5_hash, key,
+ saddr, daddr,
+ th, protocol, tcplen);
+}
+
+static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
+{
+ __u8 *hash_location = NULL;
+ struct tcp_md5sig_key *hash_expected;
+ struct ipv6hdr *ip6h = skb->nh.ipv6h;
+ struct tcphdr *th = skb->h.th;
+ int length = (th->doff << 2) - sizeof (*th);
+ int genhash;
+ u8 *ptr;
+ u8 newhash[16];
+
+ hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
+
+ /* If the TCP option is too short, we can short cut */
+ if (length < TCPOLEN_MD5SIG)
+ return hash_expected ? 1 : 0;
+
+ /* parse options */
+ ptr = (u8*)(th + 1);
+ while (length > 0) {
+ int opcode = *ptr++;
+ int opsize;
+
+ switch(opcode) {
+ case TCPOPT_EOL:
+ goto done_opts;
+ case TCPOPT_NOP:
+ length--;
+ continue;
+ default:
+ opsize = *ptr++;
+ if (opsize < 2 || opsize > length)
+ goto done_opts;
+ if (opcode == TCPOPT_MD5SIG) {
+ hash_location = ptr;
+ goto done_opts;
+ }
+ }
+ ptr += opsize - 2;
+ length -= opsize;
+ }
+
+done_opts:
+ /* do we have a hash as expected? */
+ if (!hash_expected) {
+ if (!hash_location)
+ return 0;
+ if (net_ratelimit()) {
+ printk(KERN_INFO "MD5 Hash NOT expected but found "
+ "(" NIP6_FMT ", %u)->"
+ "(" NIP6_FMT ", %u)\n",
+ NIP6(ip6h->saddr), ntohs(th->source),
+ NIP6(ip6h->daddr), ntohs(th->dest));
+ }
+ return 1;
+ }
+
+ if (!hash_location) {
+ if (net_ratelimit()) {
+ printk(KERN_INFO "MD5 Hash expected but NOT found "
+ "(" NIP6_FMT ", %u)->"
+ "(" NIP6_FMT ", %u)\n",
+ NIP6(ip6h->saddr), ntohs(th->source),
+ NIP6(ip6h->daddr), ntohs(th->dest));
+ }
+ return 1;
+ }
+
+ /* check the signature */
+ genhash = tcp_v6_do_calc_md5_hash(newhash,
+ hash_expected,
+ &ip6h->saddr, &ip6h->daddr,
+ th, sk->sk_protocol,
+ skb->len);
+ if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+ if (net_ratelimit()) {
+ printk(KERN_INFO "MD5 Hash %s for "
+ "(" NIP6_FMT ", %u)->"
+ "(" NIP6_FMT ", %u)\n",
+ genhash ? "failed" : "mismatch",
+ NIP6(ip6h->saddr), ntohs(th->source),
+ NIP6(ip6h->daddr), ntohs(th->dest));
+ }
+ return 1;
+ }
+ return 0;
+}
+#endif
+
static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
.family = AF_INET6,
.obj_size = sizeof(struct tcp6_request_sock),
@@ -527,9 +928,16 @@ static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
.send_reset = tcp_v6_send_reset
};
+struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+#ifdef CONFIG_TCP_MD5SIG
+ .md5_lookup = tcp_v6_reqsk_md5_lookup,
+#endif
+};
+
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp6_timewait_sock),
.twsk_unique = tcp_twsk_unique,
+ .twsk_destructor= tcp_twsk_destructor,
};
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
@@ -566,11 +974,15 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
return 0;
}
-static void tcp_v6_send_reset(struct sk_buff *skb)
+static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
struct tcphdr *th = skb->h.th, *t1;
struct sk_buff *buff;
struct flowi fl;
+ int tot_len = sizeof(*th);
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *key;
+#endif
if (th->rst)
return;
@@ -578,25 +990,35 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
if (!ipv6_unicast_destination(skb))
return;
+#ifdef CONFIG_TCP_MD5SIG
+ if (sk)
+ key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr);
+ else
+ key = NULL;
+
+ if (key)
+ tot_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
/*
* We need to grab some memory, and put together an RST,
* and then put it into the queue to be sent.
*/
- buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
+ buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
GFP_ATOMIC);
if (buff == NULL)
return;
- skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
+ skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
- t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr));
+ t1 = (struct tcphdr *) skb_push(buff, tot_len);
/* Swap the send and the receive. */
memset(t1, 0, sizeof(*t1));
t1->dest = th->source;
t1->source = th->dest;
- t1->doff = sizeof(*t1)/4;
+ t1->doff = tot_len / 4;
t1->rst = 1;
if(th->ack) {
@@ -607,6 +1029,22 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
+ skb->len - (th->doff<<2));
}
+#ifdef CONFIG_TCP_MD5SIG
+ if (key) {
+ u32 *opt = (u32*)(t1 + 1);
+ opt[0] = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) |
+ TCPOLEN_MD5SIG);
+ tcp_v6_do_calc_md5_hash((__u8*)&opt[1],
+ key,
+ &skb->nh.ipv6h->daddr,
+ &skb->nh.ipv6h->saddr,
+ t1, IPPROTO_TCP,
+ tot_len);
+ }
+#endif
+
buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
memset(&fl, 0, sizeof(fl));
@@ -637,15 +1075,37 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
kfree_skb(buff);
}
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
+static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
+ struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
struct tcphdr *th = skb->h.th, *t1;
struct sk_buff *buff;
struct flowi fl;
int tot_len = sizeof(struct tcphdr);
+ u32 *topt;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *key;
+ struct tcp_md5sig_key tw_key;
+#endif
+
+#ifdef CONFIG_TCP_MD5SIG
+ if (!tw && skb->sk) {
+ key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr);
+ } else if (tw && tw->tw_md5_keylen) {
+ tw_key.key = tw->tw_md5_key;
+ tw_key.keylen = tw->tw_md5_keylen;
+ key = &tw_key;
+ } else {
+ key = NULL;
+ }
+#endif
if (ts)
tot_len += TCPOLEN_TSTAMP_ALIGNED;
+#ifdef CONFIG_TCP_MD5SIG
+ if (key)
+ tot_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
GFP_ATOMIC);
@@ -665,15 +1125,29 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
t1->ack_seq = htonl(ack);
t1->ack = 1;
t1->window = htons(win);
+
+ topt = (u32*)(t1 + 1);
if (ts) {
- u32 *ptr = (u32*)(t1 + 1);
- *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
- *ptr++ = htonl(tcp_time_stamp);
- *ptr = htonl(ts);
+ *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+ *topt++ = htonl(tcp_time_stamp);
+ *topt = htonl(ts);
}
+#ifdef CONFIG_TCP_MD5SIG
+ if (key) {
+ *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
+ tcp_v6_do_calc_md5_hash((__u8 *)topt,
+ key,
+ &skb->nh.ipv6h->daddr,
+ &skb->nh.ipv6h->saddr,
+ t1, IPPROTO_TCP,
+ tot_len);
+ }
+#endif
+
buff->csum = csum_partial((char *)t1, tot_len, 0);
memset(&fl, 0, sizeof(fl));
@@ -704,9 +1178,9 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
struct inet_timewait_sock *tw = inet_twsk(sk);
- const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+ struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
- tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcptw->tw_ts_recent);
@@ -715,7 +1189,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
- tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
+ tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
}
@@ -786,6 +1260,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (req == NULL)
goto drop;
+#ifdef CONFIG_TCP_MD5SIG
+ tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
+#endif
+
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
tmp_opt.user_mss = tp->rx_opt.user_mss;
@@ -844,6 +1322,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct tcp_sock *newtp;
struct sock *newsk;
struct ipv6_txoptions *opt;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *key;
+#endif
if (skb->protocol == htons(ETH_P_IP)) {
/*
@@ -874,6 +1355,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
newsk->sk_backlog_rcv = tcp_v4_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+ newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
+#endif
+
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
@@ -1008,6 +1493,23 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
+#ifdef CONFIG_TCP_MD5SIG
+ /* Copy over the MD5 key from the original socket */
+ if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
+ /* We're using one, so create a matching key
+ * on the newsk structure. If we fail to get
+ * memory, then we end up not copying the key
+ * across. Shucks.
+ */
+ char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
+ if (newkey) {
+ memcpy(newkey, key->key, key->keylen);
+ tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
+ newkey, key->keylen);
+ }
+ }
+#endif
+
__inet6_hash(&tcp_hashinfo, newsk);
inet_inherit_port(&tcp_hashinfo, sk, newsk);
@@ -1067,6 +1569,11 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_do_rcv(sk, skb);
+#ifdef CONFIG_TCP_MD5SIG
+ if (tcp_v6_inbound_md5_hash (sk, skb))
+ goto discard;
+#endif
+
if (sk_filter(sk, skb))
goto discard;
@@ -1132,7 +1639,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
reset:
- tcp_v6_send_reset(skb);
+ tcp_v6_send_reset(sk, skb);
discard:
if (opt_skb)
__kfree_skb(opt_skb);
@@ -1257,7 +1764,7 @@ no_tcp_socket:
bad_packet:
TCP_INC_STATS_BH(TCP_MIB_INERRS);
} else {
- tcp_v6_send_reset(skb);
+ tcp_v6_send_reset(NULL, skb);
}
discard_it:
@@ -1336,6 +1843,15 @@ static struct inet_connection_sock_af_ops ipv6_specific = {
#endif
};
+static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
+#ifdef CONFIG_TCP_MD5SIG
+ .md5_lookup = tcp_v6_md5_lookup,
+ .calc_md5_hash = tcp_v6_calc_md5_hash,
+ .md5_add = tcp_v6_md5_add_func,
+ .md5_parse = tcp_v6_parse_md5_keys,
+#endif
+};
+
/*
* TCP over IPv4 via INET6 API
*/
@@ -1358,6 +1874,15 @@ static struct inet_connection_sock_af_ops ipv6_mapped = {
#endif
};
+static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
+#ifdef CONFIG_TCP_MD5SIG
+ .md5_lookup = tcp_v4_md5_lookup,
+ .calc_md5_hash = tcp_v4_calc_md5_hash,
+ .md5_add = tcp_v6_md5_add_func,
+ .md5_parse = tcp_v6_parse_md5_keys,
+#endif
+};
+
/* NOTE: A lot of things set to zero explicitly by call to
* sk_alloc() so need not be done here.
*/
@@ -1397,6 +1922,10 @@ static int tcp_v6_init_sock(struct sock *sk)
sk->sk_write_space = sk_stream_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+#ifdef CONFIG_TCP_MD5SIG
+ tp->af_specific = &tcp_sock_ipv6_specific;
+#endif
+
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -1407,6 +1936,11 @@ static int tcp_v6_init_sock(struct sock *sk)
static int tcp_v6_destroy_sock(struct sock *sk)
{
+#ifdef CONFIG_TCP_MD5SIG
+ /* Clean up the MD5 key list */
+ if (tcp_sk(sk)->md5sig_info)
+ tcp_v6_clear_md5_list(sk);
+#endif
tcp_v4_destroy_sock(sk);
return inet6_destroy_sock(sk);
}
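
One detail worth noting in the IPv6 path: tcp_v6_do_calc_md5_hash() builds the RFC 2460-style pseudo-header, so both the length and the next-header value are 32-bit network-order fields (bp->len = htonl(tcplen), bp->protocol = htonl(protocol)), whereas the IPv4 code uses a 16-bit length and an 8-bit protocol byte. The block it hashes presumably has the layout sketched below; the real struct lives in the header portion of the patch, so this definition is an assumption based on how the routine fills it in:

#include <stdint.h>
#include <stdio.h>
#include <netinet/in.h>

/* Assumed layout of hp->md5_blk.ip6 as filled by tcp_v6_do_calc_md5_hash() */
struct tcp6_pseudohdr {
	struct in6_addr saddr;
	struct in6_addr daddr;
	uint32_t len;		/* TCP header + data, 32 bits, network order */
	uint32_t protocol;	/* next header (6), zero-extended to 32 bits */
};

int main(void)
{
	/* 16 + 16 + 4 + 4 = 40 bytes hashed as block 1 on the IPv6 side,
	 * versus the 12-byte IPv4 pseudo-header. */
	printf("%zu\n", sizeof(struct tcp6_pseudohdr));
	return 0;
}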