path: root/net/ipv4/tcp.c
author    Paul Mackerras <paulus@samba.org>    2006-12-04 15:59:07 +1100
committer Paul Mackerras <paulus@samba.org>    2006-12-04 15:59:07 +1100
commit    79acbb3ff2d8095b692e1502b9eb2ccec348de26 (patch)
tree      6ab773e5a8f9de2cd6443362b21d0d6fffe3b35e /net/ipv4/tcp.c
parent    19a79859e168640f8e16d7b216d211c1c52b687a (diff)
parent    2b5f6dcce5bf94b9b119e9ed8d537098ec61c3d2 (diff)
Merge branch 'linux-2.6' into for-linus
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c | 167
1 files changed, 154 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 66e9a729f6d..090c690627e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -258,6 +258,7 @@
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
+#include <linux/crypto.h>
#include <net/icmp.h>
#include <net/tcp.h>
@@ -462,11 +463,12 @@ static inline int forced_push(struct tcp_sock *tp)
static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
struct sk_buff *skb)
{
- skb->csum = 0;
- TCP_SKB_CB(skb)->seq = tp->write_seq;
- TCP_SKB_CB(skb)->end_seq = tp->write_seq;
- TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
- TCP_SKB_CB(skb)->sacked = 0;
+ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+
+ skb->csum = 0;
+ tcb->seq = tcb->end_seq = tp->write_seq;
+ tcb->flags = TCPCB_FLAG_ACK;
+ tcb->sacked = 0;
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
sk_charge_skb(sk, skb);
@@ -1942,6 +1944,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
}
break;
+#ifdef CONFIG_TCP_MD5SIG
+ case TCP_MD5SIG:
+ /* Read the IP->Key mappings from userspace */
+ err = tp->af_specific->md5_parse(sk, optval, optlen);
+ break;
+#endif
+
default:
err = -ENOPROTOOPT;
break;
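
For reference, a minimal userspace sketch (not part of this diff) of how the new option is driven from the application side; struct tcp_md5sig and its tcpm_addr/tcpm_keylen/tcpm_key fields are assumed to come from the companion include/linux/tcp.h change in this series:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

static int set_md5_key(int sk, const struct sockaddr_in *peer,
                       const void *key, unsigned int keylen)
{
	struct tcp_md5sig sig;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)	/* 80 bytes max */
		return -1;

	memset(&sig, 0, sizeof(sig));
	memcpy(&sig.tcpm_addr, peer, sizeof(*peer));	/* peer the key is bound to */
	sig.tcpm_keylen = keylen;
	memcpy(sig.tcpm_key, key, keylen);

	/* ends up in tp->af_specific->md5_parse() via the case added above */
	return setsockopt(sk, IPPROTO_TCP, TCP_MD5SIG, &sig, sizeof(sig));
}
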
@@ -2154,7 +2163,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
struct tcphdr *th;
unsigned thlen;
unsigned int seq;
- unsigned int delta;
+ __be32 delta;
unsigned int oldlen;
unsigned int len;
@@ -2207,7 +2216,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
do {
th->fin = th->psh = 0;
- th->check = ~csum_fold(th->check + delta);
+ th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
+ (__force u32)delta));
if (skb->ip_summed != CHECKSUM_PARTIAL)
th->check = csum_fold(csum_partial(skb->h.raw, thlen,
skb->csum));
@@ -2221,7 +2231,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
} while (skb->next);
delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
- th->check = ~csum_fold(th->check + delta);
+ th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
+ (__force u32)delta));
if (skb->ip_summed != CHECKSUM_PARTIAL)
th->check = csum_fold(csum_partial(skb->h.raw, thlen,
skb->csum));
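
The two hunks above only add __force casts so that sparse accepts mixing the __sum16 th->check with the now __be32 delta; the generated arithmetic is unchanged. Roughly, csum_fold() folds a 32-bit one's-complement sum into 16 bits and complements it, so ~csum_fold(th->check + delta) re-folds the checksum after the length-dependent part of the pseudo-header sum is adjusted. A simplified sketch of the generic implementation:

/* Sketch of the generic csum_fold(): fold a 32-bit one's-complement
 * running sum into 16 bits, then return its complement. */
static inline unsigned short csum_fold_sketch(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the possible carry */
	return (unsigned short)~sum;
}
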
@@ -2231,6 +2242,135 @@ out:
}
EXPORT_SYMBOL(tcp_tso_segment);
+#ifdef CONFIG_TCP_MD5SIG
+static unsigned long tcp_md5sig_users;
+static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
+
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
+ if (p) {
+ if (p->md5_desc.tfm)
+ crypto_free_hash(p->md5_desc.tfm);
+ kfree(p);
+ p = NULL;
+ }
+ }
+ free_percpu(pool);
+}
+
+void tcp_free_md5sig_pool(void)
+{
+ struct tcp_md5sig_pool **pool = NULL;
+
+ spin_lock(&tcp_md5sig_pool_lock);
+ if (--tcp_md5sig_users == 0) {
+ pool = tcp_md5sig_pool;
+ tcp_md5sig_pool = NULL;
+ }
+ spin_unlock(&tcp_md5sig_pool_lock);
+ if (pool)
+ __tcp_free_md5sig_pool(pool);
+}
+
+EXPORT_SYMBOL(tcp_free_md5sig_pool);
+
+static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+{
+ int cpu;
+ struct tcp_md5sig_pool **pool;
+
+ pool = alloc_percpu(struct tcp_md5sig_pool *);
+ if (!pool)
+ return NULL;
+
+ for_each_possible_cpu(cpu) {
+ struct tcp_md5sig_pool *p;
+ struct crypto_hash *hash;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ goto out_free;
+ *per_cpu_ptr(pool, cpu) = p;
+
+ hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (!hash || IS_ERR(hash))
+ goto out_free;
+
+ p->md5_desc.tfm = hash;
+ }
+ return pool;
+out_free:
+ __tcp_free_md5sig_pool(pool);
+ return NULL;
+}
+
+struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
+{
+ struct tcp_md5sig_pool **pool;
+ int alloc = 0;
+
+retry:
+ spin_lock(&tcp_md5sig_pool_lock);
+ pool = tcp_md5sig_pool;
+ if (tcp_md5sig_users++ == 0) {
+ alloc = 1;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ } else if (!pool) {
+ tcp_md5sig_users--;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ cpu_relax();
+ goto retry;
+ } else
+ spin_unlock(&tcp_md5sig_pool_lock);
+
+ if (alloc) {
+ /* we cannot hold spinlock here because this may sleep. */
+ struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
+ spin_lock(&tcp_md5sig_pool_lock);
+ if (!p) {
+ tcp_md5sig_users--;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ return NULL;
+ }
+ pool = tcp_md5sig_pool;
+ if (pool) {
+ /* oops, it has already been assigned. */
+ spin_unlock(&tcp_md5sig_pool_lock);
+ __tcp_free_md5sig_pool(p);
+ } else {
+ tcp_md5sig_pool = pool = p;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ }
+ }
+ return pool;
+}
+
+EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+
+struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+{
+ struct tcp_md5sig_pool **p;
+ spin_lock(&tcp_md5sig_pool_lock);
+ p = tcp_md5sig_pool;
+ if (p)
+ tcp_md5sig_users++;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ return (p ? *per_cpu_ptr(p, cpu) : NULL);
+}
+
+EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+
+void __tcp_put_md5sig_pool(void) {
+ __tcp_free_md5sig_pool(tcp_md5sig_pool);
+}
+
+EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+#endif
+
extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;
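
A hedged usage sketch (not in this patch) of how a signing path is expected to use the pool helpers added above: take the per-CPU pool, feed the MD5 transform, then drop the reference. tcp_get_md5sig_pool() and tcp_put_md5sig_pool() are assumed to be the preempt-safe wrappers added to include/net/tcp.h in the same series.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <net/tcp.h>

static int md5_digest_sketch(u8 *data, unsigned int len, u8 *out)
{
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
	struct scatterlist sg;
	int err;

	if (!hp)
		return -ENOMEM;		/* pool not set up or allocation failed */

	sg_init_one(&sg, data, len);
	err = crypto_hash_digest(&hp->md5_desc, &sg, len, out);

	tcp_put_md5sig_pool();		/* pairs with tcp_get_md5sig_pool() */
	return err;
}
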
@@ -2270,7 +2410,7 @@ void __init tcp_init(void)
thash_entries,
(num_physpages >= 128 * 1024) ?
13 : 15,
- HASH_HIGHMEM,
+ 0,
&tcp_hashinfo.ehash_size,
NULL,
0);
@@ -2286,7 +2426,7 @@ void __init tcp_init(void)
tcp_hashinfo.ehash_size,
(num_physpages >= 128 * 1024) ?
13 : 15,
- HASH_HIGHMEM,
+ 0,
&tcp_hashinfo.bhash_size,
NULL,
64 * 1024);
@@ -2316,9 +2456,10 @@ void __init tcp_init(void)
sysctl_max_syn_backlog = 128;
}
- sysctl_tcp_mem[0] = 768 << order;
- sysctl_tcp_mem[1] = 1024 << order;
- sysctl_tcp_mem[2] = 1536 << order;
+ /* Allow no more than 3/4 kernel memory (usually less) allocated to TCP */
+ sysctl_tcp_mem[0] = (1536 / sizeof (struct inet_bind_hashbucket)) << order;
+ sysctl_tcp_mem[1] = sysctl_tcp_mem[0] * 4 / 3;
+ sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
max_share = min(4UL*1024*1024, limit);