Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c | 163
 1 files changed, 136 insertions, 27 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c5aca0bb116..0cd71b84e48 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -277,8 +277,7 @@
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
-atomic_t tcp_orphan_count = ATOMIC_INIT(0);
-
+struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);
int sysctl_tcp_mem[3] __read_mostly;
@@ -290,9 +289,12 @@ EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);
atomic_t tcp_memory_allocated; /* Current allocated memory. */
-atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
-
EXPORT_SYMBOL(tcp_memory_allocated);
+
+/*
+ * Current number of TCP sockets.
+ */
+struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);
/*
@@ -520,8 +522,12 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
unsigned int offset, size_t len)
{
struct tcp_splice_state *tss = rd_desc->arg.data;
+ int ret;
- return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
+ ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
+ if (ret > 0)
+ rd_desc->count -= ret;
+ return ret;
}
static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
@@ -529,6 +535,7 @@ static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
/* Store TCP splice context information in read_descriptor_t. */
read_descriptor_t rd_desc = {
.arg.data = tss,
+ .count = tss->len,
};
return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
@@ -578,10 +585,6 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
else if (!ret) {
if (spliced)
break;
- if (flags & SPLICE_F_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
if (sock_flag(sk, SOCK_DONE))
break;
if (sk->sk_err) {
@@ -613,11 +616,13 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
tss.len -= ret;
spliced += ret;
+ if (!timeo)
+ break;
release_sock(sk);
lock_sock(sk);
if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
- (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current))
break;
}
@@ -1315,7 +1320,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if ((available < target) &&
(len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
!sysctl_tcp_low_latency &&
- __get_cpu_var(softnet_data).net_dma) {
+ dma_find_channel(DMA_MEMCPY)) {
preempt_enable_no_resched();
tp->ucopy.pinned_list =
dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1525,7 +1530,7 @@ do_prequeue:
if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
- tp->ucopy.dma_chan = get_softnet_dma();
+ tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
if (tp->ucopy.dma_chan) {
tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1630,7 +1635,6 @@ skip_copy:
/* Safe to free early-copied skbs now */
__skb_queue_purge(&sk->sk_async_wait_queue);
- dma_chan_put(tp->ucopy.dma_chan);
tp->ucopy.dma_chan = NULL;
}
if (tp->ucopy.pinned_list) {
@@ -1680,7 +1684,7 @@ void tcp_set_state(struct sock *sk, int state)
inet_put_port(sk);
/* fall through */
default:
- if (oldstate==TCP_ESTABLISHED)
+ if (oldstate == TCP_ESTABLISHED)
TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
}
@@ -1690,7 +1694,7 @@ void tcp_set_state(struct sock *sk, int state)
sk->sk_state = state;
#ifdef STATE_TRACE
- SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
+ SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}
EXPORT_SYMBOL_GPL(tcp_set_state);
@@ -1834,7 +1838,6 @@ adjudge_to_death:
state = sk->sk_state;
sock_hold(sk);
sock_orphan(sk);
- atomic_inc(sk->sk_prot->orphan_count);
/* It is the last release_sock in its life. It will remove backlog. */
release_sock(sk);
@@ -1847,6 +1850,8 @@ adjudge_to_death:
bh_lock_sock(sk);
WARN_ON(sock_owned_by_user(sk));
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
goto out;
@@ -1885,9 +1890,11 @@ adjudge_to_death:
}
}
if (sk->sk_state != TCP_CLOSE) {
+ int orphan_count = percpu_counter_read_positive(
+ sk->sk_prot->orphan_count);
+
sk_mem_reclaim(sk);
- if (tcp_too_many_orphans(sk,
- atomic_read(sk->sk_prot->orphan_count))) {
+ if (tcp_too_many_orphans(sk, orphan_count)) {
if (net_ratelimit())
printk(KERN_INFO "TCP: too many of orphaned "
"sockets\n");
@@ -2382,7 +2389,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
unsigned int seq;
__be32 delta;
unsigned int oldlen;
- unsigned int len;
+ unsigned int mss;
if (!pskb_may_pull(skb, sizeof(*th)))
goto out;
@@ -2398,10 +2405,13 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
oldlen = (u16)~skb->len;
__skb_pull(skb, thlen);
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+ goto out;
+
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
int type = skb_shinfo(skb)->gso_type;
- int mss;
if (unlikely(type &
~(SKB_GSO_TCPV4 |
@@ -2412,7 +2422,6 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
!(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
goto out;
- mss = skb_shinfo(skb)->gso_size;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
segs = NULL;
@@ -2423,8 +2432,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
if (IS_ERR(segs))
goto out;
- len = skb_shinfo(skb)->gso_size;
- delta = htonl(oldlen + (thlen + len));
+ delta = htonl(oldlen + (thlen + mss));
skb = segs;
th = tcp_hdr(skb);
@@ -2440,7 +2448,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
csum_fold(csum_partial(skb_transport_header(skb),
thlen, skb->csum));
- seq += len;
+ seq += mss;
skb = skb->next;
th = tcp_hdr(skb);
@@ -2461,6 +2469,105 @@ out:
}
EXPORT_SYMBOL(tcp_tso_segment);
+struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+ struct sk_buff **pp = NULL;
+ struct sk_buff *p;
+ struct tcphdr *th;
+ struct tcphdr *th2;
+ unsigned int thlen;
+ unsigned int flags;
+ unsigned int total;
+ unsigned int mss = 1;
+ int flush = 1;
+
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ goto out;
+
+ th = tcp_hdr(skb);
+ thlen = th->doff * 4;
+ if (thlen < sizeof(*th))
+ goto out;
+
+ if (!pskb_may_pull(skb, thlen))
+ goto out;
+
+ th = tcp_hdr(skb);
+ __skb_pull(skb, thlen);
+
+ flags = tcp_flag_word(th);
+
+ for (; (p = *head); head = &p->next) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ th2 = tcp_hdr(p);
+
+ if (th->source != th2->source || th->dest != th2->dest) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ goto found;
+ }
+
+ goto out_check_final;
+
+found:
+ flush = NAPI_GRO_CB(p)->flush;
+ flush |= flags & TCP_FLAG_CWR;
+ flush |= (flags ^ tcp_flag_word(th2)) &
+ ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
+ flush |= th->ack_seq != th2->ack_seq || th->window != th2->window;
+ flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));
+
+ total = p->len;
+ mss = skb_shinfo(p)->gso_size;
+
+ flush |= skb->len > mss || skb->len <= 0;
+ flush |= ntohl(th2->seq) + total != ntohl(th->seq);
+
+ if (flush || skb_gro_receive(head, skb)) {
+ mss = 1;
+ goto out_check_final;
+ }
+
+ p = *head;
+ th2 = tcp_hdr(p);
+ tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
+
+out_check_final:
+ flush = skb->len < mss;
+ flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
+ TCP_FLAG_SYN | TCP_FLAG_FIN);
+
+ if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
+ pp = head;
+
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+EXPORT_SYMBOL(tcp_gro_receive);
+
+int tcp_gro_complete(struct sk_buff *skb)
+{
+ struct tcphdr *th = tcp_hdr(skb);
+
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+ if (th->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+ return 0;
+}
+EXPORT_SYMBOL(tcp_gro_complete);
+
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
@@ -2650,7 +2757,7 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
void tcp_done(struct sock *sk)
{
- if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
+ if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
tcp_set_state(sk, TCP_CLOSE);
@@ -2685,6 +2792,8 @@ void __init tcp_init(void)
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
+ percpu_counter_init(&tcp_sockets_allocated, 0);
+ percpu_counter_init(&tcp_orphan_count, 0);
tcp_hashinfo.bind_bucket_cachep =
kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
@@ -2707,8 +2816,8 @@ void __init tcp_init(void)
thash_entries ? 0 : 512 * 1024);
tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
- INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
- INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
+ INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
+ INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
}
if (inet_ehash_locks_alloc(&tcp_hashinfo))
panic("TCP: failed to alloc ehash_locks");