author     Chris Leech <christopher.leech@intel.com>   2007-03-08 09:57:36 -0800
committer  Dan Williams <dan.j.williams@intel.com>     2007-07-11 16:10:53 -0700
commit     2b1244a43be97f504494b557a7f7a65fe0d00dba
tree       d3be36597917dbbae664fc6eb97dbe876c5e44e3 /net
parent     72d0b7a81d60f5e64ee7197bc190b9b3265f99dd
I/OAT: Only offload copies for TCP when there will be a context switch
The performance win comes from having the DMA copy engine do the copies
in parallel with the context switch. If there is already enough data ready
on the socket at recv time, just use a regular copy.
Signed-off-by: Chris Leech <christopher.leech@intel.com>
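
To make the trade-off concrete, here is a minimal user-space C model of the
heuristic described above -- a sketch, not kernel code. The names
(should_offload_copy and its parameters) are hypothetical; the real check
lives in tcp_recvmsg(), shown in the diff below, where target is the
sock_rcvlowat()-derived blocking threshold.

/*
 * Sketch: offloading to the DMA engine only pays off when the copy can
 * overlap a context switch, i.e. when the reader is about to sleep
 * because the data on hand falls short of its blocking target.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Offload only if the request is large enough to beat the copybreak
 * threshold AND the queued data cannot satisfy the blocking target,
 * so the task will context-switch while the engine copies. */
static bool should_offload_copy(int available, int target,
                                size_t len, size_t copybreak)
{
	return available < target && len > copybreak;
}

int main(void)
{
	/* 2 KiB queued, reader wants 64 KiB: it will sleep -> offload (1). */
	printf("%d\n", should_offload_copy(2048, 65536, 65536, 4096));
	/* 64 KiB already queued: recv returns at once -> plain copy (0). */
	printf("%d\n", should_offload_copy(65536, 65536, 65536, 4096));
	return 0;
}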
Diffstat (limited to 'net')
 net/ipv4/tcp.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 450f44bb2c8..0eb7cf1002c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1116,6 +1116,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	long timeo;
 	struct task_struct *user_recv = NULL;
 	int copied_early = 0;
+	int available = 0;
+	struct sk_buff *skb;
 
 	lock_sock(sk);
 
@@ -1142,7 +1144,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 #ifdef CONFIG_NET_DMA
 	tp->ucopy.dma_chan = NULL;
 	preempt_disable();
-	if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
+	skb = skb_peek_tail(&sk->sk_receive_queue);
+	if (skb)
+		available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
+	if ((available < target) &&
+	    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
 		preempt_enable_no_resched();
 		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1151,7 +1157,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 #endif
 
 	do {
-		struct sk_buff *skb;
 		u32 offset;
 
 		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
@@ -1439,7 +1444,6 @@ skip_copy:
 
 #ifdef CONFIG_NET_DMA
 	if (tp->ucopy.dma_chan) {
-		struct sk_buff *skb;
 		dma_cookie_t done, used;
 
 		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
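
A side note on the available computation in the first hunk: TCP sequence
numbers are u32, so the expression TCP_SKB_CB(skb)->seq + skb->len - (*seq)
stays correct across sequence-number wraparound thanks to modulo-2^32
arithmetic. A small standalone demonstration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Tail skb begins just below the 32-bit wrap point. */
	uint32_t tail_seq = 0xfffffff0u;  /* seq of first byte in tail skb */
	uint32_t tail_len = 0x40;         /* 64 bytes in the tail skb */
	uint32_t copied   = 0xffffffe0u;  /* next byte the reader wants (*seq) */

	/* tail_seq + tail_len overflows past zero, but unsigned wraparound
	 * still yields the true distance: 16 + 64 = 80 readable bytes. */
	uint32_t available = tail_seq + tail_len - copied;
	printf("available = %u bytes\n", available);  /* prints 80 */
	return 0;
}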