From 732db659b83579b922c18dee9123e1529b5fb5d2 Mon Sep 17 00:00:00 2001
From: Adrian Bunk
Date: Thu, 1 Sep 2005 17:40:26 -0700
Subject: [IPVS]: "extern inline" -> "static inline"

"extern inline" doesn't make much sense.

Signed-off-by: Adrian Bunk
Signed-off-by: David S. Miller
---
 include/net/ip_vs.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/net')

diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7a3c43711a1..e426641c519 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -958,7 +958,7 @@ static __inline__ int ip_vs_todrop(void)
  */
 #define IP_VS_FWD_METHOD(cp)  (cp->flags & IP_VS_CONN_F_FWD_MASK)
 
-extern __inline__ char ip_vs_fwd_tag(struct ip_vs_conn *cp)
+static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
 {
         char fwd;
--
cgit v1.2.3


From d80d99d643090c3cf2b1f9fb3fadd1256f7e384f Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Thu, 1 Sep 2005 17:48:23 -0700
Subject: [NET]: Add sk_stream_wmem_schedule

This patch introduces sk_stream_wmem_schedule as a short-hand for the
sk_forward_alloc checking on egress.

Signed-off-by: Herbert Xu
Signed-off-by: David S. Miller
---
 include/net/sock.h | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

(limited to 'include/net')

diff --git a/include/net/sock.h b/include/net/sock.h
index 312cb25cbd1..e51e626e9af 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -709,6 +709,12 @@ static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
                 sk_stream_mem_schedule(sk, skb->truesize, 1);
 }
 
+static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+{
+        return size <= sk->sk_forward_alloc ||
+               sk_stream_mem_schedule(sk, size, 0);
+}
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming
@@ -1203,8 +1209,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
                 skb = alloc_skb_fclone(size + hdr_len, gfp);
                 if (skb) {
                         skb->truesize += mem;
-                        if (sk->sk_forward_alloc >= (int)skb->truesize ||
-                            sk_stream_mem_schedule(sk, skb->truesize, 0)) {
+                        if (sk_stream_wmem_schedule(sk, skb->truesize)) {
                                 skb_reserve(skb, hdr_len);
                                 return skb;
                         }
@@ -1227,8 +1232,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 {
         struct page *page = NULL;
 
-        if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
-            sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
+        if (sk_stream_wmem_schedule(sk, PAGE_SIZE))
                 page = alloc_pages(sk->sk_allocation, 0);
         else {
                 sk->sk_prot->enter_memory_pressure();
--
cgit v1.2.3
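A note on the sk_stream_wmem_schedule helper introduced above: it simply short-circuits on the socket's own reservation (sk_forward_alloc) and only falls back to the global accounting in sk_stream_mem_schedule when that reservation is too small for the write. The program below is a minimal userspace sketch of that fast-path/slow-path pattern, not kernel code; the tiny struct sock stand-in and the fake mem_schedule() are illustrative assumptions only.

/* Simplified userspace model of the sk_stream_wmem_schedule() pattern.
 * "struct sock" here has only the one field we care about, and
 * mem_schedule() fakes the slow-path global accounting.
 */
#include <stdio.h>
#include <stdbool.h>

struct sock {
        int sk_forward_alloc;   /* bytes already reserved for this socket */
};

/* Slow path: pretend to charge the global memory pools and grow the
 * per-socket reservation (the real code rounds up and can fail under
 * memory pressure).
 */
static bool mem_schedule(struct sock *sk, int size)
{
        sk->sk_forward_alloc += size;
        return true;
}

/* Fast path first; take the slow path only when the local quota is short. */
static bool wmem_schedule(struct sock *sk, int size)
{
        return size <= sk->sk_forward_alloc || mem_schedule(sk, size);
}

int main(void)
{
        struct sock sk = { .sk_forward_alloc = 4096 };

        printf("small write ok: %d\n", wmem_schedule(&sk, 1500)); /* fast path */
        printf("large write ok: %d\n", wmem_schedule(&sk, 8192)); /* slow path */
        return 0;
}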
From ef015786152adaff5a6a8bf0c8ea2f70cee8059d Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Thu, 1 Sep 2005 17:48:59 -0700
Subject: [TCP]: Fix sk_forward_alloc underflow in tcp_sendmsg

I've finally found a potential cause of the sk_forward_alloc underflows
that people have been reporting sporadically.

When tcp_sendmsg tacks on extra bits to an existing TCP_PAGE we don't
check sk_forward_alloc even though a large amount of time may have
elapsed since we allocated the page.  In the mean time someone could've
come along and liberated packets and reclaimed sk_forward_alloc memory.

This patch makes tcp_sendmsg check sk_forward_alloc every time as we do
in do_tcp_sendpages.

Signed-off-by: Herbert Xu
Signed-off-by: David S. Miller
---
 include/net/sock.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'include/net')

diff --git a/include/net/sock.h b/include/net/sock.h
index e51e626e9af..cf628261da5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1232,9 +1232,8 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 {
         struct page *page = NULL;
 
-        if (sk_stream_wmem_schedule(sk, PAGE_SIZE))
-                page = alloc_pages(sk->sk_allocation, 0);
-        else {
+        page = alloc_pages(sk->sk_allocation, 0);
+        if (!page) {
                 sk->sk_prot->enter_memory_pressure();
                 sk_stream_moderate_sndbuf(sk);
         }
--
cgit v1.2.3


From 6475be16fd9b3c6746ca4d18959246b13c669ea8 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Thu, 1 Sep 2005 22:47:01 -0700
Subject: [TCP]: Keep TSO enabled even during loss events.

All we need to do is resegment the queue so that we record SACK
information accurately.  The edges of the SACK blocks guide our
resegmenting decisions.

With help from Herbert Xu.

Signed-off-by: David S. Miller
---
 include/net/tcp.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/net')

diff --git a/include/net/tcp.h b/include/net/tcp.h
index d6bcf1317a6..97af77c4d09 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -454,6 +454,7 @@ extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_xmit_retransmit_queue(struct sock *);
 extern void tcp_simple_retransmit(struct sock *);
 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
 
 extern void tcp_send_probe0(struct sock *);
 extern void tcp_send_partial(struct sock *);
--
cgit v1.2.3
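A note on the sk_forward_alloc underflow fix above (ef015786): the tcp_sendmsg half of that commit lives in net/ipv4/tcp.c and is outside the include/net filter of this log, so only the sk_stream_alloc_page simplification is visible here. The toy program below models the failure mode the changelog describes -- charging an append against a reservation that was checked only when the cached page was first allocated -- and the per-append re-check that prevents it. All names and numbers are illustrative, not the kernel's.

/* Toy model of the sk_forward_alloc underflow: the quota is checked when
 * the cached page is first set up, but not re-checked when more data is
 * appended later, after reclaim has taken the reservation away.
 */
#include <stdio.h>
#include <stdbool.h>

struct sock {
        int sk_forward_alloc;   /* bytes reserved for this socket */
};

/* Grant more reservation if the requested size is not already covered. */
static bool wmem_schedule(struct sock *sk, int size)
{
        if (size <= sk->sk_forward_alloc)
                return true;
        sk->sk_forward_alloc += 4096;   /* pretend the global pools said yes */
        return true;
}

/* Consume reservation, as the real code does when data is queued. */
static void charge(struct sock *sk, int size)
{
        sk->sk_forward_alloc -= size;
}

int main(void)
{
        struct sock sk = { .sk_forward_alloc = 0 };

        /* First sendmsg: set up the cached page, check and charge 2000 bytes. */
        wmem_schedule(&sk, 2000);
        charge(&sk, 2000);

        /* Reclaim elsewhere returns the rest of the reservation. */
        sk.sk_forward_alloc = 0;

        /* Buggy path: append 2000 more bytes WITHOUT re-checking. */
        charge(&sk, 2000);
        printf("without re-check: sk_forward_alloc = %d\n", sk.sk_forward_alloc);

        /* Fixed path: re-check before every append, as the patch does. */
        sk.sk_forward_alloc = 0;
        if (wmem_schedule(&sk, 2000))
                charge(&sk, 2000);
        printf("with re-check:    sk_forward_alloc = %d\n", sk.sk_forward_alloc);
        return 0;
}

And on the TSO commit (6475be16): letting SACK block edges guide resegmentation can be pictured as splitting one large queued segment exactly at a block edge, so that only the covered bytes are marked SACKed. In the kernel that split is performed by the newly exported tcp_fragment() on real skbs; the sketch below uses a hypothetical two-field segment type purely to illustrate the bookkeeping.

/* Toy illustration of splitting a queued segment at a SACK block edge. */
#include <stdio.h>

struct seg {
        unsigned int seq;      /* first sequence number in the segment */
        unsigned int end_seq;  /* one past the last sequence number    */
};

/* Split *s at "edge" so the head covers exactly [seq, edge). */
static struct seg split_at(struct seg *s, unsigned int edge)
{
        struct seg tail = { .seq = edge, .end_seq = s->end_seq };

        s->end_seq = edge;     /* head now ends at the SACK block edge */
        return tail;
}

int main(void)
{
        struct seg s = { .seq = 1000, .end_seq = 4000 };  /* one big TSO segment */
        unsigned int sack_end = 2500;                     /* SACK block ends here */

        struct seg tail = split_at(&s, sack_end);
        printf("SACKed head: [%u, %u)\n", s.seq, s.end_seq);
        printf("kept tail:   [%u, %u)\n", tail.seq, tail.end_seq);
        return 0;
}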