From 55820ee2f8c767a2833b21bd365e5753f50bd8ce Mon Sep 17 00:00:00 2001
From: Patrick McHardy
Date: Tue, 5 Jul 2005 14:08:10 -0700
Subject: [NET]: Fix signedness issues in net/core/filter.c
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is the code to load packet data into a register:

		k = fentry->k;
		if (k < 0) {
			...
		} else {
			u32 _tmp, *p;

			p = skb_header_pointer(skb, k, 4, &_tmp);
			if (p != NULL) {
				A = ntohl(*p);
				continue;
			}
		}

skb_header_pointer checks if the requested data is within the
linear area:

	int hlen = skb_headlen(skb);

	if (offset + len <= hlen)
		return skb->data + offset;

When offset is within [INT_MAX-len+1..INT_MAX] the addition will
result in a negative number which is <= hlen.

I couldn't trigger a crash on my AMD64 with 2GB of memory, but a
coworker tried on his x86 machine and it crashed immediately.

This patch fixes the check in skb_header_pointer to handle large
positive offsets similarly to skb_copy_bits. Invalid data can still
be accessed using negative offsets (also similarly to skb_copy_bits);
anyone using negative offsets needs to verify them himself.

Thanks to Thomas Vögtle for verifying the problem by crashing his
machine and providing me with an Oops.

Signed-off-by: Patrick McHardy
Acked-by: Herbert Xu
Signed-off-by: David S. Miller
---
 include/linux/skbuff.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 416a2e4024b..fbcb1865197 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1211,7 +1211,7 @@ static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 {
 	int hlen = skb_headlen(skb);
 
-	if (offset + len <= hlen)
+	if (hlen - offset >= len)
 		return skb->data + offset;
 
 	if (skb_copy_bits(skb, offset, buffer, len) < 0)
--
cgit v1.2.3
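To make the overflow above concrete, here is a minimal userspace sketch of
the old and new checks (the values and variable names are illustrative
assumptions, not kernel code; signed overflow is formally undefined in C,
but on the two's-complement machines Linux targets the addition wraps
negative exactly as the changelog describes):

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		int hlen = 100;			/* bytes in the linear area */
		int len = 4;			/* bytes requested by the filter */
		int offset = INT_MAX - 1;	/* hostile BPF load offset */

		/* Old check: offset + len wraps to a negative value <= hlen,
		 * so an out-of-bounds pointer would be returned. */
		if (offset + len <= hlen)
			printf("old check passes (the bug)\n");

		/* Fixed check: hlen - offset is hugely negative, never >= len,
		 * so large positive offsets fall through to skb_copy_bits. */
		if (hlen - offset >= len)
			printf("fixed check passes\n");
		else
			printf("fixed check rejects the offset\n");

		return 0;
	}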
From e176fe8954a5239c24afe79b1001ba3c29511963 Mon Sep 17 00:00:00 2001
From: Thomas Graf
Date: Tue, 5 Jul 2005 14:12:44 -0700
Subject: [NET]: Remove unused security member in sk_buff

Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller
---
 include/linux/skbuff.h               | 4 +---
 include/linux/tc_ematch/tc_em_meta.h | 2 +-
 2 files changed, 2 insertions(+), 4 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fbcb1865197..1e6290f4f81 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -183,7 +183,6 @@ struct skb_shared_info {
  *	@priority: Packet queueing priority
  *	@users: User count - see {datagram,tcp}.c
  *	@protocol: Packet protocol from driver
- *	@security: Security level of packet
  *	@truesize: Buffer size
  *	@head: Head of buffer
  *	@data: Data head pointer
@@ -255,8 +254,7 @@ struct sk_buff {
 				pkt_type,
 				ip_summed;
 	__u32			priority;
-	unsigned short		protocol,
-				security;
+	unsigned short		protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h
index a6b2cc530af..bcb762d9312 100644
--- a/include/linux/tc_ematch/tc_em_meta.h
+++ b/include/linux/tc_ematch/tc_em_meta.h
@@ -45,7 +45,7 @@ enum
 	TCF_META_ID_REALDEV,
 	TCF_META_ID_PRIORITY,
 	TCF_META_ID_PROTOCOL,
-	TCF_META_ID_SECURITY,
+	TCF_META_ID_SECURITY, /* obsolete */
 	TCF_META_ID_PKTTYPE,
 	TCF_META_ID_PKTLEN,
 	TCF_META_ID_DATALEN,
--
cgit v1.2.3

From 1cbb3380ef683f742876f48e3739b3df4ea9e168 Mon Sep 17 00:00:00 2001
From: Thomas Graf
Date: Tue, 5 Jul 2005 14:13:41 -0700
Subject: [NET]: Reduce size of sk_buff by 4 bytes

Reduce local_df to a bit field and ip_summed to a 2-bit field,
saving 13 bits. Move the bit fields, packet type, and protocol
into the spare area between the priority and the destructor.
Saves 4 bytes on both 32-bit and 64-bit architectures.

Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller
---
 include/linux/skbuff.h | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1e6290f4f81..14b95041349 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -248,17 +248,18 @@ struct sk_buff {
 				data_len,
 				mac_len,
 				csum;
-	unsigned char		local_df,
-				cloned:1,
-				nohdr:1,
-				pkt_type,
-				ip_summed;
 	__u32			priority;
-	unsigned short		protocol;
+	__u8			local_df:1,
+				cloned:1,
+				ip_summed:2,
+				nohdr:1;
+				/* 3 bits spare */
+	__u8			pkt_type;
+	__u16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
-        unsigned long		nfmark;
+	unsigned long		nfmark;
 	__u32			nfcache;
 	__u32			nfctinfo;
 	struct nf_conntrack	*nfct;
--
cgit v1.2.3

From bc971dee6ece1fd0d431948924becd9c50e7b778 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 5 Jul 2005 15:03:46 -0700
Subject: [SHAPER]: Switch to spinlocks.

Dave, you were right and the sleeping locks in shaper were broken.
Markus Kanet noticed this and also tested the patch below, which
switches the locking to spinlocks.

Signed-off-by: Christoph Hellwig
Signed-off-by: David S. Miller
---
 include/linux/if_shaper.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
index 004e6f09a6e..68c896a36a3 100644
--- a/include/linux/if_shaper.h
+++ b/include/linux/if_shaper.h
@@ -23,7 +23,7 @@ struct shaper
 	__u32 shapeclock;
 	unsigned long recovery;	/* Time we can next clock a packet out on
 				   an empty queue */
-	struct semaphore sem;
+	spinlock_t lock;
 	struct net_device_stats stats;
 	struct net_device *dev;
 	int  (*hard_start_xmit) (struct sk_buff *skb,
--
cgit v1.2.3
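To make the space saving in the sk_buff patch above concrete, here is a
minimal userspace sketch of the same bit-field repacking (the struct names
are hypothetical and the field names are borrowed from the patch; the exact
layout is compiler- and ABI-dependent, and the sizes in the comments are
what gcc typically produces on x86/x86-64):

	#include <stdio.h>

	struct old_layout {			/* before the patch */
		unsigned char	local_df,	/* a full byte */
				cloned:1,
				nohdr:1,	/* cloned/nohdr share a byte */
				pkt_type,
				ip_summed;	/* 4 bytes in total */
		unsigned int	priority;	/* 4-byte aligned */
		unsigned short	protocol;	/* 2 bytes + 2 bytes padding */
	};

	struct new_layout {			/* after the patch */
		unsigned int	priority;
		unsigned char	local_df:1,	/* all flags in one byte */
				cloned:1,
				ip_summed:2,
				nohdr:1;
				/* 3 bits spare */
		unsigned char	pkt_type;
		unsigned short	protocol;	/* no padding needed */
	};

	int main(void)
	{
		/* Typically prints 12 vs 8: the same 4 bytes the
		 * changelog above claims to save. */
		printf("old: %zu bytes, new: %zu bytes\n",
		       sizeof(struct old_layout), sizeof(struct new_layout));
		return 0;
	}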
Miller" Date: Tue, 5 Jul 2005 15:24:38 -0700 Subject: [TCP]: Move to new TSO segmenting scheme. Make TSO segment transmit size decisions at send time not earlier. The basic scheme is that we try to build as large a TSO frame as possible when pulling in the user data, but the size of the TSO frame output to the card is determined at transmit time. This is guided by tp->xmit_size_goal. It is always set to a multiple of MSS and tells sendmsg/sendpage how large an SKB to try and build. Later, tcp_write_xmit() and tcp_push_one() chop up the packet if necessary and conditions warrant. These routines can also decide to "defer" in order to wait for more ACKs to arrive and thus allow larger TSO frames to be emitted. A general observation is that TSO elongates the pipe, thus requiring a larger congestion window and larger buffering especially at the sender side. Therefore, it is important that applications 1) get a large enough socket send buffer (this is accomplished by our dynamic send buffer expansion code) 2) do large enough writes. Signed-off-by: David S. Miller --- include/linux/tcp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index dfd93d03f5d..e4fd82e4210 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -286,7 +286,7 @@ struct tcp_sock { __u32 max_window; /* Maximal window ever seen from peer */ __u32 pmtu_cookie; /* Last pmtu seen by socket */ __u32 mss_cache; /* Cached effective mss, not including SACKS */ - __u16 mss_cache_std; /* Like mss_cache, but without TSO */ + __u16 xmit_size_goal; /* Goal for segmenting output packets */ __u16 ext_header_len; /* Network protocol overhead (IP/IPv6 options) */ __u8 ca_state; /* State of fast-retransmit machine */ __u8 retransmits; /* Number of unrecovered RTO timeouts. */ -- cgit v1.2.3