From d9cc20484e5e48c6a5deb4387c20fd45bfbdde8c Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 16 Sep 2007 16:21:16 -0700
Subject: [NET] skbuff: Add skb_cow_head

This patch adds an optimised version of skb_cow that avoids the copy if
the header can be modified even if the rest of the payload is cloned.

This can be used in encapsulating paths where we only need to modify the
header.  As it is, this can be used in PPPOE and bridging.

Signed-off-by: Herbert Xu
Signed-off-by: David S. Miller
---
 include/linux/skbuff.h | 40 +++++++++++++++++++++++++++++++---------
 1 file changed, 31 insertions(+), 9 deletions(-)
(limited to 'include/linux')

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 93c27f71122..a656cecd373 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1352,6 +1352,22 @@ static inline int skb_clone_writable(struct sk_buff *skb, int len)
 	       skb_headroom(skb) + len <= skb->hdr_len;
 }
 
+static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
+			    int cloned)
+{
+	int delta = 0;
+
+	if (headroom < NET_SKB_PAD)
+		headroom = NET_SKB_PAD;
+	if (headroom > skb_headroom(skb))
+		delta = headroom - skb_headroom(skb);
+
+	if (delta || cloned)
+		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+					GFP_ATOMIC);
+	return 0;
+}
+
 /**
  *	skb_cow - copy header of skb when it is required
  *	@skb: buffer to cow
@@ -1366,16 +1382,22 @@ static inline int skb_clone_writable(struct sk_buff *skb, int len)
  */
 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
 {
-	int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
-			skb_headroom(skb);
-
-	if (delta < 0)
-		delta = 0;
+	return __skb_cow(skb, headroom, skb_cloned(skb));
+}
 
-	if (delta || skb_cloned(skb))
-		return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
-				~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
-	return 0;
+/**
+ *	skb_cow_head - skb_cow but only making the head writable
+ *	@skb: buffer to cow
+ *	@headroom: needed headroom
+ *
+ *	This function is identical to skb_cow except that we replace the
+ *	skb_cloned check by skb_header_cloned.  It should be used when
+ *	you only need to push on some header and do not need to modify
+ *	the data.
+ */
+static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+	return __skb_cow(skb, headroom, skb_header_cloned(skb));
 }
 
 /**
-- cgit v1.2.3
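To make the intended use of the new helper concrete, here is a sketch of a hypothetical encapsulation transmit path built on skb_cow_head(); the driver, the header layout (struct example_hdr) and the function names are invented for illustration and are not part of the patch:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Hypothetical 6-byte outer header, for illustration only. */
struct example_hdr {
	__be16 proto;
	__be16 session;
	__be16 length;
};

/*
 * Reserve headroom with skb_cow_head() before pushing the outer header.
 * Unlike skb_cow(), this only copies the data when the header itself is
 * shared, so a cloned skb whose header is private is left untouched.
 */
static int example_encap_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_hdr *hdr;

	if (skb_cow_head(skb, sizeof(*hdr)))
		goto drop;				/* allocation failed */

	hdr = (struct example_hdr *)skb_push(skb, sizeof(*hdr));
	hdr->proto = htons(ETH_P_PPP_SES);		/* illustrative value */
	hdr->session = 0;
	hdr->length = htons(skb->len - sizeof(*hdr));

	skb->dev = dev;
	return dev_queue_xmit(skb);

drop:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

The point of splitting skb_cow() and skb_cow_head() is that an encapsulator never touches the payload, so sharing the payload with a clone is harmless and only a shared header needs to force a copy.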
From fa890d586cc127ce72597ba0a909bfecf784e10c Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Sun, 16 Sep 2007 17:01:26 -0600
Subject: Fix non-ISA link error in drivers/scsi/advansys.c

When CONFIG_ISA is disabled, the isa_driver support will not be compiled
in.  Define stubs so that we don't get link-time errors.

Signed-off-by: Matthew Wilcox
Signed-off-by: Linus Torvalds
---
 include/linux/isa.h | 11 +++++++++++
 1 file changed, 11 insertions(+)
(limited to 'include/linux')

diff --git a/include/linux/isa.h b/include/linux/isa.h
index 1b855335cb1..b0270e3814c 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -22,7 +22,18 @@ struct isa_driver {
 
 #define to_isa_driver(x) container_of((x), struct isa_driver, driver)
 
+#ifdef CONFIG_ISA
 int isa_register_driver(struct isa_driver *, unsigned int);
 void isa_unregister_driver(struct isa_driver *);
+#else
+static inline int isa_register_driver(struct isa_driver *d, unsigned int i)
+{
+	return 0;
+}
+
+static inline void isa_unregister_driver(struct isa_driver *d)
+{
+}
+#endif
 
 #endif /* __LINUX_ISA_H */
-- cgit v1.2.3

From 735de2230f09741077a645a913de0a04b10208bf Mon Sep 17 00:00:00 2001
From: Pavel Emelyanov
Date: Tue, 18 Sep 2007 22:46:44 -0700
Subject: Convert uid hash to hlist

Surprisingly, but (spotted by Alexey Dobriyan) the uid hash still uses
list_heads, thus occupying twice as much place as it could.  Convert it
to hlist_heads.

Signed-off-by: Pavel Emelyanov
Signed-off-by: Alexey Dobriyan
Acked-by: Serge Hallyn
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/sched.h          | 2 +-
 include/linux/user_namespace.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
(limited to 'include/linux')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index f4e324ed2e4..6239bc2c2ba 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -593,7 +593,7 @@ struct user_struct {
 #endif
 
 	/* Hash table maintenance information */
-	struct list_head uidhash_list;
+	struct hlist_node uidhash_node;
 	uid_t uid;
 };
 
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 1101b0ce878..b5f41d4c2ee 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -11,7 +11,7 @@
 
 struct user_namespace {
 	struct kref		kref;
-	struct list_head	uidhash_table[UIDHASH_SZ];
+	struct hlist_head	uidhash_table[UIDHASH_SZ];
 	struct user_struct	*root_user;
 };
 
-- cgit v1.2.3
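Since the patch above only swaps the list types, a short sketch may help show why hlist halves the table's footprint and how such a hash is typically walked; the names (struct example_user, example_uid_hash_*) and the table size are assumptions for illustration, not kernel code:

#include <linux/list.h>
#include <linux/types.h>

#define EXAMPLE_UIDHASH_BITS	8
#define EXAMPLE_UIDHASH_SZ	(1 << EXAMPLE_UIDHASH_BITS)

/* Assumed miniature counterpart of struct user_struct after the patch. */
struct example_user {
	struct hlist_node	uidhash_node;	/* node carries two pointers */
	uid_t			uid;
};

/*
 * An hlist_head is a single pointer, so this table is half the size of
 * a list_head[] table with the same number of buckets.
 */
static struct hlist_head example_uidhash[EXAMPLE_UIDHASH_SZ];

static void example_uid_hash_insert(struct example_user *up,
				    struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static struct example_user *example_uid_hash_find(uid_t uid,
						  struct hlist_head *hashent)
{
	struct example_user *user;
	struct hlist_node *h;

	/* Four-argument form of hlist_for_each_entry(), as used by
	 * kernels of this era; later kernels drop the node cursor. */
	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid)
			return user;
	}
	return NULL;
}

The trade-off is that an hlist gives up O(1) access to the tail, which a hash bucket never needs, so nothing is lost for this use.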
Hallyn" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 6239bc2c2ba..5445eaec690 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1472,6 +1472,7 @@ static inline struct user_struct *get_uid(struct user_struct *u) } extern void free_uid(struct user_struct *); extern void switch_uid(struct user_struct *); +extern void release_uids(struct user_namespace *ns); #include -- cgit v1.2.3 From 480eccf9ae1073b87bb4fe118971fbf134a5bc61 Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Tue, 18 Sep 2007 22:46:47 -0700 Subject: Fix NUMA Memory Policy Reference Counting This patch proposes fixes to the reference counting of memory policy in the page allocation paths and in show_numa_map(). Extracted from my "Memory Policy Cleanups and Enhancements" series as stand-alone. Shared policy lookup [shmem] has always added a reference to the policy, but this was never unrefed after page allocation or after formatting the numa map data. Default system policy should not require additional ref counting, nor should the current task's task policy. However, show_numa_map() calls get_vma_policy() to examine what may be [likely is] another task's policy. The latter case needs protection against freeing of the policy. This patch adds a reference count to a mempolicy returned by get_vma_policy() when the policy is a vma policy or another task's mempolicy. Again, shared policy is already reference counted on lookup. A matching "unref" [__mpol_free()] is performed in alloc_page_vma() for shared and vma policies, and in show_numa_map() for shared and another task's mempolicy. We can call __mpol_free() directly, saving an admittedly inexpensive inline NULL test, because we know we have a non-NULL policy. Handling policy ref counts for hugepages is a bit trickier. huge_zonelist() returns a zone list that might come from a shared or vma 'BIND policy. In this case, we should hold the reference until after the huge page allocation in dequeue_hugepage(). The patch modifies huge_zonelist() to return a pointer to the mempolicy if it needs to be unref'd after allocation. 
From 480eccf9ae1073b87bb4fe118971fbf134a5bc61 Mon Sep 17 00:00:00 2001
From: Lee Schermerhorn
Date: Tue, 18 Sep 2007 22:46:47 -0700
Subject: Fix NUMA Memory Policy Reference Counting

This patch proposes fixes to the reference counting of memory policy in
the page allocation paths and in show_numa_map().  Extracted from my
"Memory Policy Cleanups and Enhancements" series as stand-alone.

Shared policy lookup [shmem] has always added a reference to the policy,
but this was never unrefed after page allocation or after formatting the
numa map data.

Default system policy should not require additional ref counting, nor
should the current task's task policy.  However, show_numa_map() calls
get_vma_policy() to examine what may be [likely is] another task's
policy.  The latter case needs protection against freeing of the policy.

This patch adds a reference count to a mempolicy returned by
get_vma_policy() when the policy is a vma policy or another task's
mempolicy.  Again, shared policy is already reference counted on lookup.
A matching "unref" [__mpol_free()] is performed in alloc_page_vma() for
shared and vma policies, and in show_numa_map() for shared and another
task's mempolicy.

We can call __mpol_free() directly, saving an admittedly inexpensive
inline NULL test, because we know we have a non-NULL policy.

Handling policy ref counts for hugepages is a bit trickier.
huge_zonelist() returns a zone list that might come from a shared or
vma 'BIND policy.  In this case, we should hold the reference until
after the huge page allocation in dequeue_hugepage().  The patch
modifies huge_zonelist() to return a pointer to the mempolicy if it
needs to be unref'd after allocation.

Kernel Build [16cpu, 32GB, ia64] - average of 10 runs:

                  w/o patch              w/ refcount patch
               Avg     Std Devn          Avg      Std Devn
  Real:      100.59      0.38          100.63       0.43
  User:     1209.60      0.37         1209.91       0.31
  System:     81.52      0.42           81.64       0.34

Signed-off-by: Lee Schermerhorn
Acked-by: Andi Kleen
Cc: Christoph Lameter
Acked-by: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mempolicy.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'include/linux')

diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5bdd656e88c..a020eb2d4e2 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -159,7 +159,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p);
 extern struct mempolicy default_policy;
 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
-		unsigned long addr, gfp_t gfp_flags);
+		unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol);
 extern unsigned slab_node(struct mempolicy *policy);
 
 extern enum zone_type policy_zone;
@@ -256,7 +256,7 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p)
 #define set_cpuset_being_rebound(x) do {} while (0)
 
 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
-		unsigned long addr, gfp_t gfp_flags)
+		unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol)
 {
 	return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
 }
-- cgit v1.2.3
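The include/linux hunks above only change the prototype; the caller-side pattern they enable (in mm/hugetlb.c, which is not part of this diff) looks roughly like the sketch below. example_dequeue_huge_page(), example_take_page_from_zonelist() and the gfp mask are invented stand-ins; only huge_zonelist() and __mpol_free() are interfaces from this header:

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/gfp.h>

/* Hypothetical stand-in for the actual zonelist walk. */
struct page *example_take_page_from_zonelist(struct zonelist *zl);

/*
 * Assumed caller pattern: huge_zonelist() now hands back, via *mpol,
 * the shared or vma policy it took a reference on, and the caller
 * holds that reference across the allocation and drops it afterwards.
 */
static struct page *example_dequeue_huge_page(struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct mempolicy *mpol;
	struct zonelist *zonelist;
	struct page *page;

	zonelist = huge_zonelist(vma, addr, GFP_HIGHUSER, &mpol);

	page = example_take_page_from_zonelist(zonelist);

	if (mpol)			/* only shared/vma policies are ref'd */
		__mpol_free(mpol);	/* matching unref after allocation */

	return page;
}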