author    Vegard Nossum <vegard.nossum@gmail.com>  2008-08-30 12:16:35 +0200
committer Vegard Nossum <vegard.nossum@gmail.com>  2009-06-15 15:49:25 +0200
commit    fe55f6d5c0cfec4a710ef6ff63f162b99d5f7842 (patch)
tree      4c4f0960d7355647e67f62e30e10c9215c123b65
parent    fc7d0c9f2122e8bf58deaf1252b0e750df5b0e91 (diff)
net: use kmemcheck bitfields API for skbuff
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
-rw-r--r--  include/linux/skbuff.h | 7
-rw-r--r--  net/core/skbuff.c      | 8
2 files changed, 15 insertions(+), 0 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5fd389162f0..ed6537fc5b4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -15,6 +15,7 @@
#define _LINUX_SKBUFF_H
#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>
@@ -346,6 +347,7 @@ struct sk_buff {
};
};
__u32 priority;
+ kmemcheck_bitfield_begin(flags1);
__u8 local_df:1,
cloned:1,
ip_summed:2,
@@ -356,6 +358,7 @@ struct sk_buff {
ipvs_property:1,
peeked:1,
nf_trace:1;
+ kmemcheck_bitfield_end(flags1);
__be16 protocol;
void (*destructor)(struct sk_buff *skb);
@@ -375,6 +378,8 @@ struct sk_buff {
__u16 tc_verd; /* traffic control verdict */
#endif
#endif
+
+ kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
@@ -382,6 +387,8 @@ struct sk_buff {
__u8 do_not_encrypt:1;
__u8 requeue:1;
#endif
+ kmemcheck_bitfield_end(flags2);
+
/* 0/13/14 bit hole */
#ifdef CONFIG_NET_DMA
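
For reference, the struct-side half of the pattern used above looks roughly like the sketch below. Only the kmemcheck_bitfield_begin()/kmemcheck_bitfield_end() markers correspond to what the hunks add to struct sk_buff; "struct foo" and its fields are made up for illustration. The markers merely delimit the byte range that the adjacent bitfields share so it can later be annotated as a unit.

#include <linux/types.h>
#include <linux/kmemcheck.h>

/* Hypothetical structure -- not from the patch. */
struct foo {
	__u32	a;			/* ordinary field, tracked as usual     */

	kmemcheck_bitfield_begin(flags);
	__u8	x:1,			/* adjacent bitfields share bytes; a    */
		y:2,			/* write to one bit read-modify-writes  */
		z:1;			/* the whole byte, which kmemcheck      */
	kmemcheck_bitfield_end(flags);	/* would otherwise flag as uninitialized */

	void	*data;
};

The markers only describe the layout; each allocation site still has to call kmemcheck_annotate_bitfield() on the freshly allocated object, which is what the net/core/skbuff.c hunks below do for flags1 and flags2.
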
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c2e4fb8f354..f0c4c6ad774 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -39,6 +39,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
@@ -201,6 +202,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
skb->data = data;
skb_reset_tail_pointer(skb);
skb->end = skb->tail + size;
+ kmemcheck_annotate_bitfield(skb, flags1);
+ kmemcheck_annotate_bitfield(skb, flags2);
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
atomic_set(&shinfo->dataref, 1);
@@ -217,6 +220,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
struct sk_buff *child = skb + 1;
atomic_t *fclone_ref = (atomic_t *) (child + 1);
+ kmemcheck_annotate_bitfield(child, flags1);
+ kmemcheck_annotate_bitfield(child, flags2);
skb->fclone = SKB_FCLONE_ORIG;
atomic_set(fclone_ref, 1);
@@ -633,6 +638,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
if (!n)
return NULL;
+
+ kmemcheck_annotate_bitfield(n, flags1);
+ kmemcheck_annotate_bitfield(n, flags2);
n->fclone = SKB_FCLONE_UNAVAILABLE;
}
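
The allocation-side half of the pattern might look like the sketch below, reusing the hypothetical struct foo from the earlier sketch; alloc_foo() and foo_cache are invented for illustration, and only kmemcheck_annotate_bitfield() is the real annotation used by the hunks above. The call marks the bytes between the flags begin/end markers as initialized, so the first read-modify-write of a bitfield in that range is not reported.

#include <linux/slab.h>
#include <linux/kmemcheck.h>

static struct kmem_cache *foo_cache;	/* assumed to be created elsewhere */

static struct foo *alloc_foo(gfp_t gfp_mask)
{
	struct foo *f;

	f = kmem_cache_alloc(foo_cache, gfp_mask);
	if (!f)
		return NULL;

	/*
	 * Tell kmemcheck that the byte range spanned by the "flags"
	 * bitfield group may be accessed as a unit even before every
	 * bit in it has been explicitly written.
	 */
	kmemcheck_annotate_bitfield(f, flags);

	f->x = 1;	/* first write touches the shared byte */
	return f;
}

This mirrors __alloc_skb() and skb_clone() above: the annotation is made once per marked group (flags1 and flags2 for sk_buff) right after the object comes out of the slab cache.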