Diffstat (limited to 'include')
-rw-r--r--  include/asm-frv/ftrace.h            |   1
-rw-r--r--  include/asm-generic/vmlinux.lds.h   |  29
-rw-r--r--  include/asm-m32r/ftrace.h           |   1
-rw-r--r--  include/asm-mn10300/ftrace.h        |   1
-rw-r--r--  include/linux/async_tx.h            |   9
-rw-r--r--  include/linux/blktrace_api.h        |   5
-rw-r--r--  include/linux/cgroup.h              |   5
-rw-r--r--  include/linux/compat.h              |   4
-rw-r--r--  include/linux/compiler.h            |   6
-rw-r--r--  include/linux/debugfs.h             |   8
-rw-r--r--  include/linux/dmaengine.h           |  30
-rw-r--r--  include/linux/dw_dmac.h             |  19
-rw-r--r--  include/linux/ftrace.h              | 244
-rw-r--r--  include/linux/ftrace_irq.h          |   2
-rw-r--r--  include/linux/gfp.h                 |   1
-rw-r--r--  include/linux/hardirq.h             |  73
-rw-r--r--  include/linux/hid.h                 |  23
-rw-r--r--  include/linux/interrupt.h           |   5
-rw-r--r--  include/linux/irqflags.h            |   8
-rw-r--r--  include/linux/jbd.h                 |   2
-rw-r--r--  include/linux/kallsyms.h            |  15
-rw-r--r--  include/linux/kernel.h              | 150
-rw-r--r--  include/linux/kmod.h                |  11
-rw-r--r--  include/linux/libata.h              |   2
-rw-r--r--  include/linux/memory.h              |   6
-rw-r--r--  include/linux/mmzone.h              |   6
-rw-r--r--  include/linux/module.h              |  68
-rw-r--r--  include/linux/moduleparam.h         |  10
-rw-r--r--  include/linux/pwm.h                 |   2
-rw-r--r--  include/linux/ring_buffer.h         |  38
-rw-r--r--  include/linux/sched.h               |   4
-rw-r--r--  include/linux/slab_def.h            |  68
-rw-r--r--  include/linux/slob_def.h            |   9
-rw-r--r--  include/linux/slub_def.h            |  53
-rw-r--r--  include/linux/smp.h                 |   7
-rw-r--r--  include/linux/string.h              |  16
-rw-r--r--  include/linux/syscalls.h            |  64
-rw-r--r--  include/linux/topology.h            |  11
-rw-r--r--  include/linux/trace_clock.h         |  19
-rw-r--r--  include/linux/tracepoint.h          | 116
-rw-r--r--  include/linux/usb/wusb.h            |   3
-rw-r--r--  include/trace/block.h               |  70
-rw-r--r--  include/trace/irq.h                 |   9
-rw-r--r--  include/trace/irq_event_types.h     |  55
-rw-r--r--  include/trace/kmemtrace.h           |  75
-rw-r--r--  include/trace/lockdep.h             |   9
-rw-r--r--  include/trace/lockdep_event_types.h |  44
-rw-r--r--  include/trace/power.h               |  32
-rw-r--r--  include/trace/sched.h               |  49
-rw-r--r--  include/trace/sched_event_types.h   | 337
-rw-r--r--  include/trace/skb.h                 |   4
-rw-r--r--  include/trace/trace_event_types.h   |   5
-rw-r--r--  include/trace/trace_events.h        |   5
-rw-r--r--  include/trace/workqueue.h           |  25
54 files changed, 1546 insertions(+), 327 deletions(-)
diff --git a/include/asm-frv/ftrace.h b/include/asm-frv/ftrace.h
new file mode 100644
index 00000000000..40a8c178f10
--- /dev/null
+++ b/include/asm-frv/ftrace.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index a654d724d3b..7fa660fd449 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -61,6 +61,30 @@
#define BRANCH_PROFILE()
#endif
+#ifdef CONFIG_EVENT_TRACER
+#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
+ *(_ftrace_events) \
+ VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+#else
+#define FTRACE_EVENTS()
+#endif
+
+#ifdef CONFIG_TRACING
+#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
+ *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+ VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
+#else
+#define TRACE_PRINTKS()
+#endif
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
+ *(__syscalls_metadata) \
+ VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
+#else
+#define TRACE_SYSCALLS()
+#endif
+
/* .data section */
#define DATA_DATA \
*(.data) \
@@ -86,7 +110,10 @@
*(__verbose) \
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
- BRANCH_PROFILE()
+ BRANCH_PROFILE() \
+ TRACE_PRINTKS() \
+ FTRACE_EVENTS() \
+ TRACE_SYSCALLS()
#define RO_DATA(align) \
. = ALIGN((align)); \
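
A minimal sketch, not part of the patch, of how such linker-gathered sections are usually consumed: the linker script above only brackets whatever lands in the named section between a start/stop symbol pair. The element type below is a placeholder; the real entries placed in _ftrace_events and __trace_printk_fmt are defined by the tracer code itself.

#include <linux/kernel.h>

/* Placeholder element type -- illustrative only. */
struct trace_event_entry {
        const char *name;
};

/* The linker script above provides these bounds for the section. */
extern struct trace_event_entry __start_ftrace_events[];
extern struct trace_event_entry __stop_ftrace_events[];

static void list_ftrace_events(void)
{
        struct trace_event_entry *p;

        for (p = __start_ftrace_events; p < __stop_ftrace_events; p++)
                printk(KERN_INFO "event: %s\n", p->name);
}
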
diff --git a/include/asm-m32r/ftrace.h b/include/asm-m32r/ftrace.h
new file mode 100644
index 00000000000..40a8c178f10
--- /dev/null
+++ b/include/asm-m32r/ftrace.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/include/asm-mn10300/ftrace.h b/include/asm-mn10300/ftrace.h
new file mode 100644
index 00000000000..40a8c178f10
--- /dev/null
+++ b/include/asm-mn10300/ftrace.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 45f6297821b..5fc2ef8d97f 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -21,6 +21,15 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+/* on architectures without dma-mapping capabilities we need to ensure
+ * that the asynchronous path compiles away
+ */
+#ifdef CONFIG_HAS_DMA
+#define __async_inline
+#else
+#define __async_inline __always_inline
+#endif
+
/**
* dma_chan_ref - object used to manage dma channels received from the
* dmaengine core.
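
A minimal sketch, using a hypothetical helper, of why the annotation matters: when CONFIG_HAS_DMA is unset, __async_inline becomes __always_inline, so a caller that passes a NULL channel lets the compiler constant-fold the asynchronous branch away entirely.

#include <linux/async_tx.h>
#include <linux/string.h>

static __async_inline void do_copy(struct dma_chan *chan,
                                   void *dst, void *src, size_t len)
{
        if (chan) {
                /* asynchronous path: a DMA descriptor would be submitted here */
        } else {
                memcpy(dst, src, len);          /* synchronous fallback */
        }
}
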
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 6e915878e88..d960889e92e 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
#ifdef __KERNEL__
#if defined(CONFIG_BLK_DEV_IO_TRACE)
+
+#include <linux/sysfs.h>
+
struct blk_trace {
int trace_state;
struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
+extern struct attribute_group blk_trace_attr_group;
+
#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
#define blk_trace_shutdown(q) do { } while (0)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 4316a546beb..665fa70e409 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -365,7 +365,10 @@ int cgroup_task_count(const struct cgroup *cgrp);
/* Return true if cgrp is a descendant of the task's cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
-/* Control Group subsystem type. See Documentation/cgroups.txt for details */
+/*
+ * Control Group subsystem type.
+ * See Documentation/cgroups/cgroups.txt for details
+ */
struct cgroup_subsys {
struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 9723edd6455..f2ded21f9a3 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -193,10 +193,10 @@ asmlinkage ssize_t compat_sys_writev(unsigned long fd,
const struct compat_iovec __user *vec, unsigned long vlen);
asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
const struct compat_iovec __user *vec,
- unsigned long vlen, u32 pos_high, u32 pos_low);
+ unsigned long vlen, u32 pos_low, u32 pos_high);
asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
const struct compat_iovec __user *vec,
- unsigned long vlen, u32 pos_high, u32 pos_low);
+ unsigned long vlen, u32 pos_low, u32 pos_high);
int compat_do_execve(char * filename, compat_uptr_t __user *argv,
compat_uptr_t __user *envp, struct pt_regs * regs);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d95da1020f1..6faa7e549de 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -68,6 +68,7 @@ struct ftrace_branch_data {
unsigned long miss;
unsigned long hit;
};
+ unsigned long miss_hit[2];
};
};
@@ -125,10 +126,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
.line = __LINE__, \
}; \
______r = !!(cond); \
- if (______r) \
- ______f.hit++; \
- else \
- ______f.miss++; \
+ ______f.miss_hit[______r]++; \
______r; \
}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
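
The hunk above replaces the hit/miss if/else with an array indexed by the normalized condition value. A standalone sketch of the same idea (names are illustrative):

struct branch_counts {
        unsigned long miss_hit[2];      /* [0] = miss, [1] = hit */
};

static void record_branch(struct branch_counts *c, int cond)
{
        int r = !!cond;                 /* normalize to 0 or 1 */

        c->miss_hit[r]++;               /* no branch in the instrumentation */
}
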
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index af0e01d4c66..eb5c2ba2f81 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode,
struct dentry *debugfs_create_blob(const char *name, mode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
+
+bool debugfs_initialized(void);
+
#else
#include <linux/err.h>
@@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
return ERR_PTR(-ENODEV);
}
+static inline bool debugfs_initialized(void)
+{
+ return false;
+}
+
#endif
#endif
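
A minimal usage sketch for the new debugfs_initialized() helper (the caller and directory name are illustrative): it lets early code skip debugfs setup cleanly when debugfs is not yet available or is compiled out.

#include <linux/debugfs.h>

static void mydrv_add_debugfs(void)
{
        if (!debugfs_initialized())
                return;                 /* debugfs not available (yet) */

        debugfs_create_dir("mydrv", NULL);
}
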
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1956c8d46d3..2e2aa3df170 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -23,9 +23,6 @@
#include <linux/device.h>
#include <linux/uio.h>
-#include <linux/kref.h>
-#include <linux/completion.h>
-#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>
/**
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor {
/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
+ * @privatecnt: how many DMA channels are requested by dma_request_channel
* @channels: the list of struct dma_chan
* @global_node: list_head for global dma_device_list
* @cap_mask: one or more dma_capability flags
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor {
struct dma_device {
unsigned int chancnt;
+ unsigned int privatecnt;
struct list_head channels;
struct list_head global_node;
dma_cap_mask_t cap_mask;
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void)
}
#endif
+#ifdef CONFIG_ASYNC_TX_DMA
+#define async_dmaengine_get() dmaengine_get()
+#define async_dmaengine_put() dmaengine_put()
+#define async_dma_find_channel(type) dma_find_channel(type)
+#else
+static inline void async_dmaengine_get(void)
+{
+}
+static inline void async_dmaengine_put(void)
+{
+}
+static inline struct dma_chan *
+async_dma_find_channel(enum dma_transaction_type type)
+{
+ return NULL;
+}
+#endif
+
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
set_bit(tx_type, dstp->bits);
}
+#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
+static inline void
+__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+ clear_bit(tx_type, dstp->bits);
+}
+
#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
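
A short sketch of the capability-mask helpers, including the dma_cap_clear() added above (the capability choices are illustrative):

#include <linux/dmaengine.h>

static void build_cap_mask(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        dma_cap_set(DMA_XOR, mask);

        /* changed our mind about XOR offload */
        dma_cap_clear(DMA_XOR, mask);
}
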
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde247f..c8aad713a04 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
+/* DMA API extensions */
+struct dw_cyclic_desc {
+ struct dw_desc **desc;
+ unsigned long periods;
+ void (*period_callback)(void *param);
+ void *period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_data_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
#endif /* DW_DMAC_H */
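
A sketch of the intended call sequence for the cyclic API declared above; the buffer parameters are illustrative, and the ERR_PTR-style error handling is an assumption rather than something this header spells out.

#include <linux/err.h>
#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
                           size_t buf_len, size_t period_len)
{
        struct dw_cyclic_desc *cdesc;

        cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
                                   DMA_FROM_DEVICE);
        if (IS_ERR(cdesc))              /* assumed error convention */
                return PTR_ERR(cdesc);

        return dw_dma_cyclic_start(chan);
}

static void stop_cyclic_rx(struct dma_chan *chan)
{
        dw_dma_cyclic_stop(chan);
        dw_dma_cyclic_free(chan);
}
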
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a7f8134c594..015a3d22cf7 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,15 +1,18 @@
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H
-#include <linux/linkage.h>
-#include <linux/fs.h>
-#include <linux/ktime.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/module.h>
+#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
+#include <linux/linkage.h>
#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+
+#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
@@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write,
loff_t *ppos);
#endif
+struct ftrace_func_command {
+ struct list_head list;
+ char *name;
+ int (*func)(char *func, char *cmd,
+ char *params, int enable);
+};
+
#ifdef CONFIG_DYNAMIC_FTRACE
-/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
-#include <asm/ftrace.h>
+
+int ftrace_arch_code_modify_prepare(void);
+int ftrace_arch_code_modify_post_process(void);
+
+struct seq_file;
+
+struct ftrace_probe_ops {
+ void (*func)(unsigned long ip,
+ unsigned long parent_ip,
+ void **data);
+ int (*callback)(unsigned long ip, void **data);
+ void (*free)(void **data);
+ int (*print)(struct seq_file *m,
+ unsigned long ip,
+ struct ftrace_probe_ops *ops,
+ void *data);
+};
+
+extern int
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data);
+extern void
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data);
+extern void
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
+extern void unregister_ftrace_function_probe_all(char *glob);
enum {
FTRACE_FL_FREE = (1 << 0),
@@ -110,15 +145,23 @@ enum {
};
struct dyn_ftrace {
- struct list_head list;
- unsigned long ip; /* address of mcount call-site */
- unsigned long flags;
- struct dyn_arch_ftrace arch;
+ union {
+ unsigned long ip; /* address of mcount call-site */
+ struct dyn_ftrace *freelist;
+ };
+ union {
+ unsigned long flags;
+ struct dyn_ftrace *newlist;
+ };
+ struct dyn_arch_ftrace arch;
};
int ftrace_force_update(void);
void ftrace_set_filter(unsigned char *buf, int len, int reset);
+int register_ftrace_command(struct ftrace_func_command *cmd);
+int unregister_ftrace_command(struct ftrace_func_command *cmd);
+
/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
@@ -126,6 +169,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);
+
+#ifndef FTRACE_ADDR
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
+#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
@@ -136,7 +183,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif
/**
- * ftrace_make_nop - convert code into top
+ * ftrace_make_nop - convert code into nop
* @mod: module structure if called by module load initialization
* @rec: the mcount call site record
* @addr: the address that the call site should be calling
@@ -181,7 +228,6 @@ extern int ftrace_make_nop(struct module *mod,
*/
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
-
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);
@@ -198,6 +244,14 @@ extern void ftrace_enable_daemon(void);
# define ftrace_disable_daemon() do { } while (0)
# define ftrace_enable_daemon() do { } while (0)
static inline void ftrace_release(void *start, unsigned long size) { }
+static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+{
+ return -EINVAL;
+}
+static inline int unregister_ftrace_command(char *cmd_name)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */
@@ -233,24 +287,25 @@ static inline void __ftrace_enabled_restore(int enabled)
#endif
}
-#ifdef CONFIG_FRAME_POINTER
-/* TODO: need to fix this for ARM */
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
-# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
-# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
-# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
-# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
-# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
-#else
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 0UL
-# define CALLER_ADDR2 0UL
-# define CALLER_ADDR3 0UL
-# define CALLER_ADDR4 0UL
-# define CALLER_ADDR5 0UL
-# define CALLER_ADDR6 0UL
-#endif
+#ifndef HAVE_ARCH_CALLER_ADDR
+# ifdef CONFIG_FRAME_POINTER
+# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
+# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
+# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
+# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
+# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
+# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
+# else
+# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+# define CALLER_ADDR1 0UL
+# define CALLER_ADDR2 0UL
+# define CALLER_ADDR3 0UL
+# define CALLER_ADDR4 0UL
+# define CALLER_ADDR5 0UL
+# define CALLER_ADDR6 0UL
+# endif
+#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
@@ -268,54 +323,6 @@ static inline void __ftrace_enabled_restore(int enabled)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
-#ifdef CONFIG_TRACING
-extern int ftrace_dump_on_oops;
-
-extern void tracing_start(void);
-extern void tracing_stop(void);
-extern void ftrace_off_permanent(void);
-
-extern void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
-/**
- * ftrace_printk - printf formatting in the ftrace buffer
- * @fmt: the printf format for printing
- *
- * Note: __ftrace_printk is an internal function for ftrace_printk and
- * the @ip is passed in via the ftrace_printk macro.
- *
- * This function allows a kernel developer to debug fast path sections
- * that printk is not appropriate for. By scattering in various
- * printk like tracing in the code, a developer can quickly see
- * where problems are occurring.
- *
- * This is intended as a debugging tool for the developer only.
- * Please refrain from leaving ftrace_printks scattered around in
- * your code.
- */
-# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
-extern int
-__ftrace_printk(unsigned long ip, const char *fmt, ...)
- __attribute__ ((format (printf, 2, 3)));
-extern void ftrace_dump(void);
-#else
-static inline void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
-static inline int
-ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
-
-static inline void tracing_start(void) { }
-static inline void tracing_stop(void) { }
-static inline void ftrace_off_permanent(void) { }
-static inline int
-ftrace_printk(const char *fmt, ...)
-{
- return 0;
-}
-static inline void ftrace_dump(void) { }
-#endif
-
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
extern void ftrace_init_module(struct module *mod,
@@ -327,36 +334,6 @@ ftrace_init_module(struct module *mod,
unsigned long *start, unsigned long *end) { }
#endif
-enum {
- POWER_NONE = 0,
- POWER_CSTATE = 1,
- POWER_PSTATE = 2,
-};
-
-struct power_trace {
-#ifdef CONFIG_POWER_TRACER
- ktime_t stamp;
- ktime_t end;
- int type;
- int state;
-#endif
-};
-
-#ifdef CONFIG_POWER_TRACER
-extern void trace_power_start(struct power_trace *it, unsigned int type,
- unsigned int state);
-extern void trace_power_mark(struct power_trace *it, unsigned int type,
- unsigned int state);
-extern void trace_power_end(struct power_trace *it);
-#else
-static inline void trace_power_start(struct power_trace *it, unsigned int type,
- unsigned int state) { }
-static inline void trace_power_mark(struct power_trace *it, unsigned int type,
- unsigned int state) { }
-static inline void trace_power_end(struct power_trace *it) { }
-#endif
-
-
/*
* Structure that defines an entry function trace.
*/
@@ -398,8 +375,7 @@ struct ftrace_ret_stack {
extern void return_to_handler(void);
extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long long time,
- unsigned long func, int *depth);
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
extern void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
@@ -514,6 +490,50 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
return tsk->trace & TSK_TRACE_FL_GRAPH;
}
+extern int ftrace_dump_on_oops;
+
#endif /* CONFIG_TRACING */
+
+#ifdef CONFIG_HW_BRANCH_TRACER
+
+void trace_hw_branch(u64 from, u64 to);
+void trace_hw_branch_oops(void);
+
+#else /* CONFIG_HW_BRANCH_TRACER */
+
+static inline void trace_hw_branch(u64 from, u64 to) {}
+static inline void trace_hw_branch_oops(void) {}
+
+#endif /* CONFIG_HW_BRANCH_TRACER */
+
+/*
+ * A syscall entry in the ftrace syscalls array.
+ *
+ * @name: name of the syscall
+ * @nb_args: number of parameters it takes
+ * @types: list of types as strings
+ * @args: list of args as strings (args[i] matches types[i])
+ */
+struct syscall_metadata {
+ const char *name;
+ int nb_args;
+ const char **types;
+ const char **args;
+};
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+extern void arch_init_ftrace_syscalls(void);
+extern struct syscall_metadata *syscall_nr_to_meta(int nr);
+extern void start_ftrace_syscalls(void);
+extern void stop_ftrace_syscalls(void);
+extern void ftrace_syscall_enter(struct pt_regs *regs);
+extern void ftrace_syscall_exit(struct pt_regs *regs);
+#else
+static inline void start_ftrace_syscalls(void) { }
+static inline void stop_ftrace_syscalls(void) { }
+static inline void ftrace_syscall_enter(struct pt_regs *regs) { }
+static inline void ftrace_syscall_exit(struct pt_regs *regs) { }
+#endif
+
#endif /* _LINUX_FTRACE_H */
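
A minimal sketch of registering a command with the new register_ftrace_command() hook (command name and handler are illustrative); with dynamic ftrace disabled the stub above simply returns -EINVAL.

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/init.h>

static int my_cmd_func(char *func, char *cmd, char *params, int enable)
{
        pr_info("cmd %s on %s (params %s, enable %d)\n",
                cmd, func, params ? params : "", enable);
        return 0;
}

static struct ftrace_func_command my_cmd = {
        .name = "mycmd",
        .func = my_cmd_func,
};

static int __init my_cmd_init(void)
{
        return register_ftrace_command(&my_cmd);
}
core_initcall(my_cmd_init);
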
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 366a054d0b0..dca7bf8cffe 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
#define _LINUX_FTRACE_IRQ_H
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
+#ifdef CONFIG_FTRACE_NMI_ENTER
extern void ftrace_nmi_enter(void);
extern void ftrace_nmi_exit(void);
#else
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index dd20cd78faa..0bbc15f5453 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -4,6 +4,7 @@
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
+#include <linux/topology.h>
struct vm_area_struct;
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dd..faa1cf848bc 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,55 +15,61 @@
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
- * The hardirq count can be overridden per architecture, the default is:
+ * The hardirq count can in theory reach the same as NR_IRQS.
+ * In reality, the number of nested IRQS is limited to the stack
+ * size as well. For archs with over 1000 IRQS it is not practical
+ * to expect that they will all nest. We give a max of 10 bits for
+ * hardirq nesting. An arch may choose to give less than 10 bits.
+ * m68k expects it to be 8.
*
- * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
- * - ( bit 28 is the PREEMPT_ACTIVE flag. )
+ * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
+ * - bit 26 is the NMI_MASK
+ * - bit 28 is the PREEMPT_ACTIVE flag
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x0fff0000
+ * HARDIRQ_MASK: 0x03ff0000
+ * NMI_MASK: 0x04000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
+#define NMI_BITS 1
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS 12
+#define MAX_HARDIRQ_BITS 10
-#ifndef MAX_HARDIRQS_PER_CPU
-#define MAX_HARDIRQS_PER_CPU NR_IRQS
+#ifndef HARDIRQ_BITS
+# define HARDIRQ_BITS MAX_HARDIRQ_BITS
#endif
-/*
- * The hardirq mask has to be large enough to have space for potentially
- * all IRQ sources in the system nesting on a single CPU.
- */
-#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
-# error HARDIRQ_BITS is too low!
-#endif
+#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
+#error HARDIRQ_BITS too high!
#endif
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
#define __IRQ_MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET (1UL << NMI_SHIFT)
-#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
+#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
/*
* Are we doing bottom half or hardware interrupt processing?
@@ -73,6 +79,11 @@
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi() (preempt_count() & NMI_MASK)
+
#if defined(CONFIG_PREEMPT)
# define PREEMPT_INATOMIC_BASE kernel_locked()
# define PREEMPT_CHECK_OFFSET 1
@@ -164,20 +175,24 @@ extern void irq_enter(void);
*/
extern void irq_exit(void);
-#define nmi_enter() \
- do { \
- ftrace_nmi_enter(); \
- lockdep_off(); \
- rcu_nmi_enter(); \
- __irq_enter(); \
+#define nmi_enter() \
+ do { \
+ ftrace_nmi_enter(); \
+ BUG_ON(in_nmi()); \
+ add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+ lockdep_off(); \
+ rcu_nmi_enter(); \
+ trace_hardirq_enter(); \
} while (0)
-#define nmi_exit() \
- do { \
- __irq_exit(); \
- rcu_nmi_exit(); \
- lockdep_on(); \
- ftrace_nmi_exit(); \
+#define nmi_exit() \
+ do { \
+ trace_hardirq_exit(); \
+ rcu_nmi_exit(); \
+ lockdep_on(); \
+ BUG_ON(!in_nmi()); \
+ sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+ ftrace_nmi_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */
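
A standalone check of the new preempt_count layout, mirroring the macros above; compiled as ordinary user-space C it prints SOFTIRQ_MASK = 0x0000ff00, HARDIRQ_MASK = 0x03ff0000 and NMI_MASK = 0x04000000, matching the comment in the hunk.

#include <stdio.h>

#define PREEMPT_BITS    8
#define SOFTIRQ_BITS    8
#define HARDIRQ_BITS    10
#define NMI_BITS        1

#define PREEMPT_SHIFT   0
#define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT       (HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)   ((1UL << (x)) - 1)

int main(void)
{
        printf("SOFTIRQ_MASK = 0x%08lx\n",
               __IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT);
        printf("HARDIRQ_MASK = 0x%08lx\n",
               __IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT);
        printf("NMI_MASK     = 0x%08lx\n",
               __IRQ_MASK(NMI_BITS) << NMI_SHIFT);
        return 0;
}
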
diff --git a/include/linux/hid.h b/include/linux/hid.h
index fa8ee9cef7b..a72876e4358 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -270,6 +270,7 @@ struct hid_item {
#define HID_QUIRK_INVERT 0x00000001
#define HID_QUIRK_NOTOUCH 0x00000002
+#define HID_QUIRK_IGNORE 0x00000004
#define HID_QUIRK_NOGET 0x00000008
#define HID_QUIRK_BADPAD 0x00000020
#define HID_QUIRK_MULTI_INPUT 0x00000040
@@ -603,12 +604,17 @@ struct hid_ll_driver {
int (*open)(struct hid_device *hdev);
void (*close)(struct hid_device *hdev);
+ int (*power)(struct hid_device *hdev, int level);
+
int (*hidinput_input_event) (struct input_dev *idev, unsigned int type,
unsigned int code, int value);
int (*parse)(struct hid_device *hdev);
};
+#define PM_HINT_FULLON 1<<5
+#define PM_HINT_NORMAL 1<<1
+
/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
/* We ignore a few input applications that are not widely used */
#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002))
@@ -641,6 +647,7 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
void hid_output_report(struct hid_report *report, __u8 *data);
struct hid_device *hid_allocate_device(void);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
/**
@@ -791,21 +798,5 @@ dbg_hid(const char *fmt, ...)
__FILE__ , ## arg)
#endif /* HID_FF */
-#ifdef __KERNEL__
-#ifdef CONFIG_HID_COMPAT
-#define HID_COMPAT_LOAD_DRIVER(name) \
-/* prototype to avoid sparse warning */ \
-extern void hid_compat_##name(void); \
-void hid_compat_##name(void) { } \
-EXPORT_SYMBOL(hid_compat_##name)
-#else
-#define HID_COMPAT_LOAD_DRIVER(name)
-#endif /* HID_COMPAT */
-#define HID_COMPAT_CALL_DRIVER(name) do { \
- extern void hid_compat_##name(void); \
- hid_compat_##name(); \
-} while (0)
-#endif /* __KERNEL__ */
-
#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c68bffd182b..ce2c07d99fc 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -278,6 +278,11 @@ enum
NR_SOFTIRQS
};
+/* map softirq index to softirq name. update 'softirq_to_name' in
+ * kernel/softirq.c when adding a new softirq.
+ */
+extern char *softirq_to_name[NR_SOFTIRQS];
+
/* softirq mask and active fields moved to irq_cpustat_t in
* asm/hardirq.h to get better cache usage. KAO
*/
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 74bde13224c..b02a3f1d46a 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -24,8 +24,8 @@
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
-# define trace_softirq_enter() do { current->softirq_context++; } while (0)
-# define trace_softirq_exit() do { current->softirq_context--; } while (0)
+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
@@ -38,8 +38,8 @@
# define trace_softirqs_enabled(p) 0
# define trace_hardirq_enter() do { } while (0)
# define trace_hardirq_exit() do { } while (0)
-# define trace_softirq_enter() do { } while (0)
-# define trace_softirq_exit() do { } while (0)
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
# define INIT_TRACE_IRQFLAGS
#endif
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 2c6943152c2..53ae4399da2 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -35,7 +35,7 @@
#define journal_oom_retry 1
/*
- * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds
+ * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
* certain classes of error which can occur due to failed IOs. Under
* normal use we want ext3 to continue after such errors, because
* hardware _can_ fail, but for debugging purposes when running tests on
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index f3fe34391d8..792274269f2 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -13,10 +13,17 @@
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+struct module;
+
#ifdef CONFIG_KALLSYMS
/* Lookup the address for a symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name);
+/* Call a function on each kallsyms symbol in the core kernel */
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
+ unsigned long),
+ void *data);
+
extern int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset);
@@ -43,6 +50,14 @@ static inline unsigned long kallsyms_lookup_name(const char *name)
return 0;
}
+static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *,
+ unsigned long),
+ void *data)
+{
+ return 0;
+}
+
static inline int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset)
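
A sketch of the callback shape expected by the new kallsyms_on_each_symbol() iterator. The lookup itself is illustrative, and the assumption that a non-zero return stops the walk follows the usual convention for such iterators rather than anything stated in this header.

#include <linux/kallsyms.h>
#include <linux/string.h>
#include <linux/kernel.h>

static int print_if_match(void *data, const char *name, struct module *mod,
                          unsigned long addr)
{
        if (strcmp(name, data) == 0) {
                pr_info("%s found at %#lx\n", name, addr);
                return 1;               /* assumed: non-zero stops the walk */
        }
        return 0;
}

static void find_core_symbol(const char *wanted)
{
        kallsyms_on_each_symbol(print_if_match, (void *)wanted);
}
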
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 556d781e69f..d9e75ec7def 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -242,6 +242,19 @@ extern struct ratelimit_state printk_ratelimit_state;
extern int printk_ratelimit(void);
extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);
+
+/*
+ * Print a one-time message (analogous to WARN_ONCE() et al):
+ */
+#define printk_once(x...) ({ \
+ static int __print_once = 1; \
+ \
+ if (__print_once) { \
+ __print_once = 0; \
+ printk(x); \
+ } \
+})
+
void log_buf_kexec_setup(void);
#else
static inline int vprintk(const char *s, va_list args)
@@ -254,6 +267,10 @@ static inline int printk_ratelimit(void) { return 0; }
static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
unsigned int interval_msec) \
{ return false; }
+
+/* No effect, but we still get type checking even in the !PRINTK case: */
+#define printk_once(x...) printk(x)
+
static inline void log_buf_kexec_setup(void)
{
}
@@ -375,6 +392,139 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
#endif
/*
+ * General tracing related utility functions - trace_printk(),
+ * tracing_on/tracing_off and tracing_start()/tracing_stop
+ *
+ * Use tracing_on/tracing_off when you want to quickly turn on or off
+ * tracing. It simply enables or disables the recording of the trace events.
+ * This also corresponds to the user space debugfs/tracing/tracing_on
+ * file, which gives a means for the kernel and userspace to interact.
+ * Place a tracing_off() in the kernel where you want tracing to end.
+ * From user space, examine the trace, and then echo 1 > tracing_on
+ * to continue tracing.
+ *
+ * tracing_stop/tracing_start has slightly more overhead. It is used
+ * by things like suspend to ram where disabling the recording of the
+ * trace is not enough, but tracing must actually stop because things
+ * like calling smp_processor_id() may crash the system.
+ *
+ * Most likely, you want to use tracing_on/tracing_off.
+ */
+#ifdef CONFIG_RING_BUFFER
+void tracing_on(void);
+void tracing_off(void);
+/* trace_off_permanent stops recording with no way to bring it back */
+void tracing_off_permanent(void);
+int tracing_is_on(void);
+#else
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline void tracing_off_permanent(void) { }
+static inline int tracing_is_on(void) { return 0; }
+#endif
+#ifdef CONFIG_TRACING
+extern void tracing_start(void);
+extern void tracing_stop(void);
+extern void ftrace_off_permanent(void);
+
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
+
+static inline void __attribute__ ((format (printf, 1, 2)))
+____trace_printk_check_format(const char *fmt, ...)
+{
+}
+#define __trace_printk_check_format(fmt, args...) \
+do { \
+ if (0) \
+ ____trace_printk_check_format(fmt, ##args); \
+} while (0)
+
+/**
+ * trace_printk - printf formatting in the ftrace buffer
+ * @fmt: the printf format for printing
+ *
+ * Note: __trace_printk is an internal function for trace_printk and
+ * the @ip is passed in via the trace_printk macro.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_printks scattered around in
+ * your code.
+ */
+
+#define trace_printk(fmt, args...) \
+do { \
+ __trace_printk_check_format(fmt, ##args); \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
+ } else \
+ __trace_printk(_THIS_IP_, fmt, ##args); \
+} while (0)
+
+extern int
+__trace_bprintk(unsigned long ip, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+
+extern int
+__trace_printk(unsigned long ip, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to allocate the static variable to fmt if it is not a
+ * constant. Even with the outer if statement.
+ */
+#define ftrace_vprintk(fmt, vargs) \
+do { \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
+ } else \
+ __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
+} while (0)
+
+extern int
+__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern int
+__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern void ftrace_dump(void);
+#else
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
+static inline int
+trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
+
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void ftrace_off_permanent(void) { }
+static inline int
+trace_printk(const char *fmt, ...)
+{
+ return 0;
+}
+static inline int
+ftrace_vprintk(const char *fmt, va_list ap)
+{
+ return 0;
+}
+static inline void ftrace_dump(void) { }
+#endif /* CONFIG_TRACING */
+
+/*
* Display an IP address in readable format.
*/
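
A short usage sketch for the helpers added above (the surrounding function and messages are illustrative): printk_once() fires exactly once, and trace_printk() writes to the ftrace ring buffer rather than the console.

#include <linux/kernel.h>

static void handle_packet(int len)
{
        printk_once(KERN_INFO "first packet seen\n");

        /* recorded in the ftrace buffer, visible via debugfs/tracing/trace */
        trace_printk("packet len=%d\n", len);
}
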
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 92213a9194e..d5fa565086d 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -29,10 +29,15 @@
#ifdef CONFIG_MODULES
/* modprobe exit status on success, -ve on error. Return value
* usually useless though. */
-extern int request_module(const char * name, ...) __attribute__ ((format (printf, 1, 2)));
-#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x)))
+extern int __request_module(bool wait, const char *name, ...) \
+ __attribute__((format(printf, 2, 3)));
+#define request_module(mod...) __request_module(true, mod)
+#define request_module_nowait(mod...) __request_module(false, mod)
+#define try_then_request_module(x, mod...) \
+ ((x) ?: (__request_module(false, mod), (x)))
#else
-static inline int request_module(const char * name, ...) { return -ENOSYS; }
+static inline int request_module(const char *name, ...) { return -ENOSYS; }
+static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; }
#define try_then_request_module(x, mod...) (x)
#endif
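
A sketch of the two request flavours now built on __request_module() (the module name is illustrative):

#include <linux/kmod.h>

static void load_helper_modules(void)
{
        /* waits for modprobe to finish -- the old request_module() behaviour */
        request_module("crc-ccitt");

        /* fire and forget: returns without waiting for modprobe */
        request_module_nowait("crc-ccitt");
}
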
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 76262d83656..b450a262885 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -379,7 +379,7 @@ enum {
ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
not multiple of 16 bytes */
- ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firwmare update warning */
+ ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
/* DMA mask for user DMA control: User visible values; DO NOT
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 42767d1a62e..37fa19b34ef 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -110,4 +110,10 @@ struct memory_accessor {
off_t offset, size_t count);
};
+/*
+ * Kernel text modification mutex, used for code patching. Users of this lock
+ * can sleep.
+ */
+extern struct mutex text_mutex;
+
#endif /* _LINUX_MEMORY_H_ */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 26ef24076b7..186ec6ab334 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -764,12 +764,6 @@ extern int numa_zonelist_order_handler(struct ctl_table *, int,
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */
-#include <linux/topology.h>
-/* Returns the number of the current Node. */
-#ifndef numa_node_id
-#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
-#endif
-
#ifndef CONFIG_NEED_MULTIPLE_NODES
extern struct pglist_data contig_page_data;
diff --git a/include/linux/module.h b/include/linux/module.h
index 145a75528cc..627ac082e2a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -248,6 +248,10 @@ struct module
const unsigned long *crcs;
unsigned int num_syms;
+ /* Kernel parameters. */
+ struct kernel_param *kp;
+ unsigned int num_kp;
+
/* GPL-only exported symbols. */
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
@@ -329,6 +333,11 @@ struct module
unsigned int num_tracepoints;
#endif
+#ifdef CONFIG_TRACING
+ const char **trace_bprintk_fmt_start;
+ unsigned int num_trace_bprintk_fmt;
+#endif
+
#ifdef CONFIG_MODULE_UNLOAD
/* What modules depend on me? */
struct list_head modules_which_use_me;
@@ -350,6 +359,8 @@ struct module
#define MODULE_ARCH_INIT {}
#endif
+extern struct mutex module_mutex;
+
/* FIXME: It'd be nice to isolate modules during init, too, so they
aren't used before they (may) fail. But presently too much code
(IDE & SCSI) require entry into the module during init.*/
@@ -358,10 +369,10 @@ static inline int module_is_live(struct module *mod)
return mod->state != MODULE_STATE_GOING;
}
-/* Is this address in a module? (second is with no locks, for oops) */
-struct module *module_text_address(unsigned long addr);
struct module *__module_text_address(unsigned long addr);
-int is_module_address(unsigned long addr);
+struct module *__module_address(unsigned long addr);
+bool is_module_address(unsigned long addr);
+bool is_module_text_address(unsigned long addr);
static inline int within_module_core(unsigned long addr, struct module *mod)
{
@@ -375,6 +386,31 @@ static inline int within_module_init(unsigned long addr, struct module *mod)
addr < (unsigned long)mod->module_init + mod->init_size;
}
+/* Search for module by name: must hold module_mutex. */
+struct module *find_module(const char *name);
+
+struct symsearch {
+ const struct kernel_symbol *start, *stop;
+ const unsigned long *crcs;
+ enum {
+ NOT_GPL_ONLY,
+ GPL_ONLY,
+ WILL_BE_GPL_ONLY,
+ } licence;
+ bool unused;
+};
+
+/* Search for an exported symbol by name. */
+const struct kernel_symbol *find_symbol(const char *name,
+ struct module **owner,
+ const unsigned long **crc,
+ bool gplok,
+ bool warn);
+
+/* Walk the exported symbol table */
+bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
+ unsigned int symnum, void *data), void *data);
+
/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
symnum out of range. */
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
@@ -383,6 +419,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name);
+int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *, unsigned long),
+ void *data);
+
extern void __module_put_and_exit(struct module *mod, long code)
__attribute__((noreturn));
#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code);
@@ -444,6 +484,7 @@ static inline void __module_get(struct module *module)
#define symbol_put_addr(p) do { } while(0)
#endif /* CONFIG_MODULE_UNLOAD */
+int use_module(struct module *a, struct module *b);
/* This is a #define so the string doesn't get put in every .o file */
#define module_name(mod) \
@@ -490,21 +531,24 @@ search_module_extables(unsigned long addr)
return NULL;
}
-/* Is this address in a module? */
-static inline struct module *module_text_address(unsigned long addr)
+static inline struct module *__module_address(unsigned long addr)
{
return NULL;
}
-/* Is this address in a module? (don't take a lock, we're oopsing) */
static inline struct module *__module_text_address(unsigned long addr)
{
return NULL;
}
-static inline int is_module_address(unsigned long addr)
+static inline bool is_module_address(unsigned long addr)
{
- return 0;
+ return false;
+}
+
+static inline bool is_module_text_address(unsigned long addr)
+{
+ return false;
}
/* Get/put a kernel symbol (calls should be symmetric) */
@@ -559,6 +603,14 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name)
return 0;
}
+static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *,
+ unsigned long),
+ void *data)
+{
+ return 0;
+}
+
static inline int register_module_notifier(struct notifier_block * nb)
{
/* no events will happen anyway, so this can always succeed */
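
A sketch of using the newly exported module_mutex with find_module(), which the comment above requires to be called under that lock (the helper is illustrative):

#include <linux/module.h>
#include <linux/mutex.h>

static bool module_is_loaded(const char *name)
{
        struct module *mod;

        mutex_lock(&module_mutex);
        mod = find_module(name);        /* must hold module_mutex */
        mutex_unlock(&module_mutex);

        return mod != NULL;
}
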
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index e4af3399ef4..a4f0b931846 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -138,6 +138,16 @@ extern int parse_args(const char *name,
unsigned num,
int (*unknown)(char *param, char *val));
+/* Called by module remove. */
+#ifdef CONFIG_SYSFS
+extern void destroy_params(const struct kernel_param *params, unsigned num);
+#else
+static inline void destroy_params(const struct kernel_param *params,
+ unsigned num)
+{
+}
+#endif /* !CONFIG_SYSFS */
+
/* All the helper functions */
/* The macros to do compile-time type checking stolen from Jakub
Jelinek, who IIRC came up with this idea for the 2.4 module init code. */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 3945f803d51..7c775751392 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -28,4 +28,4 @@ int pwm_enable(struct pwm_device *pwm);
*/
void pwm_disable(struct pwm_device *pwm);
-#endif /* __ASM_ARCH_PWM_H */
+#endif /* __LINUX_PWM_H */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b3b35966008..e1b7b217388 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -8,7 +8,7 @@ struct ring_buffer;
struct ring_buffer_iter;
/*
- * Don't reference this struct directly, use functions below.
+ * Don't refer to this struct directly, use functions below.
*/
struct ring_buffer_event {
u32 type:2, len:3, time_delta:27;
@@ -18,10 +18,13 @@ struct ring_buffer_event {
/**
* enum ring_buffer_type - internal ring buffer types
*
- * @RINGBUF_TYPE_PADDING: Left over page padding
- * array is ignored
- * size is variable depending on how much
+ * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event
+ * If time_delta is 0:
+ * array is ignored
+ * size is variable depending on how much
* padding is needed
+ * If time_delta is non zero:
+ * everything else same as RINGBUF_TYPE_DATA
*
* @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
* array[0] = time delta (28 .. 59)
@@ -65,6 +68,8 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
return event->time_delta;
}
+void ring_buffer_event_discard(struct ring_buffer_event *event);
+
/*
* size is in bytes for each per CPU buffer.
*/
@@ -74,13 +79,10 @@ void ring_buffer_free(struct ring_buffer *buffer);
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
-struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
- unsigned long length,
- unsigned long *flags);
+struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+ unsigned long length);
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags);
+ struct ring_buffer_event *event);
int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length, void *data);
@@ -121,17 +123,19 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
-u64 ring_buffer_time_stamp(int cpu);
-void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
+u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
+void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+ int cpu, u64 *ts);
+void ring_buffer_set_clock(struct ring_buffer *buffer,
+ u64 (*clock)(void));
+
+size_t ring_buffer_page_len(void *page);
-void tracing_on(void);
-void tracing_off(void);
-void tracing_off_permanent(void);
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
-int ring_buffer_read_page(struct ring_buffer *buffer,
- void **data_page, int cpu, int full);
+int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
+ size_t len, int cpu, int full);
enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
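
A sketch of the simplified reserve/commit cycle after the signature change above, which drops the flags argument (the payload is illustrative; ring_buffer_event_data() is the header's existing accessor):

#include <linux/ring_buffer.h>

static void write_sample(struct ring_buffer *buffer, u32 value)
{
        struct ring_buffer_event *event;
        u32 *body;

        event = ring_buffer_lock_reserve(buffer, sizeof(*body));
        if (!event)
                return;                 /* buffer full or recording is off */

        body = ring_buffer_event_data(event);
        *body = value;

        ring_buffer_unlock_commit(buffer, event);
}
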
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9da5aa0771e..b94f3541f67 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -138,6 +138,8 @@ extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
+extern unsigned long get_parent_ip(unsigned long addr);
+
struct seq_file;
struct cfs_rq;
struct task_group;
@@ -1405,6 +1407,8 @@ struct task_struct {
int curr_ret_stack;
/* Stack of return addresses for return function tracing */
struct ftrace_ret_stack *ret_stack;
+ /* time stamp for last schedule */
+ unsigned long long ftrace_timestamp;
/*
* Number of functions that haven't been traced
* because of depth overrun.
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 6ca6a7b66d7..f4523651fa4 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@
#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
+#include <trace/kmemtrace.h>
/* Size description struct for general caches. */
struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-static inline void *kmalloc(size_t size, gfp_t flags)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern size_t slab_buffer_size(struct kmem_cache *cachep);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
+ return kmem_cache_alloc(cachep, flags);
+}
+static inline size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+ return 0;
+}
+#endif
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+ struct kmem_cache *cachep;
+ void *ret;
+
if (__builtin_constant_p(size)) {
int i = 0;
@@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
found:
#ifdef CONFIG_ZONE_DMA
if (flags & GFP_DMA)
- return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
- flags);
+ cachep = malloc_sizes[i].cs_dmacachep;
+ else
#endif
- return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
+ cachep = malloc_sizes[i].cs_cachep;
+
+ ret = kmem_cache_alloc_notrace(cachep, flags);
+
+ kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+ size, slab_buffer_size(cachep), flags);
+
+ return ret;
}
return __kmalloc(size, flags);
}
@@ -59,8 +85,25 @@ found:
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+ gfp_t flags,
+ int nodeid);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+ gfp_t flags,
+ int nodeid)
+{
+ return kmem_cache_alloc_node(cachep, flags, nodeid);
+}
+#endif
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
+ struct kmem_cache *cachep;
+ void *ret;
+
if (__builtin_constant_p(size)) {
int i = 0;
@@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
found:
#ifdef CONFIG_ZONE_DMA
if (flags & GFP_DMA)
- return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
- flags, node);
+ cachep = malloc_sizes[i].cs_dmacachep;
+ else
#endif
- return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
- flags, node);
+ cachep = malloc_sizes[i].cs_cachep;
+
+ ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
+ ret, size, slab_buffer_size(cachep),
+ flags, node);
+
+ return ret;
}
return __kmalloc_node(size, flags, node);
}
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab..0ec00b39d00 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
+ gfp_t flags)
{
return kmem_cache_alloc_node(cachep, flags, -1);
}
void *__kmalloc_node(size_t size, gfp_t flags, int node);
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
return __kmalloc_node(size, flags, node);
}
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* kmalloc is the normal method of allocating memory
* in the kernel.
*/
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
return __kmalloc_node(size, flags, -1);
}
-static inline void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__kmalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags);
}
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e37b6aa8a9f..a1f90528e70 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
+#include <trace/kmemtrace.h>
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -217,13 +218,31 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+ return kmem_cache_alloc(s, gfpflags);
+}
+#endif
+
static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
- return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+ unsigned int order = get_order(size);
+ void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+
+ kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+ size, PAGE_SIZE << order, flags);
+
+ return ret;
}
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
+ void *ret;
+
if (__builtin_constant_p(size)) {
if (size > SLUB_MAX_SIZE)
return kmalloc_large(size, flags);
@@ -234,7 +253,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
if (!s)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc(s, flags);
+ ret = kmem_cache_alloc_notrace(s, flags);
+
+ kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+ _THIS_IP_, ret,
+ size, s->size, flags);
+
+ return ret;
}
}
return __kmalloc(size, flags);
@@ -244,8 +269,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node)
+{
+ return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif
+
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
+ void *ret;
+
if (__builtin_constant_p(size) &&
size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
@@ -253,7 +294,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
if (!s)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc_node(s, flags, node);
+ ret = kmem_cache_alloc_node_notrace(s, flags, node);
+
+ kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+ _THIS_IP_, ret,
+ size, s->size, flags, node);
+
+ return ret;
}
return __kmalloc_node(size, flags, node);
}
diff --git a/include/linux/smp.h b/include/linux/smp.h
index bbacb7baa44..a69db820eed 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -38,7 +38,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
/*
* main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
* (defined in asm header):
- */
+ */
/*
* stops all CPUs but the current one:
@@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
return 0;
}
-void __smp_call_function_single(int cpuid, struct call_single_data *data);
+void __smp_call_function_single(int cpuid, struct call_single_data *data,
+ int wait);
/*
* Generic and arch helpers
@@ -121,6 +122,8 @@ extern unsigned int setup_max_cpus;
#else /* !SMP */
+static inline void smp_send_stop(void) { }
+
/*
* These macros fold the SMP functionality into a single CPU system
*/
diff --git a/include/linux/string.h b/include/linux/string.h
index 8852739f36d..489019ef169 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -10,6 +10,7 @@
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
+#include <stdarg.h>
extern char *strndup_user(const char __user *, long);
extern void *memdup_user(const void __user *, size_t);
@@ -112,8 +113,23 @@ extern void argv_free(char **argv);
extern bool sysfs_streq(const char *s1, const char *s2);
+#ifdef CONFIG_BINARY_PRINTF
+int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
+int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+#endif
+
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
const void *from, size_t available);
+/**
+ * strstarts - does @str start with @prefix?
+ * @str: string to examine
+ * @prefix: prefix to look for.
+ */
+static inline bool strstarts(const char *str, const char *prefix)
+{
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+}
#endif
#endif /* _LINUX_STRING_H_ */
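
The new strstarts() helper above is a thin wrapper around strncmp(); a minimal, hedged usage sketch follows. The path-filter function is invented purely for illustration, only strstarts() itself comes from the header change above.

	#include <linux/string.h>

	/* Illustrative only: accept paths under the tracing debugfs root. */
	static bool is_tracing_path(const char *path)
	{
		/* true for "/debug/tracing", "/debug/tracing/events", ... */
		return strstarts(path, "/debug/tracing");
	}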
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index b299a82a05e..6470f74074a 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -65,6 +65,7 @@ struct old_linux_dirent;
#include <asm/signal.h>
#include <linux/quota.h>
#include <linux/key.h>
+#include <linux/ftrace.h>
#define __SC_DECL1(t1, a1) t1 a1
#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__)
@@ -95,7 +96,46 @@ struct old_linux_dirent;
#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
+#ifdef CONFIG_FTRACE_SYSCALLS
+#define __SC_STR_ADECL1(t, a) #a
+#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
+#define __SC_STR_ADECL3(t, a, ...) #a, __SC_STR_ADECL2(__VA_ARGS__)
+#define __SC_STR_ADECL4(t, a, ...) #a, __SC_STR_ADECL3(__VA_ARGS__)
+#define __SC_STR_ADECL5(t, a, ...) #a, __SC_STR_ADECL4(__VA_ARGS__)
+#define __SC_STR_ADECL6(t, a, ...) #a, __SC_STR_ADECL5(__VA_ARGS__)
+
+#define __SC_STR_TDECL1(t, a) #t
+#define __SC_STR_TDECL2(t, a, ...) #t, __SC_STR_TDECL1(__VA_ARGS__)
+#define __SC_STR_TDECL3(t, a, ...) #t, __SC_STR_TDECL2(__VA_ARGS__)
+#define __SC_STR_TDECL4(t, a, ...) #t, __SC_STR_TDECL3(__VA_ARGS__)
+#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
+#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
+
+#define SYSCALL_METADATA(sname, nb) \
+ static const struct syscall_metadata __used \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("__syscalls_metadata"))) \
+ __syscall_meta_##sname = { \
+ .name = "sys"#sname, \
+ .nb_args = nb, \
+ .types = types_##sname, \
+ .args = args_##sname, \
+ }
+
+#define SYSCALL_DEFINE0(sname) \
+ static const struct syscall_metadata __used \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("__syscalls_metadata"))) \
+ __syscall_meta_##sname = { \
+ .name = "sys_"#sname, \
+ .nb_args = 0, \
+ }; \
+ asmlinkage long sys_##sname(void)
+
+#else
#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
+#endif
+
#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
@@ -117,10 +157,26 @@ struct old_linux_dirent;
#endif
#endif
+#ifdef CONFIG_FTRACE_SYSCALLS
+#define SYSCALL_DEFINEx(x, sname, ...) \
+ static const char *types_##sname[] = { \
+ __SC_STR_TDECL##x(__VA_ARGS__) \
+ }; \
+ static const char *args_##sname[] = { \
+ __SC_STR_ADECL##x(__VA_ARGS__) \
+ }; \
+ SYSCALL_METADATA(sname, x); \
+ __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
+#else
+#define SYSCALL_DEFINEx(x, sname, ...) \
+ __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
+#endif
+
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
#define SYSCALL_DEFINE(name) static inline long SYSC_##name
-#define SYSCALL_DEFINEx(x, name, ...) \
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \
static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \
asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \
@@ -134,7 +190,7 @@ struct old_linux_dirent;
#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
#define SYSCALL_DEFINE(name) asmlinkage long sys_##name
-#define SYSCALL_DEFINEx(x, name, ...) \
+#define __SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__))
#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
@@ -462,9 +518,9 @@ asmlinkage long sys_pread64(unsigned int fd, char __user *buf,
asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
size_t count, loff_t pos);
asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
- unsigned long vlen, u32 pos_high, u32 pos_low);
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
- unsigned long vlen, u32 pos_high, u32 pos_low);
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
asmlinkage long sys_mkdir(const char __user *pathname, int mode);
asmlinkage long sys_chdir(const char __user *filename);
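
To make the metadata path concrete, here is a hand-written sketch of roughly what SYSCALL_DEFINE1(close, unsigned int, fd) expands to with CONFIG_FTRACE_SYSCALLS=y and CONFIG_HAVE_SYSCALL_WRAPPERS=n, following the macros above. sys_close is used only as a familiar example; the expansion below is written out by hand, not generated.

	/* SYSCALL_DEFINE1(close, unsigned int, fd) becomes roughly: */

	static const char *types__close[] = { "unsigned int" };	/* __SC_STR_TDECL1 */
	static const char *args__close[]  = { "fd" };			/* __SC_STR_ADECL1 */

	static const struct syscall_metadata __used
		__attribute__((__aligned__(4)))
		__attribute__((section("__syscalls_metadata")))
		__syscall_meta__close = {			/* SYSCALL_METADATA(_close, 1) */
			.name	 = "sys_close",			/* "sys" #sname, sname == _close */
			.nb_args = 1,
			.types	 = types__close,
			.args	 = args__close,
		};

	asmlinkage long sys_close(unsigned int fd)		/* __SYSCALL_DEFINEx(1, _close, ...) */
	{
		/* ... original syscall body ... */
	}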
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a16b9e06f2e..7402c1a27c4 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -38,11 +38,7 @@
#endif
#ifndef nr_cpus_node
-#define nr_cpus_node(node) \
- ({ \
- node_to_cpumask_ptr(__tmp__, node); \
- cpus_weight(*__tmp__); \
- })
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif
#define for_each_node_with_cpus(node) \
@@ -200,4 +196,9 @@ int arch_update_cpu_topology(void);
#define topology_core_cpumask(cpu) cpumask_of(cpu)
#endif
+/* Returns the number of the current node. */
+#ifndef numa_node_id
+#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
+#endif
+
#endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
new file mode 100644
index 00000000000..7a813038408
--- /dev/null
+++ b/include/linux/trace_clock.h
@@ -0,0 +1,19 @@
+#ifndef _LINUX_TRACE_CLOCK_H
+#define _LINUX_TRACE_CLOCK_H
+
+/*
+ * 3 trace clock variants, with differing scalability/precision
+ * tradeoffs:
+ *
+ * - local: CPU-local trace clock
+ * - medium: scalable global clock with some jitter
+ * - global: globally monotonic, serialized clock
+ */
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern u64 notrace trace_clock_local(void);
+extern u64 notrace trace_clock(void);
+extern u64 notrace trace_clock_global(void);
+
+#endif /* _LINUX_TRACE_CLOCK_H */
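
A hedged sketch of how a tracer might consume these clocks; the helper function and the "clock units" interpretation are assumptions for illustration, not part of the header above.

	#include <linux/types.h>
	#include <linux/trace_clock.h>

	/*
	 * Illustrative only: time a section with the cheap CPU-local variant.
	 * trace_clock() or trace_clock_global() could be substituted when
	 * cross-CPU ordering of the resulting timestamps matters.
	 */
	static u64 measure_section(void (*fn)(void))
	{
		u64 start = trace_clock_local();	/* fast, per-CPU counter */

		fn();

		return trace_clock_local() - start;	/* delta in clock units */
	}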
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 75700545836..d35a7ee7611 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -31,8 +31,8 @@ struct tracepoint {
* Keep in sync with vmlinux.lds.h.
*/
-#define TPPROTO(args...) args
-#define TPARGS(args...) args
+#define TP_PROTO(args...) args
+#define TP_ARGS(args...) args
#ifdef CONFIG_TRACEPOINTS
@@ -65,7 +65,7 @@ struct tracepoint {
{ \
if (unlikely(__tracepoint_##name.state)) \
__DO_TRACE(&__tracepoint_##name, \
- TPPROTO(proto), TPARGS(args)); \
+ TP_PROTO(proto), TP_ARGS(args)); \
} \
static inline int register_trace_##name(void (*probe)(proto)) \
{ \
@@ -153,4 +153,114 @@ static inline void tracepoint_synchronize_unregister(void)
synchronize_sched();
}
+#define PARAMS(args...) args
+#define TRACE_FORMAT(name, proto, args, fmt) \
+ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+
+
+/*
+ * For use with the TRACE_EVENT macro:
+ *
+ * We define a tracepoint, its arguments, its printk format
+ * and its 'fast binary record' layout.
+ *
+ * Firstly, name your tracepoint via TRACE_EVENT(name): the
+ * 'subsystem_event' notation is fine.
+ *
+ * Think about this whole construct as the
+ * 'trace_sched_switch() function' from now on.
+ *
+ *
+ * TRACE_EVENT(sched_switch,
+ *
+ * *
+ * * The function has a regular C function argument
+ * * prototype; declare it via TP_PROTO():
+ * *
+ *
+ * TP_PROTO(struct rq *rq, struct task_struct *prev,
+ * struct task_struct *next),
+ *
+ * *
+ * * Define the call signature of the 'function'.
+ * * (Design sidenote: we use this instead of a
+ * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.)
+ * *
+ *
+ * TP_ARGS(rq, prev, next),
+ *
+ * *
+ * * Fast binary tracing: define the trace record via
+ * * TP_STRUCT__entry(). You can think about it like a
+ * * regular C structure local variable definition.
+ * *
+ * * This is how the trace record is structured and will
+ * * be saved into the ring buffer. These are the fields
+ * * that will be exposed to user-space in
+ * * /debug/tracing/events/<*>/format.
+ * *
+ * * The declared 'local variable' is called '__entry'
+ * *
+ * * __field(pid_t, prev_pid) is equivalent to a standard declaration:
+ * *
+ * * pid_t prev_pid;
+ * *
+ * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to:
+ * *
+ * * char prev_comm[TASK_COMM_LEN];
+ * *
+ *
+ * TP_STRUCT__entry(
+ * __array( char, prev_comm, TASK_COMM_LEN )
+ * __field( pid_t, prev_pid )
+ * __field( int, prev_prio )
+ * __array( char, next_comm, TASK_COMM_LEN )
+ * __field( pid_t, next_pid )
+ * __field( int, next_prio )
+ * ),
+ *
+ * *
+ * * Assign the entry into the trace record, by embedding
+ * * a full C statement block into TP_fast_assign(). You
+ * * can refer to the trace record as '__entry' -
+ * * otherwise you can put arbitrary C code in here.
+ * *
+ * * Note: this C code will execute every time a trace event
+ * * happens, on an active tracepoint.
+ * *
+ *
+ * TP_fast_assign(
+ * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
+ * __entry->prev_pid = prev->pid;
+ * __entry->prev_prio = prev->prio;
+ * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
+ * __entry->next_pid = next->pid;
+ * __entry->next_prio = next->prio;
+ * )
+ *
+ * *
+ * * Formatted output of a trace record via TP_printk().
+ * * This is how the tracepoint will appear under ftrace
+ * * plugins that make use of this tracepoint.
+ * *
+ * * (raw-binary tracing won't actually perform this step.)
+ * *
+ *
+ * TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
+ * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
+ * __entry->next_comm, __entry->next_pid, __entry->next_prio),
+ *
+ * );
+ *
+ * This macro construct is thus used for the regular printk-format
+ * tracing setup, to construct a function-pointer-based tracepoint
+ * callback (used by programmatic plugins and also usable by generic
+ * instrumentation like SystemTap), and to expose a structured trace
+ * record in /debug/tracing/events/.
+ */
+
+#define TRACE_EVENT(name, proto, args, struct, assign, print) \
+ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+
#endif
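
For the non-TRACE_EVENT side, DECLARE_TRACE() still generates a trace_<name>() call site plus register/unregister helpers. A hedged sketch of attaching a probe to the kfree_skb tracepoint (converted to TP_PROTO/TP_ARGS further down in this patch); the probe body and module boilerplate are purely illustrative.

	#include <linux/module.h>
	#include <linux/skbuff.h>
	#include <trace/skb.h>

	/* Probe signature must match TP_PROTO(struct sk_buff *skb, void *location). */
	static void probe_kfree_skb(struct sk_buff *skb, void *location)
	{
		pr_info("skb %p freed from %p\n", skb, location);
	}

	static int __init probe_init(void)
	{
		/* register_trace_kfree_skb() is generated by DECLARE_TRACE(). */
		return register_trace_kfree_skb(probe_kfree_skb);
	}

	static void __exit probe_exit(void)
	{
		unregister_trace_kfree_skb(probe_kfree_skb);
		tracepoint_synchronize_unregister();	/* wait for in-flight probes */
	}

	module_init(probe_init);
	module_exit(probe_exit);
	MODULE_LICENSE("GPL");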
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 5f401b644ed..429c631d2aa 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -80,8 +80,7 @@ struct wusb_ckhdid {
u8 data[16];
} __attribute__((packed));
-const static
-struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
+static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
diff --git a/include/trace/block.h b/include/trace/block.h
index 25c6a1fd5b7..25b7068b819 100644
--- a/include/trace/block.h
+++ b/include/trace/block.h
@@ -5,72 +5,72 @@
#include <linux/tracepoint.h>
DECLARE_TRACE(block_rq_abort,
- TPPROTO(struct request_queue *q, struct request *rq),
- TPARGS(q, rq));
+ TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_ARGS(q, rq));
DECLARE_TRACE(block_rq_insert,
- TPPROTO(struct request_queue *q, struct request *rq),
- TPARGS(q, rq));
+ TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_ARGS(q, rq));
DECLARE_TRACE(block_rq_issue,
- TPPROTO(struct request_queue *q, struct request *rq),
- TPARGS(q, rq));
+ TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_ARGS(q, rq));
DECLARE_TRACE(block_rq_requeue,
- TPPROTO(struct request_queue *q, struct request *rq),
- TPARGS(q, rq));
+ TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_ARGS(q, rq));
DECLARE_TRACE(block_rq_complete,
- TPPROTO(struct request_queue *q, struct request *rq),
- TPARGS(q, rq));
+ TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_ARGS(q, rq));
DECLARE_TRACE(block_bio_bounce,
- TPPROTO(struct request_queue *q, struct bio *bio),
- TPARGS(q, bio));
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+ TP_ARGS(q, bio));
DECLARE_TRACE(block_bio_complete,
- TPPROTO(struct request_queue *q, struct bio *bio),
- TPARGS(q, bio));
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+ TP_ARGS(q, bio));
DECLARE_TRACE(block_bio_backmerge,
- TPPROTO(struct request_queue *q, struct bio *bio),
- TPARGS(q, bio));
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+ TP_ARGS(q, bio));
DECLARE_TRACE(block_bio_frontmerge,
- TPPROTO(struct request_queue *q, struct bio *bio),
- TPARGS(q, bio));
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+ TP_ARGS(q, bio));
DECLARE_TRACE(block_bio_queue,
- TPPROTO(struct request_queue *q, struct bio *bio),
- TPARGS(q, bio));
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+ TP_ARGS(q, bio));
DECLARE_TRACE(block_getrq,
- TPPROTO(struct request_queue *q, struct bio *bio, int rw),
- TPARGS(q, bio, rw));
+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
+ TP_ARGS(q, bio, rw));
DECLARE_TRACE(block_sleeprq,
- TPPROTO(struct request_queue *q, struct bio *bio, int rw),
- TPARGS(q, bio, rw));
+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
+ TP_ARGS(q, bio, rw));
DECLARE_TRACE(block_plug,
- TPPROTO(struct request_queue *q),
- TPARGS(q));
+ TP_PROTO(struct request_queue *q),
+ TP_ARGS(q));
DECLARE_TRACE(block_unplug_timer,
- TPPROTO(struct request_queue *q),
- TPARGS(q));
+ TP_PROTO(struct request_queue *q),
+ TP_ARGS(q));
DECLARE_TRACE(block_unplug_io,
- TPPROTO(struct request_queue *q),
- TPARGS(q));
+ TP_PROTO(struct request_queue *q),
+ TP_ARGS(q));
DECLARE_TRACE(block_split,
- TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
- TPARGS(q, bio, pdu));
+ TP_PROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
+ TP_ARGS(q, bio, pdu));
DECLARE_TRACE(block_remap,
- TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev,
- sector_t from, sector_t to),
- TPARGS(q, bio, dev, from, to));
+ TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
+ sector_t from, sector_t to),
+ TP_ARGS(q, bio, dev, from, to));
#endif
diff --git a/include/trace/irq.h b/include/trace/irq.h
new file mode 100644
index 00000000000..ff5d4495dc3
--- /dev/null
+++ b/include/trace/irq.h
@@ -0,0 +1,9 @@
+#ifndef _TRACE_IRQ_H
+#define _TRACE_IRQ_H
+
+#include <linux/interrupt.h>
+#include <linux/tracepoint.h>
+
+#include <trace/irq_event_types.h>
+
+#endif
diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h
new file mode 100644
index 00000000000..85964ebd47e
--- /dev/null
+++ b/include/trace/irq_event_types.h
@@ -0,0 +1,55 @@
+
+/* use <trace/irq.h> instead */
+#ifndef TRACE_FORMAT
+# error Do not include this file directly.
+# error Unless you know what you are doing.
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irq
+
+/*
+ * Tracepoint for entry of interrupt handler:
+ */
+TRACE_FORMAT(irq_handler_entry,
+ TP_PROTO(int irq, struct irqaction *action),
+ TP_ARGS(irq, action),
+ TP_FMT("irq=%d handler=%s", irq, action->name)
+ );
+
+/*
+ * Tracepoint for return of an interrupt handler:
+ */
+TRACE_EVENT(irq_handler_exit,
+
+ TP_PROTO(int irq, struct irqaction *action, int ret),
+
+ TP_ARGS(irq, action, ret),
+
+ TP_STRUCT__entry(
+ __field( int, irq )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("irq=%d return=%s",
+ __entry->irq, __entry->ret ? "handled" : "unhandled")
+);
+
+TRACE_FORMAT(softirq_entry,
+ TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+ TP_ARGS(h, vec),
+ TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
+ );
+
+TRACE_FORMAT(softirq_exit,
+ TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+ TP_ARGS(h, vec),
+ TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
+ );
+
+#undef TRACE_SYSTEM
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h
new file mode 100644
index 00000000000..ad8b7857855
--- /dev/null
+++ b/include/trace/kmemtrace.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ *
+ * This file is released under GPL version 2.
+ */
+
+#ifndef _LINUX_KMEMTRACE_H
+#define _LINUX_KMEMTRACE_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/marker.h>
+
+enum kmemtrace_type_id {
+ KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */
+ KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */
+ KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */
+};
+
+#ifdef CONFIG_KMEMTRACE
+
+extern void kmemtrace_init(void);
+
+extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+ unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node);
+
+extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+ unsigned long call_site,
+ const void *ptr);
+
+#else /* CONFIG_KMEMTRACE */
+
+static inline void kmemtrace_init(void)
+{
+}
+
+static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+ unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node)
+{
+}
+
+static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+ unsigned long call_site,
+ const void *ptr)
+{
+}
+
+#endif /* CONFIG_KMEMTRACE */
+
+static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
+ unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags)
+{
+ kmemtrace_mark_alloc_node(type_id, call_site, ptr,
+ bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_KMEMTRACE_H */
+
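The free-side hook is the natural counterpart to the kmemtrace_mark_alloc() calls added to <linux/slub_def.h> earlier in this patch. A hedged sketch of how an allocator's free path might report it; the wrapper below is illustrative, not the actual SLUB change.

	#include <linux/kernel.h>	/* _THIS_IP_ */
	#include <linux/slab.h>
	#include <trace/kmemtrace.h>

	/*
	 * Illustrative wrapper: pair a kfree() with a KMEMTRACE_TYPE_KMALLOC
	 * free event, using the caller's address as the call site.
	 */
	static __always_inline void kfree_traced(const void *ptr)
	{
		kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ptr);
		kfree(ptr);
	}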
diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h
new file mode 100644
index 00000000000..5ca67df87f2
--- /dev/null
+++ b/include/trace/lockdep.h
@@ -0,0 +1,9 @@
+#ifndef _TRACE_LOCKDEP_H
+#define _TRACE_LOCKDEP_H
+
+#include <linux/lockdep.h>
+#include <linux/tracepoint.h>
+
+#include <trace/lockdep_event_types.h>
+
+#endif
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h
new file mode 100644
index 00000000000..adccfcd2ec8
--- /dev/null
+++ b/include/trace/lockdep_event_types.h
@@ -0,0 +1,44 @@
+
+#ifndef TRACE_FORMAT
+# error Do not include this file directly.
+# error Unless you know what you are doing.
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lock
+
+#ifdef CONFIG_LOCKDEP
+
+TRACE_FORMAT(lock_acquire,
+ TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check,
+ struct lockdep_map *next_lock, unsigned long ip),
+ TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
+ TP_FMT("%s%s%s", trylock ? "try " : "",
+ read ? "read " : "", lock->name)
+ );
+
+TRACE_FORMAT(lock_release,
+ TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
+ TP_ARGS(lock, nested, ip),
+ TP_FMT("%s", lock->name)
+ );
+
+#ifdef CONFIG_LOCK_STAT
+
+TRACE_FORMAT(lock_contended,
+ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
+ TP_ARGS(lock, ip),
+ TP_FMT("%s", lock->name)
+ );
+
+TRACE_FORMAT(lock_acquired,
+ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
+ TP_ARGS(lock, ip),
+ TP_FMT("%s", lock->name)
+ );
+
+#endif
+#endif
+
+#undef TRACE_SYSTEM
diff --git a/include/trace/power.h b/include/trace/power.h
new file mode 100644
index 00000000000..ef204666e98
--- /dev/null
+++ b/include/trace/power.h
@@ -0,0 +1,32 @@
+#ifndef _TRACE_POWER_H
+#define _TRACE_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+enum {
+ POWER_NONE = 0,
+ POWER_CSTATE = 1,
+ POWER_PSTATE = 2,
+};
+
+struct power_trace {
+ ktime_t stamp;
+ ktime_t end;
+ int type;
+ int state;
+};
+
+DECLARE_TRACE(power_start,
+ TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
+ TP_ARGS(it, type, state));
+
+DECLARE_TRACE(power_mark,
+ TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
+ TP_ARGS(it, type, state));
+
+DECLARE_TRACE(power_end,
+ TP_PROTO(struct power_trace *it),
+ TP_ARGS(it));
+
+#endif /* _TRACE_POWER_H */
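
The power tracepoints carry a caller-provided struct power_trace. A hedged sketch of how an idle path might bracket a C-state entry with them; the example function is invented, only the trace_power_*() call sites follow from the DECLARE_TRACE() lines above.

	#include <trace/power.h>

	static void example_enter_cstate(unsigned int target_state)
	{
		struct power_trace it;

		trace_power_start(&it, POWER_CSTATE, target_state);
		/* ... architecture-specific idle entry would go here ... */
		trace_power_end(&it);
	}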
diff --git a/include/trace/sched.h b/include/trace/sched.h
index 0d81098ee9f..4e372a1a29b 100644
--- a/include/trace/sched.h
+++ b/include/trace/sched.h
@@ -4,53 +4,6 @@
#include <linux/sched.h>
#include <linux/tracepoint.h>
-DECLARE_TRACE(sched_kthread_stop,
- TPPROTO(struct task_struct *t),
- TPARGS(t));
-
-DECLARE_TRACE(sched_kthread_stop_ret,
- TPPROTO(int ret),
- TPARGS(ret));
-
-DECLARE_TRACE(sched_wait_task,
- TPPROTO(struct rq *rq, struct task_struct *p),
- TPARGS(rq, p));
-
-DECLARE_TRACE(sched_wakeup,
- TPPROTO(struct rq *rq, struct task_struct *p, int success),
- TPARGS(rq, p, success));
-
-DECLARE_TRACE(sched_wakeup_new,
- TPPROTO(struct rq *rq, struct task_struct *p, int success),
- TPARGS(rq, p, success));
-
-DECLARE_TRACE(sched_switch,
- TPPROTO(struct rq *rq, struct task_struct *prev,
- struct task_struct *next),
- TPARGS(rq, prev, next));
-
-DECLARE_TRACE(sched_migrate_task,
- TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
- TPARGS(p, orig_cpu, dest_cpu));
-
-DECLARE_TRACE(sched_process_free,
- TPPROTO(struct task_struct *p),
- TPARGS(p));
-
-DECLARE_TRACE(sched_process_exit,
- TPPROTO(struct task_struct *p),
- TPARGS(p));
-
-DECLARE_TRACE(sched_process_wait,
- TPPROTO(struct pid *pid),
- TPARGS(pid));
-
-DECLARE_TRACE(sched_process_fork,
- TPPROTO(struct task_struct *parent, struct task_struct *child),
- TPARGS(parent, child));
-
-DECLARE_TRACE(sched_signal_send,
- TPPROTO(int sig, struct task_struct *p),
- TPARGS(sig, p));
+#include <trace/sched_event_types.h>
#endif
diff --git a/include/trace/sched_event_types.h b/include/trace/sched_event_types.h
new file mode 100644
index 00000000000..63547dc1125
--- /dev/null
+++ b/include/trace/sched_event_types.h
@@ -0,0 +1,337 @@
+
+/* use <trace/sched.h> instead */
+#ifndef TRACE_EVENT
+# error Do not include this file directly.
+# error Unless you know what you are doing.
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched
+
+/*
+ * Tracepoint for calling kthread_stop, performed to end a kthread:
+ */
+TRACE_EVENT(sched_kthread_stop,
+
+ TP_PROTO(struct task_struct *t),
+
+ TP_ARGS(t),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
+ __entry->pid = t->pid;
+ ),
+
+ TP_printk("task %s:%d", __entry->comm, __entry->pid)
+);
+
+/*
+ * Tracepoint for the return value of the kthread stopping:
+ */
+TRACE_EVENT(sched_kthread_stop_ret,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret),
+
+ TP_STRUCT__entry(
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->ret = ret;
+ ),
+
+ TP_printk("ret %d", __entry->ret)
+);
+
+/*
+ * Tracepoint for waiting on task to unschedule:
+ *
+ * (NOTE: the 'rq' argument is not used by generic trace events,
+ * but is used by the latency tracer plugin.)
+ */
+TRACE_EVENT(sched_wait_task,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p),
+
+ TP_ARGS(rq, p),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ ),
+
+ TP_printk("task %s:%d [%d]",
+ __entry->comm, __entry->pid, __entry->prio)
+);
+
+/*
+ * Tracepoint for waking up a task:
+ *
+ * (NOTE: the 'rq' argument is not used by generic trace events,
+ * but is used by the latency tracer plugin.)
+ */
+TRACE_EVENT(sched_wakeup,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+
+ TP_ARGS(rq, p, success),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ __field( int, success )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ __entry->success = success;
+ ),
+
+ TP_printk("task %s:%d [%d] success=%d",
+ __entry->comm, __entry->pid, __entry->prio,
+ __entry->success)
+);
+
+/*
+ * Tracepoint for waking up a new task:
+ *
+ * (NOTE: the 'rq' argument is not used by generic trace events,
+ * but is used by the latency tracer plugin.)
+ */
+TRACE_EVENT(sched_wakeup_new,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+
+ TP_ARGS(rq, p, success),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ __field( int, success )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ __entry->success = success;
+ ),
+
+ TP_printk("task %s:%d [%d] success=%d",
+ __entry->comm, __entry->pid, __entry->prio,
+ __entry->success)
+);
+
+/*
+ * Tracepoint for task switches, performed by the scheduler:
+ *
+ * (NOTE: the 'rq' argument is not used by generic trace events,
+ * but is used by the latency tracer plugin.)
+ */
+TRACE_EVENT(sched_switch,
+
+ TP_PROTO(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next),
+
+ TP_ARGS(rq, prev, next),
+
+ TP_STRUCT__entry(
+ __array( char, prev_comm, TASK_COMM_LEN )
+ __field( pid_t, prev_pid )
+ __field( int, prev_prio )
+ __array( char, next_comm, TASK_COMM_LEN )
+ __field( pid_t, next_pid )
+ __field( int, next_prio )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
+ __entry->prev_pid = prev->pid;
+ __entry->prev_prio = prev->prio;
+ memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
+ __entry->next_pid = next->pid;
+ __entry->next_prio = next->prio;
+ ),
+
+ TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
+ __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
+ __entry->next_comm, __entry->next_pid, __entry->next_prio)
+);
+
+/*
+ * Tracepoint for a task being migrated:
+ */
+TRACE_EVENT(sched_migrate_task,
+
+ TP_PROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
+
+ TP_ARGS(p, orig_cpu, dest_cpu),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ __field( int, orig_cpu )
+ __field( int, dest_cpu )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ __entry->orig_cpu = orig_cpu;
+ __entry->dest_cpu = dest_cpu;
+ ),
+
+ TP_printk("task %s:%d [%d] from: %d to: %d",
+ __entry->comm, __entry->pid, __entry->prio,
+ __entry->orig_cpu, __entry->dest_cpu)
+);
+
+/*
+ * Tracepoint for freeing a task:
+ */
+TRACE_EVENT(sched_process_free,
+
+ TP_PROTO(struct task_struct *p),
+
+ TP_ARGS(p),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ ),
+
+ TP_printk("task %s:%d [%d]",
+ __entry->comm, __entry->pid, __entry->prio)
+);
+
+/*
+ * Tracepoint for a task exiting:
+ */
+TRACE_EVENT(sched_process_exit,
+
+ TP_PROTO(struct task_struct *p),
+
+ TP_ARGS(p),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ ),
+
+ TP_printk("task %s:%d [%d]",
+ __entry->comm, __entry->pid, __entry->prio)
+);
+
+/*
+ * Tracepoint for a waiting task:
+ */
+TRACE_EVENT(sched_process_wait,
+
+ TP_PROTO(struct pid *pid),
+
+ TP_ARGS(pid),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ __entry->pid = pid_nr(pid);
+ __entry->prio = current->prio;
+ ),
+
+ TP_printk("task %s:%d [%d]",
+ __entry->comm, __entry->pid, __entry->prio)
+);
+
+/*
+ * Tracepoint for do_fork:
+ */
+TRACE_EVENT(sched_process_fork,
+
+ TP_PROTO(struct task_struct *parent, struct task_struct *child),
+
+ TP_ARGS(parent, child),
+
+ TP_STRUCT__entry(
+ __array( char, parent_comm, TASK_COMM_LEN )
+ __field( pid_t, parent_pid )
+ __array( char, child_comm, TASK_COMM_LEN )
+ __field( pid_t, child_pid )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
+ __entry->parent_pid = parent->pid;
+ memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
+ __entry->child_pid = child->pid;
+ ),
+
+ TP_printk("parent %s:%d child %s:%d",
+ __entry->parent_comm, __entry->parent_pid,
+ __entry->child_comm, __entry->child_pid)
+);
+
+/*
+ * Tracepoint for sending a signal:
+ */
+TRACE_EVENT(sched_signal_send,
+
+ TP_PROTO(int sig, struct task_struct *p),
+
+ TP_ARGS(sig, p),
+
+ TP_STRUCT__entry(
+ __field( int, sig )
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->sig = sig;
+ ),
+
+ TP_printk("sig: %d task %s:%d",
+ __entry->sig, __entry->comm, __entry->pid)
+);
+
+#undef TRACE_SYSTEM
diff --git a/include/trace/skb.h b/include/trace/skb.h
index a96610f92f6..b66206d9be7 100644
--- a/include/trace/skb.h
+++ b/include/trace/skb.h
@@ -5,7 +5,7 @@
#include <linux/tracepoint.h>
DECLARE_TRACE(kfree_skb,
- TPPROTO(struct sk_buff *skb, void *location),
- TPARGS(skb, location));
+ TP_PROTO(struct sk_buff *skb, void *location),
+ TP_ARGS(skb, location));
#endif
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h
new file mode 100644
index 00000000000..df56f5694be
--- /dev/null
+++ b/include/trace/trace_event_types.h
@@ -0,0 +1,5 @@
+/* trace/<type>_event_types.h here */
+
+#include <trace/sched_event_types.h>
+#include <trace/irq_event_types.h>
+#include <trace/lockdep_event_types.h>
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
new file mode 100644
index 00000000000..fd13750ca4b
--- /dev/null
+++ b/include/trace/trace_events.h
@@ -0,0 +1,5 @@
+/* trace/<type>.h here */
+
+#include <trace/sched.h>
+#include <trace/irq.h>
+#include <trace/lockdep.h>
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h
new file mode 100644
index 00000000000..7626523deeb
--- /dev/null
+++ b/include/trace/workqueue.h
@@ -0,0 +1,25 @@
+#ifndef __TRACE_WORKQUEUE_H
+#define __TRACE_WORKQUEUE_H
+
+#include <linux/tracepoint.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+
+DECLARE_TRACE(workqueue_insertion,
+ TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+ TP_ARGS(wq_thread, work));
+
+DECLARE_TRACE(workqueue_execution,
+ TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+ TP_ARGS(wq_thread, work));
+
+/* Trace the creation of one workqueue thread on a cpu */
+DECLARE_TRACE(workqueue_creation,
+ TP_PROTO(struct task_struct *wq_thread, int cpu),
+ TP_ARGS(wq_thread, cpu));
+
+DECLARE_TRACE(workqueue_destruction,
+ TP_PROTO(struct task_struct *wq_thread),
+ TP_ARGS(wq_thread));
+
+#endif /* __TRACE_WORKQUEUE_H */