Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditfilter.c |  4
-rw-r--r--  kernel/exit.c        | 39
-rw-r--r--  kernel/irq/chip.c    |  5
-rw-r--r--  kernel/printk.c      |  2
-rw-r--r--  kernel/relay.c       | 17
-rw-r--r--  kernel/sched.c       |  7
-rw-r--r--  kernel/timer.c       |  7
-rw-r--r--  kernel/workqueue.c   | 12
8 files changed, 51 insertions(+), 42 deletions(-)
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 2e896f8ae29..9c8c23227c7 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -800,8 +800,8 @@ static inline int audit_dupe_selinux_field(struct audit_field *df,
/* our own copy of se_str */
se_str = kstrdup(sf->se_str, GFP_KERNEL);
- if (unlikely(IS_ERR(se_str)))
- return -ENOMEM;
+ if (unlikely(!se_str))
+ return -ENOMEM;
df->se_str = se_str;
/* our own (refreshed) copy of se_rule */
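Note: the fix above hinges on kstrdup()'s contract: on allocation failure it
returns NULL, never an ERR_PTR-encoded value, so the old IS_ERR() test could
never fire and a failed duplication went undetected. A minimal sketch of the
correct pattern (names taken from the hunk):

	/* kstrdup() returns NULL on failure, not an ERR_PTR value */
	char *se_str = kstrdup(sf->se_str, GFP_KERNEL);
	if (unlikely(!se_str))
		return -ENOMEM;		/* out of memory */
	df->se_str = se_str;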
diff --git a/kernel/exit.c b/kernel/exit.c
index 122fadb972f..46cf6b68146 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -468,7 +468,7 @@ void fastcall put_files_struct(struct files_struct *files)
fdt = files_fdtable(files);
if (fdt != &files->fdtab)
kmem_cache_free(files_cachep, files);
- call_rcu(&fdt->rcu, free_fdtable_rcu);
+ free_fdtable(fdt);
}
}
@@ -597,14 +597,6 @@ choose_new_parent(struct task_struct *p, struct task_struct *reaper)
static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
- /* We don't want people slaying init. */
- if (p->exit_signal != -1)
- p->exit_signal = SIGCHLD;
-
- if (p->pdeath_signal)
- /* We already hold the tasklist_lock here. */
- group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
-
/* Move the child from its dying parent to the new one. */
if (unlikely(traced)) {
/* Preserve ptrace links if someone else is tracing this child. */
@@ -620,13 +612,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
p->parent = p->real_parent;
add_parent(p);
- /* If we'd notified the old parent about this child's death,
- * also notify the new parent.
- */
- if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
- thread_group_empty(p))
- do_notify_parent(p, p->exit_signal);
- else if (p->state == TASK_TRACED) {
+ if (p->state == TASK_TRACED) {
/*
* If it was at a trace stop, turn it into
* a normal stop since it's no longer being
@@ -636,6 +622,27 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
}
}
+ /* If this is a threaded reparent there is no need to
+ * notify anyone that anything has happened.
+ */
+ if (p->real_parent->group_leader == father->group_leader)
+ return;
+
+ /* We don't want people slaying init. */
+ if (p->exit_signal != -1)
+ p->exit_signal = SIGCHLD;
+
+ if (p->pdeath_signal)
+ /* We already hold the tasklist_lock here. */
+ group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
+
+ /* If we'd notified the old parent about this child's death,
+ * also notify the new parent.
+ */
+ if (!traced && p->exit_state == EXIT_ZOMBIE &&
+ p->exit_signal != -1 && thread_group_empty(p))
+ do_notify_parent(p, p->exit_signal);
+
/*
* process group orphan check
* Case ii: Our child is in a different pgrp
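Note: the exit.c change is purely a reordering: moving the child first and
signalling afterwards lets an early return skip all notification when the
reparent stays inside one thread group. A rough sketch of the resulting
control flow, details elided (a simplification, not the full function):

	static void
	reparent_thread(struct task_struct *p, struct task_struct *father,
			int traced)
	{
		/* ... move p under its new parent, fix up TASK_TRACED ... */

		/* Threaded reparent: same group leader, nobody to tell. */
		if (p->real_parent->group_leader == father->group_leader)
			return;

		/* Only now reset exit_signal, deliver pdeath_signal, and
		 * notify the new parent of an already-zombie child. */
	}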
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ebfd24a4185..d27b2585574 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -517,10 +517,9 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
if (!handle)
handle = handle_bad_irq;
-
- if (desc->chip == &no_irq_chip) {
+ else if (desc->chip == &no_irq_chip) {
printk(KERN_WARNING "Trying to install %sinterrupt handler "
- "for IRQ%d\n", is_chained ? "chained " : " ", irq);
+ "for IRQ%d\n", is_chained ? "chained " : "", irq);
/*
* Some ARM implementations install a handler for really dumb
* interrupt hardware without setting an irq_chip. This worked
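Note: two independent fixes here. Turning the second test into "else if"
stops the no_irq_chip warning from firing when a handler is being removed
(handle == NULL is remapped to handle_bad_irq first), and dropping the " "
fallback removes a stray double space from the message. Illustration of the
format string (the output lines are hypothetical examples):

	printk(KERN_WARNING "Trying to install %sinterrupt handler "
	       "for IRQ%d\n", is_chained ? "chained " : "", irq);
	/* is_chained != 0: Trying to install chained interrupt handler for IRQ9
	 * is_chained == 0: Trying to install interrupt handler for IRQ9 */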
diff --git a/kernel/printk.c b/kernel/printk.c
index 185bb45eacf..c770e1a4e88 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -335,7 +335,7 @@ static void __call_console_drivers(unsigned long start, unsigned long end)
static int __read_mostly ignore_loglevel;
-int __init ignore_loglevel_setup(char *str)
+static int __init ignore_loglevel_setup(char *str)
{
ignore_loglevel = 1;
printk(KERN_INFO "debug: ignoring loglevel setting.\n");
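Note: ignore_loglevel_setup() is only referenced from within printk.c, so
internal linkage is correct and silences the "no previous prototype" warning.
The registration line sits outside this hunk; for a boot-time flag like this
it is typically the __setup() pattern sketched below, where returning 1 tells
the option parser the argument was consumed:

	static int __init ignore_loglevel_setup(char *str)
	{
		ignore_loglevel = 1;
		printk(KERN_INFO "debug: ignoring loglevel setting.\n");
		return 1;	/* option handled */
	}

	__setup("ignore_loglevel", ignore_loglevel_setup);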
diff --git a/kernel/relay.c b/kernel/relay.c
index a4701e7ba7d..284e2e8b4ee 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -302,7 +302,7 @@ static struct rchan_callbacks default_channel_callbacks = {
/**
* wakeup_readers - wake up readers waiting on a channel
- * @private: the channel buffer
+ * @work: work struct that contains the channel buffer
*
* This is the work function used to defer reader waking. The
* reason waking is deferred is that calling directly from write
@@ -322,7 +322,7 @@ static void wakeup_readers(struct work_struct *work)
*
* See relay_reset for description of effect.
*/
-static inline void __relay_reset(struct rchan_buf *buf, unsigned int init)
+static void __relay_reset(struct rchan_buf *buf, unsigned int init)
{
size_t i;
@@ -418,7 +418,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan,
* The channel buffer and channel buffer data structure are then freed
* automatically when the last reference is given up.
*/
-static inline void relay_close_buf(struct rchan_buf *buf)
+static void relay_close_buf(struct rchan_buf *buf)
{
buf->finalized = 1;
cancel_delayed_work(&buf->wake_readers);
@@ -426,7 +426,7 @@ static inline void relay_close_buf(struct rchan_buf *buf)
kref_put(&buf->kref, relay_remove_buf);
}
-static inline void setup_callbacks(struct rchan *chan,
+static void setup_callbacks(struct rchan *chan,
struct rchan_callbacks *cb)
{
if (!cb) {
@@ -946,11 +946,10 @@ typedef int (*subbuf_actor_t) (size_t read_start,
/*
* relay_file_read_subbufs - read count bytes, bridging subbuf boundaries
*/
-static inline ssize_t relay_file_read_subbufs(struct file *filp,
- loff_t *ppos,
- subbuf_actor_t subbuf_actor,
- read_actor_t actor,
- read_descriptor_t *desc)
+static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
+ subbuf_actor_t subbuf_actor,
+ read_actor_t actor,
+ read_descriptor_t *desc)
{
struct rchan_buf *buf = filp->private_data;
size_t read_start, avail;
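Note: the relay.c hunks are two kinds of cleanup. The kernel-doc fix matters
because @name lines must match the real parameter names after the work_struct
conversion, and dropping "inline" from these static functions leaves the
call-site decision to the compiler, which inlines small static helpers at -O2
anyway. The corrected kernel-doc shape, for reference:

	/**
	 * wakeup_readers - wake up readers waiting on a channel
	 * @work: contains the channel buffer
	 *
	 * The @work line must name the actual parameter below.
	 */
	static void wakeup_readers(struct work_struct *work);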
diff --git a/kernel/sched.c b/kernel/sched.c
index 5cd833bc217..b515e3caad7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1567,6 +1567,7 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state)
return try_to_wake_up(p, state, 0);
}
+static void task_running_tick(struct rq *rq, struct task_struct *p);
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
@@ -1627,7 +1628,7 @@ void fastcall sched_fork(struct task_struct *p, int clone_flags)
* runqueue lock is not a problem.
*/
current->time_slice = 1;
- scheduler_tick();
+ task_running_tick(cpu_rq(cpu), current);
}
local_irq_enable();
put_cpu();
@@ -4618,8 +4619,10 @@ asmlinkage long sys_sched_yield(void)
static inline int __resched_legal(int expected_preempt_count)
{
+#ifdef CONFIG_PREEMPT
if (unlikely(preempt_count() != expected_preempt_count))
return 0;
+#endif
if (unlikely(system_state != SYSTEM_RUNNING))
return 0;
return 1;
@@ -5607,7 +5610,7 @@ static void cpu_attach_domain(struct sched_domain *sd, int cpu)
}
/* cpus with isolated domains */
-static cpumask_t __cpuinitdata cpu_isolated_map = CPU_MASK_NONE;
+static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
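Note: three separate sched.c fixes. sched_fork() now expires the forker's
borrowed timeslice via task_running_tick() directly (hence the forward
declaration) instead of replaying the whole scheduler_tick() path; the
__cpuinitdata annotation comes off cpu_isolated_map, presumably because the
map is consulted again when sched domains are rebuilt after boot; and
__resched_legal() only trusts preempt_count() on preemptible kernels. The
last one is the subtle case; a sketch with the reasoning as comments
(assumption: on !CONFIG_PREEMPT, spin_lock()/preempt_disable() do not touch
the preempt count):

	static inline int __resched_legal(int expected_preempt_count)
	{
	#ifdef CONFIG_PREEMPT
		/* Without CONFIG_PREEMPT, preempt_disable() is a no-op and
		 * spin_lock() never raises preempt_count(), so comparing it
		 * against an expected depth proves nothing. */
		if (unlikely(preempt_count() != expected_preempt_count))
			return 0;
	#endif
		if (unlikely(system_state != SYSTEM_RUNNING))
			return 0;
		return 1;
	}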
diff --git a/kernel/timer.c b/kernel/timer.c
index feddf817baa..c2a8ccfc288 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1344,11 +1344,10 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
* should never happen anyway). You just have the printk()
* that will tell you if something has gone wrong and where.
*/
- if (timeout < 0)
- {
+ if (timeout < 0) {
printk(KERN_ERR "schedule_timeout: wrong timeout "
- "value %lx from %p\n", timeout,
- __builtin_return_address(0));
+ "value %lx\n", timeout);
+ dump_stack();
current->state = TASK_RUNNING;
goto out;
}
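Note: swapping __builtin_return_address(0) for dump_stack() trades a single
code address for the whole call chain on the console, which is far more
useful when the bogus timeout value originates several frames up. The
reporting shape after the change:

	if (timeout < 0) {
		printk(KERN_ERR "schedule_timeout: wrong timeout "
		       "value %lx\n", timeout);
		dump_stack();	/* full backtrace, not just the caller */
		current->state = TASK_RUNNING;
		goto out;
	}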
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 742cbbe49bd..a3da07c5af2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -233,7 +233,7 @@ static void delayed_work_timer_fn(unsigned long __data)
/**
* queue_delayed_work - queue work on a workqueue after delay
* @wq: workqueue to use
- * @work: delayable work to queue
+ * @dwork: delayable work to queue
* @delay: number of jiffies to wait before queueing
*
Returns 0 if @dwork was already on a queue, non-zero otherwise.
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
* queue_delayed_work_on - queue work on specific CPU after delay
* @cpu: CPU number to execute work on
* @wq: workqueue to use
- * @work: work to queue
+ * @dwork: work to queue
* @delay: number of jiffies to wait before queueing
*
Returns 0 if @dwork was already on a queue, non-zero otherwise.
@@ -637,9 +637,11 @@ int schedule_on_each_cpu(work_func_t func)
mutex_lock(&workqueue_mutex);
for_each_online_cpu(cpu) {
- INIT_WORK(per_cpu_ptr(works, cpu), func);
- __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
- per_cpu_ptr(works, cpu));
+ struct work_struct *work = per_cpu_ptr(works, cpu);
+
+ INIT_WORK(work, func);
+ set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
+ __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
}
mutex_unlock(&workqueue_mutex);
flush_workqueue(keventd_wq);
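Note: the schedule_on_each_cpu() fix restores an invariant of the low-level
enqueue path: __queue_work() assumes its caller already owns
WORK_STRUCT_PENDING. The public queue_work() claims the bit atomically before
calling down, which is the pattern the new set_bit() mirrors:

	/* Sketch of the invariant as queue_work() establishes it: the
	 * pending bit is claimed first, and __queue_work() is only
	 * reached by the winner of the test_and_set_bit() race. */
	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
		ret = 1;
	}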