author     Jeff Garzik <jeff@garzik.org>   2006-03-22 19:13:54 -0500
committer  Jeff Garzik <jeff@garzik.org>   2006-03-22 19:13:54 -0500
commit     f01c18456993bab43067b678f56c87ca954aa43b (patch)
tree       3e0cd0cdf1a57618202b46a7126125902e3ab832 /kernel
parent     949ec2c8e6b7b89179b85baf6309c009e1a1b951 (diff)
parent     1c2e02750b992703a8a18634e08b04353face243 (diff)
Merge branch 'master'
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c       6
-rw-r--r--  kernel/sched.c      6
-rw-r--r--  kernel/softirq.c   20
-rw-r--r--  kernel/workqueue.c 29
4 files changed, 55 insertions, 6 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index b373322ca49..9bd7b65ee41 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1534,6 +1534,12 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
	check_unshare_flags(&unshare_flags);
+	/* Return -EINVAL for all unsupported flags */
+	err = -EINVAL;
+	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
+				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM))
+		goto bad_unshare_out;
+
	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
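
From userspace, the new check means unshare(2) now fails up front with
-EINVAL for any flag outside the supported set, instead of the flag being
handled inconsistently further down. A minimal sketch of that behavior
(illustrative only; it assumes a kernel carrying this patch, and CLONE_VFORK
is just one example of a flag outside the mask):

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* CLONE_VFORK is outside the supported mask above, so the new
	 * up-front check rejects it with EINVAL. */
	if (unshare(CLONE_VFORK) == -1 && errno == EINVAL)
		printf("unsupported flag rejected with EINVAL\n");

	/* CLONE_NEWNS is in the mask; it may still fail for other
	 * reasons (e.g. EPERM without privilege), but not with EINVAL. */
	if (unshare(CLONE_NEWNS) == -1)
		perror("unshare(CLONE_NEWNS)");
	return 0;
}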
diff --git a/kernel/sched.c b/kernel/sched.c
index 4d46e90f59c..6b6e0d70eb3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -707,12 +707,6 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
					 DEF_TIMESLICE);
		} else {
			/*
-			 * The lower the sleep avg a task has the more
-			 * rapidly it will rise with sleep time.
-			 */
-			sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
-
-			/*
			 * Tasks waking from uninterruptible sleep are
			 * limited in their sleep_avg rise as they
			 * are likely to be waiting on I/O
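
For readers puzzled by the removed line: "x ? : y" is the GCC
conditional-with-omitted-operand extension, which evaluates to x when x is
nonzero and to y otherwise, with x evaluated only once. So sleep_time was
being scaled by (MAX_BONUS - CURRENT_BONUS(p)), floored at 1; this merge
drops that scaling. A standalone sketch of the operator (values invented for
illustration):

#include <stdio.h>

int main(void)
{
	int bonus_headroom = 0;	/* stands in for MAX_BONUS - CURRENT_BONUS(p) */
	int sleep_time = 100;

	/* GCC extension: "a ? : b" means "a ? a : b", a evaluated once. */
	sleep_time *= bonus_headroom ? : 1;
	printf("%d\n", sleep_time);	/* prints 100: multiplier floored at 1 */
	return 0;
}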
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ad3295cdded..ec8fed42a86 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -16,6 +16,7 @@
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
+#include <linux/smp.h>
#include <asm/irq.h>
/*
@@ -495,3 +496,22 @@ __init int spawn_ksoftirqd(void)
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
+
+#ifdef CONFIG_SMP
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
+{
+	int ret = 0;
+
+	preempt_disable();
+	ret = smp_call_function(func, info, retry, wait);
+	local_irq_disable();
+	func(info);
+	local_irq_enable();
+	preempt_enable();
+	return ret;
+}
+EXPORT_SYMBOL(on_each_cpu);
+#endif
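
For callers, on_each_cpu() behaves like smp_call_function() except that the
handler also runs on the local CPU (with interrupts disabled there), and the
preempt_disable() keeps the caller from migrating mid-call. A hypothetical
in-kernel usage sketch (the per-CPU counter and function names are invented
for illustration):

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void reset_counter(void *unused)
{
	/* Runs on every CPU with interrupts off, so keep it short. */
	__get_cpu_var(my_counter) = 0;
}

static void reset_all_counters(void)
{
	/* retry=0: don't retry if sending the IPI fails;
	 * wait=1: block until every CPU has run reset_counter(). */
	on_each_cpu(reset_counter, NULL, 0, 1);
}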
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b052e2c4c71..e9e464a9037 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -27,6 +27,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
+#include <linux/hardirq.h>
/*
* The per-CPU workqueue (if single thread, we always use the first
@@ -476,6 +477,34 @@ void cancel_rearming_delayed_work(struct work_struct *work)
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
+/**
+ * execute_in_process_context - reliably execute the routine with user context
+ * @fn:		the function to execute
+ * @data:	data to pass to the function
+ * @ew:		guaranteed storage for the execute work structure (must
+ *		be available when the work executes)
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns:	0 - function was executed
+ *		1 - function was scheduled for execution
+ */
+int execute_in_process_context(void (*fn)(void *data), void *data,
+			       struct execute_work *ew)
+{
+	if (!in_interrupt()) {
+		fn(data);
+		return 0;
+	}
+
+	INIT_WORK(&ew->work, fn, data);
+	schedule_work(&ew->work);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(execute_in_process_context);
+
int keventd_up(void)
{
	return keventd_wq != NULL;
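
The new helper targets teardown paths that can be entered from interrupt
context but whose cleanup needs process context. A hypothetical sketch
against the 3-argument INIT_WORK() API of this era (my_device,
my_device_free and the kref layout are invented for illustration):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_device {
	struct kref kref;
	struct execute_work ew;	/* must stay valid until the work runs */
};

static void my_device_free(void *data)
{
	struct my_device *dev = data;

	/* Guaranteed process context here, so operations that may
	 * sleep are safe before the final kfree(). */
	kfree(dev);
}

static void my_device_release(struct kref *kref)
{
	struct my_device *dev = container_of(kref, struct my_device, kref);

	/* Runs my_device_free() immediately when in process context,
	 * or defers it to keventd when called from an interrupt. */
	execute_in_process_context(my_device_free, dev, &dev->ew);
}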