author      Ingo Molnar <mingo@elte.hu>  2008-08-26 10:25:59 +0200
committer   Ingo Molnar <mingo@elte.hu>  2008-08-26 10:25:59 +0200
commit      3cf430b0636045dc524759a0852293ba037732a7 (patch)
tree        2c4e03f9028dc23ab4e6ca5a3a5a263af397984f /kernel
parent      93dcf55f828b035fc93fc19eb03c1390e1e6d570 (diff)
parent      83097aca8567a0bd593534853b71fe0fa9a75d69 (diff)
Merge branch 'linus' into sched/devel
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/capability.c         | 21
-rw-r--r--  kernel/kexec.c              | 66
-rw-r--r--  kernel/lockdep.c            |  8
-rw-r--r--  kernel/lockdep_internals.h  | 13
-rw-r--r--  kernel/lockdep_proc.c       | 12
-rw-r--r--  kernel/nsproxy.c            |  1
-rw-r--r--  kernel/power/swap.c         |  1
-rw-r--r--  kernel/ptrace.c             |  5
-rw-r--r--  kernel/rcupdate.c           |  1
-rw-r--r--  kernel/sched.c              | 54
-rw-r--r--  kernel/sched_features.h     |  2
-rw-r--r--  kernel/sched_rt.c           |  2
-rw-r--r--  kernel/signal.c             |  5
-rw-r--r--  kernel/spinlock.c           |  3
-rw-r--r--  kernel/sys.c                | 10
-rw-r--r--  kernel/time/tick-sched.c    |  6
-rw-r--r--  kernel/user_namespace.c     |  1
-rw-r--r--  kernel/utsname.c            |  1
-rw-r--r--  kernel/utsname_sysctl.c     |  1
19 files changed, 128 insertions(+), 85 deletions(-)
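
The most invasive change in this merge is in kernel/kexec.c below: a hand-rolled
xchg()-based binary lock (kexec_lock) is replaced by a static
DEFINE_MUTEX(kexec_mutex) taken with mutex_trylock(), so callers bail out with
-EBUSY instead of racing on an integer flag. A minimal sketch of that
try-lock-or-bail pattern, recreated with POSIX threads so it compiles in
userspace — demo_mutex and demo_load() are hypothetical names, not part of this
patch:

    #include <stdio.h>
    #include <pthread.h>

    /* Userspace stand-in for the kernel pattern: take the lock without
     * blocking, or report "busy" to the caller. */
    static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int demo_load(void)
    {
            /* pthread_mutex_trylock() returns 0 on success and EBUSY if the
             * lock is held; the kernel's mutex_trylock() instead returns
             * nonzero on success, hence the inverted test in the patch. */
            if (pthread_mutex_trylock(&demo_mutex) != 0)
                    return -1;      /* caller sees "busy" */

            /* ... critical section: swap in the new image ... */

            pthread_mutex_unlock(&demo_mutex);
            return 0;
    }

    int main(void)
    {
            printf("demo_load() -> %d\n", demo_load());
            return 0;
    }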
diff --git a/kernel/capability.c b/kernel/capability.c
index 0101e847603..33e51e78c2d 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -486,17 +486,22 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
         return ret;
 }
 
-int __capable(struct task_struct *t, int cap)
+/**
+ * capable - Determine if the current task has a superior capability in effect
+ * @cap: The capability to be tested for
+ *
+ * Return true if the current task has the given superior capability currently
+ * available for use, false if not.
+ *
+ * This sets PF_SUPERPRIV on the task if the capability is available on the
+ * assumption that it's about to be used.
+ */
+int capable(int cap)
 {
-        if (security_capable(t, cap) == 0) {
-                t->flags |= PF_SUPERPRIV;
+        if (has_capability(current, cap)) {
+                current->flags |= PF_SUPERPRIV;
                 return 1;
         }
         return 0;
 }
-
-int capable(int cap)
-{
-        return __capable(current, cap);
-}
 EXPORT_SYMBOL(capable);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c8a4370e2a3..59f3f0df35d 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -12,7 +12,7 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/kexec.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
@@ -77,7 +77,7 @@ int kexec_should_crash(struct task_struct *p)
  *
  * The code for the transition from the current kernel to the
  * the new kernel is placed in the control_code_buffer, whose size
- * is given by KEXEC_CONTROL_CODE_SIZE. In the best case only a single
+ * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
  * page of memory is necessary, but some architectures require more.
  * Because this memory must be identity mapped in the transition from
  * virtual to physical addresses it must live in the range
@@ -242,7 +242,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
          */
         result = -ENOMEM;
         image->control_code_page = kimage_alloc_control_pages(image,
-                                           get_order(KEXEC_CONTROL_CODE_SIZE));
+                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
         if (!image->control_code_page) {
                 printk(KERN_ERR "Could not allocate control_code_buffer\n");
                 goto out;
@@ -317,7 +317,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
          */
         result = -ENOMEM;
         image->control_code_page = kimage_alloc_control_pages(image,
-                                           get_order(KEXEC_CONTROL_CODE_SIZE));
+                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
         if (!image->control_code_page) {
                 printk(KERN_ERR "Could not allocate control_code_buffer\n");
                 goto out;
@@ -924,19 +924,14 @@ static int kimage_load_segment(struct kimage *image,
  */
 struct kimage *kexec_image;
 struct kimage *kexec_crash_image;
-/*
- * A home grown binary mutex.
- * Nothing can wait so this mutex is safe to use
- * in interrupt context :)
- */
-static int kexec_lock;
+
+static DEFINE_MUTEX(kexec_mutex);
 
 asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
                                 struct kexec_segment __user *segments,
                                 unsigned long flags)
 {
         struct kimage **dest_image, *image;
-        int locked;
         int result;
 
         /* We only trust the superuser with rebooting the system. */
@@ -972,8 +967,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
          *
          * KISS: always take the mutex.
          */
-        locked = xchg(&kexec_lock, 1);
-        if (locked)
+        if (!mutex_trylock(&kexec_mutex))
                 return -EBUSY;
 
         dest_image = &kexec_image;
@@ -1015,8 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
                 image = xchg(dest_image, image);
 
 out:
-        locked = xchg(&kexec_lock, 0); /* Release the mutex */
-        BUG_ON(!locked);
+        mutex_unlock(&kexec_mutex);
         kimage_free(image);
 
         return result;
@@ -1063,10 +1056,7 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
 
 void crash_kexec(struct pt_regs *regs)
 {
-        int locked;
-
-
-        /* Take the kexec_lock here to prevent sys_kexec_load
+        /* Take the kexec_mutex here to prevent sys_kexec_load
          * running on one cpu from replacing the crash kernel
          * we are using after a panic on a different cpu.
          *
@@ -1074,8 +1064,7 @@ void crash_kexec(struct pt_regs *regs)
          * of memory the xchg(&kexec_crash_image) would be
          * sufficient. But since I reuse the memory...
          */
-        locked = xchg(&kexec_lock, 1);
-        if (!locked) {
+        if (mutex_trylock(&kexec_mutex)) {
                 if (kexec_crash_image) {
                         struct pt_regs fixed_regs;
                         crash_setup_regs(&fixed_regs, regs);
@@ -1083,8 +1072,7 @@ void crash_kexec(struct pt_regs *regs)
                         machine_crash_shutdown(&fixed_regs);
                         machine_kexec(kexec_crash_image);
                 }
-                locked = xchg(&kexec_lock, 0);
-                BUG_ON(!locked);
+                mutex_unlock(&kexec_mutex);
         }
 }
 
@@ -1426,25 +1414,23 @@ static int __init crash_save_vmcoreinfo_init(void)
 
 module_init(crash_save_vmcoreinfo_init)
 
-/**
- * kernel_kexec - reboot the system
- *
- * Move into place and start executing a preloaded standalone
- * executable. If nothing was preloaded return an error.
+/*
+ * Move into place and start executing a preloaded standalone
+ * executable. If nothing was preloaded return an error.
  */
 int kernel_kexec(void)
 {
         int error = 0;
 
-        if (xchg(&kexec_lock, 1))
+        if (!mutex_trylock(&kexec_mutex))
                 return -EBUSY;
         if (!kexec_image) {
                 error = -EINVAL;
                 goto Unlock;
         }
 
-        if (kexec_image->preserve_context) {
 #ifdef CONFIG_KEXEC_JUMP
+        if (kexec_image->preserve_context) {
                 mutex_lock(&pm_mutex);
                 pm_prepare_console();
                 error = freeze_processes();
@@ -1459,6 +1445,7 @@ int kernel_kexec(void)
                 error = disable_nonboot_cpus();
                 if (error)
                         goto Resume_devices;
+                device_pm_lock();
                 local_irq_disable();
                 /* At this point, device_suspend() has been called,
                  * but *not* device_power_down(). We *must*
@@ -1470,26 +1457,22 @@ int kernel_kexec(void)
                 error = device_power_down(PMSG_FREEZE);
                 if (error)
                         goto Enable_irqs;
-                save_processor_state();
+        } else
 #endif
-        } else {
-                blocking_notifier_call_chain(&reboot_notifier_list,
-                                             SYS_RESTART, NULL);
-                system_state = SYSTEM_RESTART;
-                device_shutdown();
-                sysdev_shutdown();
+        {
+                kernel_restart_prepare(NULL);
                 printk(KERN_EMERG "Starting new kernel\n");
                 machine_shutdown();
         }
 
         machine_kexec(kexec_image);
 
-        if (kexec_image->preserve_context) {
 #ifdef CONFIG_KEXEC_JUMP
-                restore_processor_state();
+        if (kexec_image->preserve_context) {
                 device_power_up(PMSG_RESTORE);
 Enable_irqs:
                 local_irq_enable();
+                device_pm_unlock();
                 enable_nonboot_cpus();
 Resume_devices:
                 device_resume(PMSG_RESTORE);
@@ -1499,11 +1482,10 @@ int kernel_kexec(void)
 Restore_console:
                 pm_restore_console();
                 mutex_unlock(&pm_mutex);
-#endif
         }
+#endif
 
 Unlock:
-        xchg(&kexec_lock, 0);
-
+        mutex_unlock(&kexec_mutex);
         return error;
 }
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 1aa91fd6b06..3bfb1877a00 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1759,11 +1759,10 @@ static void check_chain_key(struct task_struct *curr)
                 hlock = curr->held_locks + i;
                 if (chain_key != hlock->prev_chain_key) {
                         debug_locks_off();
-                        printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
+                        WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
                                 curr->lockdep_depth, i,
                                 (unsigned long long)chain_key,
                                 (unsigned long long)hlock->prev_chain_key);
-                        WARN_ON(1);
                         return;
                 }
                 id = hlock->class_idx - 1;
@@ -1778,11 +1777,10 @@ static void check_chain_key(struct task_struct *curr)
         }
         if (chain_key != curr->curr_chain_key) {
                 debug_locks_off();
-                printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
+                WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
                         curr->lockdep_depth, i,
                         (unsigned long long)chain_key,
                         (unsigned long long)curr->curr_chain_key);
-                WARN_ON(1);
         }
 #endif
 }
@@ -2584,7 +2582,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         hlock->trylock = trylock;
         hlock->read = read;
         hlock->check = check;
-        hlock->hardirqs_off = hardirqs_off;
+        hlock->hardirqs_off = !!hardirqs_off;
 #ifdef CONFIG_LOCK_STAT
         hlock->waittime_stamp = 0;
         hlock->holdtime_stamp = sched_clock();
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 55db193d366..56b196932c0 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -50,8 +50,21 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+#ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+#else
+static inline unsigned long
+lockdep_count_forward_deps(struct lock_class *class)
+{
+        return 0;
+}
+static inline unsigned long
+lockdep_count_backward_deps(struct lock_class *class)
+{
+        return 0;
+}
+#endif
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index fa19aee604c..4b194d34d77 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -82,7 +82,6 @@ static void print_name(struct seq_file *m, struct lock_class *class)
 
 static int l_show(struct seq_file *m, void *v)
 {
-        unsigned long nr_forward_deps, nr_backward_deps;
         struct lock_class *class = v;
         struct lock_list *entry;
         char c1, c2, c3, c4;
@@ -96,11 +95,10 @@ static int l_show(struct seq_file *m, void *v)
 #ifdef CONFIG_DEBUG_LOCKDEP
         seq_printf(m, " OPS:%8ld", class->ops);
 #endif
-        nr_forward_deps = lockdep_count_forward_deps(class);
-        seq_printf(m, " FD:%5ld", nr_forward_deps);
-
-        nr_backward_deps = lockdep_count_backward_deps(class);
-        seq_printf(m, " BD:%5ld", nr_backward_deps);
+#ifdef CONFIG_PROVE_LOCKING
+        seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
+        seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
+#endif
 
         get_usage_chars(class, &c1, &c2, &c3, &c4);
         seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
@@ -325,7 +323,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
                 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
                         nr_hardirq_read_unsafe++;
 
+#ifdef CONFIG_PROVE_LOCKING
                 sum_forward_deps += lockdep_count_forward_deps(class);
+#endif
         }
 #ifdef CONFIG_DEBUG_LOCKDEP
         DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 21575fc46d0..1d3ef29a258 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -14,7 +14,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/nsproxy.h>
 #include <linux/init_task.h>
 #include <linux/mnt_namespace.h>
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index a0abf9a463f..80ccac849e4 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/file.h>
 #include <linux/utsname.h>
-#include <linux/version.h>
 #include <linux/delay.h>
 #include <linux/bitops.h>
 #include <linux/genhd.h>
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 082b3fcb32a..356699a96d5 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -140,7 +140,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
         if (!dumpable && !capable(CAP_SYS_PTRACE))
                 return -EPERM;
 
-        return security_ptrace(current, task, mode);
+        return security_ptrace_may_access(task, mode);
 }
 
 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
@@ -499,8 +499,7 @@ repeat:
                 goto repeat;
         }
 
-        ret = security_ptrace(current->parent, current,
-                              PTRACE_MODE_ATTACH);
+        ret = security_ptrace_traceme(current->parent);
 
         /*
          * Set the ptrace bit in the process ptrace flags.
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f14f372cf6f..467d5940f62 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -77,6 +77,7 @@ void wakeme_after_rcu(struct rcu_head *head)
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
  */
+void synchronize_rcu(void);     /* Makes kernel-doc tools happy */
 synchronize_rcu_xxx(synchronize_rcu, call_rcu)
 EXPORT_SYMBOL_GPL(synchronize_rcu);
diff --git a/kernel/sched.c b/kernel/sched.c
index 6a43c8942b0..29e2ec0bd83 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -808,9 +808,9 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
 
 /*
  * ratelimit for updating the group shares.
- * default: 0.5ms
+ * default: 0.25ms
  */
-const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+unsigned int sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * period over which we measure -rt task cpu usage in us.
@@ -4663,6 +4663,52 @@ int __sched wait_for_completion_killable(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion_killable);
 
+/**
+ * try_wait_for_completion - try to decrement a completion without blocking
+ * @x: completion structure
+ *
+ * Returns: 0 if a decrement cannot be done without blocking
+ *          1 if a decrement succeeded.
+ *
+ * If a completion is being used as a counting completion,
+ * attempt to decrement the counter without blocking. This
+ * enables us to avoid waiting if the resource the completion
+ * is protecting is not available.
+ */
+bool try_wait_for_completion(struct completion *x)
+{
+        int ret = 1;
+
+        spin_lock_irq(&x->wait.lock);
+        if (!x->done)
+                ret = 0;
+        else
+                x->done--;
+        spin_unlock_irq(&x->wait.lock);
+        return ret;
+}
+EXPORT_SYMBOL(try_wait_for_completion);
+
+/**
+ * completion_done - Test to see if a completion has any waiters
+ * @x: completion structure
+ *
+ * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ *          1 if there are no waiters.
+ *
+ */
+bool completion_done(struct completion *x)
+{
+        int ret = 1;
+
+        spin_lock_irq(&x->wait.lock);
+        if (!x->done)
+                ret = 0;
+        spin_unlock_irq(&x->wait.lock);
+        return ret;
+}
+EXPORT_SYMBOL(completion_done);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
@@ -5734,6 +5780,8 @@ static inline void sched_init_granularity(void)
                 sysctl_sched_latency = limit;
 
         sysctl_sched_wakeup_granularity *= factor;
+
+        sysctl_sched_shares_ratelimit *= factor;
 }
 
 #ifdef CONFIG_SMP
@@ -8456,8 +8504,8 @@ struct task_group *sched_create_group(struct task_group *parent)
         WARN_ON(!parent); /* root should already exist */
 
         tg->parent = parent;
-        list_add_rcu(&tg->siblings, &parent->children);
         INIT_LIST_HEAD(&tg->children);
+        list_add_rcu(&tg->siblings, &parent->children);
         spin_unlock_irqrestore(&task_group_lock, flags);
 
         return tg;
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 862b06bd560..9353ca78154 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -8,6 +8,6 @@ SCHED_FEAT(SYNC_WAKEUPS, 1)
 SCHED_FEAT(HRTICK, 1)
 SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(ASYM_GRAN, 1)
-SCHED_FEAT(LB_BIAS, 0)
+SCHED_FEAT(LB_BIAS, 1)
 SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
 SCHED_FEAT(ASYM_EFF_LOAD, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 6163e4cf885..998ba54b454 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -298,7 +298,7 @@ static void __disable_runtime(struct rq *rq)
                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                         s64 diff;
 
-                        if (iter == rt_rq)
+                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                 continue;
 
                         spin_lock(&iter->rt_runtime_lock);
diff --git a/kernel/signal.c b/kernel/signal.c
index c539f60c6f4..e661b01d340 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1338,6 +1338,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
         struct siginfo info;
         unsigned long flags;
         struct sighand_struct *psig;
+        int ret = sig;
 
         BUG_ON(sig == -1);
 
@@ -1402,7 +1403,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
                  * is implementation-defined: we do (if you don't want
                  * it, just use SIG_IGN instead).
                  */
-                tsk->exit_signal = -1;
+                ret = tsk->exit_signal = -1;
                 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                         sig = -1;
         }
@@ -1411,7 +1412,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
                 __wake_up_parent(tsk, tsk->parent);
         spin_unlock_irqrestore(&psig->siglock, flags);
 
-        return sig;
+        return ret;
 }
 
 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 44baeea94ab..29ab20749dd 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -290,7 +290,6 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
         spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
         LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
-
 EXPORT_SYMBOL(_spin_lock_nested);
 
 unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
@@ -312,7 +311,6 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
 #endif
         return flags;
 }
-
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
 void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
@@ -322,7 +320,6 @@ void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
         spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
         LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
-
 EXPORT_SYMBOL(_spin_lock_nest_lock);
 
 #endif
diff --git a/kernel/sys.c b/kernel/sys.c
index c01858090a9..038a7bc0901 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -169,9 +169,9 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
                                 pgrp = find_vpid(who);
                         else
                                 pgrp = task_pgrp(current);
-                        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+                        do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                                 error = set_one_prio(p, niceval, error);
-                        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
+                        } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
                         break;
                 case PRIO_USER:
                         user = current->user;
@@ -229,11 +229,11 @@ asmlinkage long sys_getpriority(int which, int who)
                                 pgrp = find_vpid(who);
                         else
                                 pgrp = task_pgrp(current);
-                        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+                        do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                                 niceval = 20 - task_nice(p);
                                 if (niceval > retval)
                                         retval = niceval;
-                        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
+                        } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
                         break;
                 case PRIO_USER:
                         user = current->user;
@@ -274,7 +274,7 @@ void emergency_restart(void)
 }
 EXPORT_SYMBOL_GPL(emergency_restart);
 
-static void kernel_restart_prepare(char *cmd)
+void kernel_restart_prepare(char *cmd)
 {
         blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
         system_state = SYSTEM_RESTART;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f5da526424a..7a46bde78c6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -643,17 +643,21 @@ void tick_setup_sched_timer(void)
                 ts->nohz_mode = NOHZ_MODE_HIGHRES;
 #endif
 }
+#endif /* HIGH_RES_TIMERS */
 
+#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
 void tick_cancel_sched_timer(int cpu)
 {
         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
+# ifdef CONFIG_HIGH_RES_TIMERS
         if (ts->sched_timer.base)
                 hrtimer_cancel(&ts->sched_timer);
+# endif
 
         ts->nohz_mode = NOHZ_MODE_INACTIVE;
 }
-#endif /* HIGH_RES_TIMERS */
+#endif
 
 /**
  * Async notification about clocksource changes
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index a9ab0596de4..532858fa5b8 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -6,7 +6,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
 #include <linux/user_namespace.h>
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 64d398f1244..815237a55af 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/uts.h>
 #include <linux/utsname.h>
-#include <linux/version.h>
 #include <linux/err.h>
 #include <linux/slab.h>
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index fe3a56c2256..4ab9659d269 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/uts.h>
 #include <linux/utsname.h>
-#include <linux/version.h>
 #include <linux/sysctl.h>
 
 static void *get_uts(ctl_table *table, int write)
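
The try_wait_for_completion()/completion_done() helpers added to kernel/sched.c
above treat the completion's done field as a counter guarded by the wait-queue
lock: a decrement succeeds only while the counter is non-zero, so a caller can
skip sleeping when the protected resource is unavailable. A minimal userspace
sketch of that semantic, assuming a pthread mutex in place of the completion's
spinlock — struct demo_completion and demo_try_wait() are hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <pthread.h>

    /* Hypothetical userspace mirror of a counting completion. */
    struct demo_completion {
            pthread_mutex_t lock;   /* stands in for x->wait.lock */
            unsigned int done;      /* completion count */
    };

    static bool demo_try_wait(struct demo_completion *x)
    {
            bool ret = true;

            pthread_mutex_lock(&x->lock);
            if (!x->done)
                    ret = false;    /* nothing to consume: would have to block */
            else
                    x->done--;      /* consume one completion */
            pthread_mutex_unlock(&x->lock);
            return ret;
    }

    int main(void)
    {
            struct demo_completion c = { PTHREAD_MUTEX_INITIALIZER, 1 };

            printf("first try:  %d\n", demo_try_wait(&c));  /* 1: decremented */
            printf("second try: %d\n", demo_try_wait(&c));  /* 0: would block */
            return 0;
    }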