author:    Tejun Heo <htejun@gmail.com>  (2006-12-06 20:36:01 -0800)
committer: Linus Torvalds <torvalds@woody.osdl.org>  (2006-12-07 08:39:32 -0800)
commit:    593be07ae8f6f4a1b1b98813fabb155328f8bc0c
tree:      570686c676986d79ff0868f88c499a8b8fc1d3b4 /fs
parent:    e59e2ae2c29700117a54e85c106017c24837119f
[PATCH] file: kill unnecessary timer in fdtable_defer
free_fdtable_rcu() schedules a timer to reschedule fddef->wq if
schedule_work() on it returns 0.  However, schedule_work() guarantees that
the target work is executed at least once after the scheduling, regardless
of its return value.  A return of 0 simply means that the work was already
pending and thus no further action was required.
Another problem is that it used the constant '5' as the @expires argument
to mod_timer().
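(For illustration only, not part of the patch: mod_timer()'s @expires argument
is an absolute time in jiffies, so a five-tick delay would have had to be
written roughly as below; a bare '5' names a moment shortly after boot and is
effectively already in the past.)

	/* illustration: arm the (now removed) timer 5 ticks from now */
	mod_timer(&fddef->timer, jiffies + 5);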
Kill unnecessary fddef->timer.
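For reference, a rough sketch of how the deferral path reads with the timer
gone (paraphrased from the fs/file.c hunk below; the function name is made up
for the sketch and the RCU callback and per-cpu plumbing are elided, so treat
it as illustrative only):

	/* Sketch of the patched path: chain the fdtable onto the per-cpu
	 * deferral list and kick the workqueue.  Even when schedule_work()
	 * returns 0 (work already pending), the pending work still runs and
	 * drains fddef->next, so no timer-based retry is required. */
	static void free_fdtable_deferred_sketch(struct fdtable *fdt,
						 struct fdtable_defer *fddef)
	{
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
	}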
Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs')
-rw-r--r--	fs/file.c	| 29
1 file changed, 2 insertions(+), 27 deletions(-)
diff --git a/fs/file.c b/fs/file.c
index 3787e82f54c..51aef675470 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -21,7 +21,6 @@
 struct fdtable_defer {
 	spinlock_t lock;
 	struct work_struct wq;
-	struct timer_list timer;
 	struct fdtable *next;
 };
 
@@ -75,22 +74,6 @@ static void __free_fdtable(struct fdtable *fdt)
 	kfree(fdt);
 }
 
-static void fdtable_timer(unsigned long data)
-{
-	struct fdtable_defer *fddef = (struct fdtable_defer *)data;
-
-	spin_lock(&fddef->lock);
-	/*
-	 * If someone already emptied the queue return.
-	 */
-	if (!fddef->next)
-		goto out;
-	if (!schedule_work(&fddef->wq))
-		mod_timer(&fddef->timer, 5);
-out:
-	spin_unlock(&fddef->lock);
-}
-
 static void free_fdtable_work(struct work_struct *work)
 {
 	struct fdtable_defer *f =
@@ -144,13 +127,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
 	spin_lock(&fddef->lock);
 	fdt->next = fddef->next;
 	fddef->next = fdt;
-	/*
-	 * vmallocs are handled from the workqueue context.
-	 * If the per-cpu workqueue is running, then we
-	 * defer work scheduling through a timer.
-	 */
-	if (!schedule_work(&fddef->wq))
-		mod_timer(&fddef->timer, 5);
+	/* vmallocs are handled from the workqueue context */
+	schedule_work(&fddef->wq);
 	spin_unlock(&fddef->lock);
 	put_cpu_var(fdtable_defer_list);
 }
@@ -354,9 +332,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
 	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
 	spin_lock_init(&fddef->lock);
 	INIT_WORK(&fddef->wq, free_fdtable_work);
-	init_timer(&fddef->timer);
-	fddef->timer.data = (unsigned long)fddef;
-	fddef->timer.function = fdtable_timer;
 	fddef->next = NULL;
 }
 