-rw-r--r--  drivers/acpi/ec.c            11
-rw-r--r--  drivers/ide/ide-taskfile.c   36
-rw-r--r--  include/linux/sched.h         3
-rw-r--r--  include/linux/topology.h      1
-rw-r--r--  kernel/sched.c               11
-rw-r--r--  kernel/sched_debug.c          1
-rw-r--r--  kernel/sched_fair.c         191
-rw-r--r--  mm/slub.c                    12
-rw-r--r--  net/core/sock.c               4
9 files changed, 170 insertions(+), 100 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e7e197e3a4f..7222a18a031 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -129,7 +129,6 @@ static struct acpi_ec {
struct mutex lock;
wait_queue_head_t wait;
struct list_head list;
- atomic_t irq_count;
u8 handlers_installed;
} *boot_ec, *first_ec;
@@ -182,8 +181,6 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
{
int ret = 0;
- atomic_set(&ec->irq_count, 0);
-
if (unlikely(event == ACPI_EC_EVENT_OBF_1 &&
test_bit(EC_FLAGS_NO_OBF1_GPE, &ec->flags)))
force_poll = 1;
@@ -230,7 +227,6 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
while (time_before(jiffies, delay)) {
if (acpi_ec_check_status(ec, event))
goto end;
- msleep(5);
}
}
pr_err(PREFIX "acpi_ec_wait timeout,"
@@ -533,13 +529,6 @@ static u32 acpi_ec_gpe_handler(void *data)
struct acpi_ec *ec = data;
pr_debug(PREFIX "~~~> interrupt\n");
- atomic_inc(&ec->irq_count);
- if (atomic_read(&ec->irq_count) > 5) {
- pr_err(PREFIX "GPE storm detected, disabling EC GPE\n");
- acpi_disable_gpe(NULL, ec->gpe, ACPI_ISR);
- clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
- return ACPI_INTERRUPT_HANDLED;
- }
clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))
wake_up(&ec->wait);
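
This hunk drops the GPE-storm workaround: the per-transaction irq_count and the msleep(5) in the poll loop go away, so the polling fallback now spins on the EC status register until the deadline passes. A minimal sketch of that busy-poll pattern, using the names visible in the hunk (everything else is hypothetical):

	/* Sketch only: poll EC status until the event shows up or we time out. */
	static int ec_poll_status(struct acpi_ec *ec, enum ec_event event,
				  unsigned long deadline /* in jiffies */)
	{
		while (time_before(jiffies, deadline)) {
			if (acpi_ec_check_status(ec, event))
				return 0;	/* event arrived */
			cpu_relax();		/* no msleep(): tight poll */
		}
		return -ETIME;			/* caller logs the timeout */
	}
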
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 0518a2e948c..4c86a8d84b4 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -423,6 +423,25 @@ void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
}
/*
+ * We got an interrupt on a task_in case, but no errors and no DRQ.
+ *
+ * It might be a spurious irq (shared irq), but it might be a
+ * command that had no output.
+ */
+static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
+{
+ /* Command all done? */
+ if (OK_STAT(stat, READY_STAT, BUSY_STAT)) {
+ task_end_request(drive, rq, stat);
+ return ide_stopped;
+ }
+
+ /* Assume it was a spurious irq */
+ ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
+ return ide_started;
+}
+
+/*
* Handler for command with PIO data-in phase (Read/Read Multiple).
*/
static ide_startstop_t task_in_intr(ide_drive_t *drive)
@@ -431,18 +450,17 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
struct request *rq = HWGROUP(drive)->rq;
u8 stat = ide_read_status(drive);
- /* new way for dealing with premature shared PCI interrupts */
- if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
- if (stat & (ERR_STAT | DRQ_STAT))
- return task_error(drive, rq, __FUNCTION__, stat);
- /* No data yet, so wait for another IRQ. */
- ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
- return ide_started;
- }
+ /* Error? */
+ if (stat & ERR_STAT)
+ return task_error(drive, rq, __FUNCTION__, stat);
+
+ /* Didn't want any data? Odd. */
+ if (!(stat & DRQ_STAT))
+ return task_in_unexpected(drive, rq, stat);
ide_pio_datablock(drive, rq, 0);
- /* If it was the last datablock check status and finish transfer. */
+ /* Are we done? Check status and finish transfer. */
if (!hwif->nleft) {
stat = wait_drive_not_busy(drive);
if (!OK_STAT(stat, 0, BAD_STAT))
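
task_in_intr() previously folded the premature/shared-IRQ case into a single OK_STAT() test; it now checks ERR_STAT and DRQ_STAT separately and hands the "no error, but no DRQ either" case to the new task_in_unexpected(), which either completes the request (drive READY, not BUSY) or assumes a spurious interrupt and re-arms the handler. For reference, OK_STAT() in the IDE code of this era expands to roughly the following (quoted from memory, treat as an approximation):

	/* Status is good iff all "good" bits are set and no "bad" bit is. */
	#define OK_STAT(stat, good, bad)  (((stat) & ((good) | (bad))) == (good))

	/* e.g. OK_STAT(stat, READY_STAT, BUSY_STAT): READY set, BUSY clear. */
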
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 11d8e9a74ef..3625fcaf5d0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -929,6 +929,9 @@ struct sched_entity {
u64 vruntime;
u64 prev_sum_exec_runtime;
+ u64 last_wakeup;
+ u64 avg_overlap;
+
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
u64 wait_max;
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 2352f46160d..2d8dac8799c 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -138,7 +138,6 @@
| SD_BALANCE_FORK \
| SD_BALANCE_EXEC \
| SD_WAKE_AFFINE \
- | SD_WAKE_IDLE \
| SD_SHARE_PKG_RESOURCES\
| BALANCE_FOR_MC_POWER, \
.last_balance = jiffies, \
diff --git a/kernel/sched.c b/kernel/sched.c
index d1ad69b270c..3f7c5eb254e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1396,6 +1396,12 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
s64 delta;
+ /*
+ * Buddy candidates are cache hot:
+ */
+ if (&p->se == cfs_rq_of(&p->se)->next)
+ return 1;
+
if (p->sched_class != &fair_sched_class)
return 0;
@@ -1855,10 +1861,11 @@ out_activate:
schedstat_inc(p, se.nr_wakeups_remote);
update_rq_clock(rq);
activate_task(rq, p, 1);
- check_preempt_curr(rq, p);
success = 1;
out_running:
+ check_preempt_curr(rq, p);
+
p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
if (p->sched_class->task_wake_up)
@@ -1892,6 +1899,8 @@ static void __sched_fork(struct task_struct *p)
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
+ p->se.last_wakeup = 0;
+ p->se.avg_overlap = 0;
#ifdef CONFIG_SCHEDSTATS
p->se.wait_start = 0;
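
Two behavioural changes in this file: task_hot() now treats the cfs_rq's "next" buddy as cache hot so it is not migrated away at the wrong moment, and check_preempt_curr() moves from the out_activate path to out_running, so the preemption check also runs when try_to_wake_up() finds the task already queued. A simplified sketch of the resulting tail of try_to_wake_up(), with the surrounding code elided:

	out_activate:
		/* ... wakeup schedstats ... */
		update_rq_clock(rq);
		activate_task(rq, p, 1);
		success = 1;

	out_running:
		/* reached for both freshly activated and already-queued tasks */
		check_preempt_curr(rq, p);
		p->state = TASK_RUNNING;
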
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 4b5e24cf2f4..ef358ba0768 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -288,6 +288,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
PN(se.exec_start);
PN(se.vruntime);
PN(se.sum_exec_runtime);
+ PN(se.avg_overlap);
nr_switches = p->nvcsw + p->nivcsw;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f2cc59080ef..b85cac4b5e2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -73,13 +73,13 @@ unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
/*
* SCHED_OTHER wake-up granularity.
- * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
@@ -556,6 +556,21 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
account_entity_enqueue(cfs_rq, se);
}
+static void update_avg(u64 *avg, u64 sample)
+{
+ s64 diff = sample - *avg;
+ *avg += diff >> 3;
+}
+
+static void update_avg_stats(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ if (!se->last_wakeup)
+ return;
+
+ update_avg(&se->avg_overlap, se->sum_exec_runtime - se->last_wakeup);
+ se->last_wakeup = 0;
+}
+
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
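
update_avg() is a fixed-point exponentially weighted moving average with weight 1/8: avg' = avg + (sample - avg)/8. update_avg_stats() feeds it the CPU time the entity accumulated between waking another task (last_wakeup is stamped in check_preempt_wakeup() further down) and going back to sleep, so se->avg_overlap estimates how long waker and wakee tend to overlap. A worked example with illustrative values:

	u64 avg = 1000000;		/* current estimate: 1 ms        */
	update_avg(&avg, 2000000);	/* new sample: 2 ms of overlap   */
	/* diff = 1000000, diff >> 3 = 125000, so avg becomes 1125000 ns */
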
@@ -566,6 +581,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
update_stats_dequeue(cfs_rq, se);
if (sleep) {
+ update_avg_stats(cfs_rq, se);
#ifdef CONFIG_SCHEDSTATS
if (entity_is_task(se)) {
struct task_struct *tsk = task_of(se);
@@ -980,96 +996,121 @@ static inline int wake_idle(int cpu, struct task_struct *p)
#endif
#ifdef CONFIG_SMP
-static int select_task_rq_fair(struct task_struct *p, int sync)
+
+static const struct sched_class fair_sched_class;
+
+static int
+wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
+ struct task_struct *p, int prev_cpu, int this_cpu, int sync,
+ int idx, unsigned long load, unsigned long this_load,
+ unsigned int imbalance)
{
- int cpu, this_cpu;
- struct rq *rq;
- struct sched_domain *sd, *this_sd = NULL;
- int new_cpu;
+ struct task_struct *curr = this_rq->curr;
+ unsigned long tl = this_load;
+ unsigned long tl_per_task;
+
+ if (!(this_sd->flags & SD_WAKE_AFFINE))
+ return 0;
+
+ /*
+ * If the currently running task will sleep within
+ * a reasonable amount of time then attract this newly
+ * woken task:
+ */
+ if (sync && curr->sched_class == &fair_sched_class) {
+ if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
+ p->se.avg_overlap < sysctl_sched_migration_cost)
+ return 1;
+ }
- cpu = task_cpu(p);
- rq = task_rq(p);
- this_cpu = smp_processor_id();
- new_cpu = cpu;
+ schedstat_inc(p, se.nr_wakeups_affine_attempts);
+ tl_per_task = cpu_avg_load_per_task(this_cpu);
- if (cpu == this_cpu)
- goto out_set_cpu;
+ /*
+ * If sync wakeup then subtract the (maximum possible)
+ * effect of the currently running task from the load
+ * of the current CPU:
+ */
+ if (sync)
+ tl -= current->se.load.weight;
+
+ if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
+ 100*(tl + p->se.load.weight) <= imbalance*load) {
+ /*
+ * This domain has SD_WAKE_AFFINE and
+ * p is cache cold in this domain, and
+ * there is no bad imbalance.
+ */
+ schedstat_inc(this_sd, ttwu_move_affine);
+ schedstat_inc(p, se.nr_wakeups_affine);
+ return 1;
+ }
+ return 0;
+}
+
+static int select_task_rq_fair(struct task_struct *p, int sync)
+{
+ struct sched_domain *sd, *this_sd = NULL;
+ int prev_cpu, this_cpu, new_cpu;
+ unsigned long load, this_load;
+ struct rq *rq, *this_rq;
+ unsigned int imbalance;
+ int idx;
+
+ prev_cpu = task_cpu(p);
+ rq = task_rq(p);
+ this_cpu = smp_processor_id();
+ this_rq = cpu_rq(this_cpu);
+ new_cpu = prev_cpu;
+
+ /*
+ * 'this_sd' is the first domain that both
+ * this_cpu and prev_cpu are present in:
+ */
for_each_domain(this_cpu, sd) {
- if (cpu_isset(cpu, sd->span)) {
+ if (cpu_isset(prev_cpu, sd->span)) {
this_sd = sd;
break;
}
}
if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
- goto out_set_cpu;
+ goto out;
/*
* Check for affine wakeup and passive balancing possibilities.
*/
- if (this_sd) {
- int idx = this_sd->wake_idx;
- unsigned int imbalance;
- unsigned long load, this_load;
-
- imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
-
- load = source_load(cpu, idx);
- this_load = target_load(this_cpu, idx);
-
- new_cpu = this_cpu; /* Wake to this CPU if we can */
-
- if (this_sd->flags & SD_WAKE_AFFINE) {
- unsigned long tl = this_load;
- unsigned long tl_per_task;
-
- /*
- * Attract cache-cold tasks on sync wakeups:
- */
- if (sync && !task_hot(p, rq->clock, this_sd))
- goto out_set_cpu;
-
- schedstat_inc(p, se.nr_wakeups_affine_attempts);
- tl_per_task = cpu_avg_load_per_task(this_cpu);
-
- /*
- * If sync wakeup then subtract the (maximum possible)
- * effect of the currently running task from the load
- * of the current CPU:
- */
- if (sync)
- tl -= current->se.load.weight;
-
- if ((tl <= load &&
- tl + target_load(cpu, idx) <= tl_per_task) ||
- 100*(tl + p->se.load.weight) <= imbalance*load) {
- /*
- * This domain has SD_WAKE_AFFINE and
- * p is cache cold in this domain, and
- * there is no bad imbalance.
- */
- schedstat_inc(this_sd, ttwu_move_affine);
- schedstat_inc(p, se.nr_wakeups_affine);
- goto out_set_cpu;
- }
- }
+ if (!this_sd)
+ goto out;
- /*
- * Start passive balancing when half the imbalance_pct
- * limit is reached.
- */
- if (this_sd->flags & SD_WAKE_BALANCE) {
- if (imbalance*this_load <= 100*load) {
- schedstat_inc(this_sd, ttwu_move_balance);
- schedstat_inc(p, se.nr_wakeups_passive);
- goto out_set_cpu;
- }
+ idx = this_sd->wake_idx;
+
+ imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+
+ load = source_load(prev_cpu, idx);
+ this_load = target_load(this_cpu, idx);
+
+ if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
+ load, this_load, imbalance))
+ return this_cpu;
+
+ if (prev_cpu == this_cpu)
+ goto out;
+
+ /*
+ * Start passive balancing when half the imbalance_pct
+ * limit is reached.
+ */
+ if (this_sd->flags & SD_WAKE_BALANCE) {
+ if (imbalance*this_load <= 100*load) {
+ schedstat_inc(this_sd, ttwu_move_balance);
+ schedstat_inc(p, se.nr_wakeups_passive);
+ return this_cpu;
}
}
- new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
-out_set_cpu:
+out:
return wake_idle(new_cpu, p);
}
#endif /* CONFIG_SMP */
@@ -1092,6 +1133,10 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
return;
}
+ se->last_wakeup = se->sum_exec_runtime;
+ if (unlikely(se == pse))
+ return;
+
cfs_rq_of(pse)->next = pse;
/*
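
The affine-wakeup decision moves out of select_task_rq_fair() into the new wake_affine() helper, and the old "attract cache-cold tasks on sync wakeups" rule is replaced by an avg_overlap test: a sync wakeup pulls the wakee onto this CPU only when both waker and wakee normally run for less than sysctl_sched_migration_cost after waking the other, i.e. they behave like a ping-pong pair that will not really compete for the CPU. Reduced to its core (a sketch, not the full function):

	if (sync && curr->sched_class == &fair_sched_class &&
	    curr->se.avg_overlap < sysctl_sched_migration_cost &&
	    p->se.avg_overlap < sysctl_sched_migration_cost)
		return 1;	/* affine wakeup: both sides sleep again soon */
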
diff --git a/mm/slub.c b/mm/slub.c
index 96d63eb3ab1..ca71d5b81e4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1536,9 +1536,15 @@ new_slab:
* That is only possible if certain conditions are met that are being
* checked when a slab is created.
*/
- if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
- return kmalloc_large(s->objsize, gfpflags);
-
+ if (!(gfpflags & __GFP_NORETRY) &&
+ (s->flags & __PAGE_ALLOC_FALLBACK)) {
+ if (gfpflags & __GFP_WAIT)
+ local_irq_enable();
+ object = kmalloc_large(s->objsize, gfpflags);
+ if (gfpflags & __GFP_WAIT)
+ local_irq_disable();
+ return object;
+ }
return NULL;
debug:
if (!alloc_debug_processing(s, c->page, object, addr))
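
The page-allocator fallback may sleep inside kmalloc_large() when __GFP_WAIT is set, but the slow path around the new_slab: label runs with local interrupts disabled, so the hunk re-enables interrupts around the call for sleeping allocations and disables them again before returning the object. The same pattern as a standalone sketch (the wrapper name is hypothetical):

	/* Call a possibly-sleeping allocator from an irqs-off section. */
	static void *large_alloc_fallback(struct kmem_cache *s, gfp_t gfpflags)
	{
		void *object;

		if (gfpflags & __GFP_WAIT)
			local_irq_enable();	/* may sleep: irqs must be on */
		object = kmalloc_large(s->objsize, gfpflags);
		if (gfpflags & __GFP_WAIT)
			local_irq_disable();	/* restore irqs-off state */
		return object;
	}
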
diff --git a/net/core/sock.c b/net/core/sock.c
index 09cb3a74de7..2654c147c00 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1621,7 +1621,7 @@ static void sock_def_readable(struct sock *sk, int len)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible_sync(sk->sk_sleep);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
read_unlock(&sk->sk_callback_lock);
}
@@ -1635,7 +1635,7 @@ static void sock_def_write_space(struct sock *sk)
*/
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible_sync(sk->sk_sleep);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))