path: root/kernel/irq
author     Ingo Molnar <mingo@elte.hu>    2008-07-18 22:50:34 +0200
committer  Ingo Molnar <mingo@elte.hu>    2008-07-18 22:50:34 +0200
commit     a208f37a465e222218974ab20a31b42b7b4893b2 (patch)
tree       77c6acdd4be32024330a14f2618b814126ce7a20 /kernel/irq
parent     511d9d34183662aada3890883e860b151d707e22 (diff)
parent     5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'linus' into x86/x2apic
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/manage.c  33
-rw-r--r--  kernel/irq/proc.c    59
2 files changed, 83 insertions(+), 9 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 628b5572a7c..909b2231fa9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,8 @@
#ifdef CONFIG_SMP
+cpumask_t irq_default_affinity = CPU_MASK_ALL;
+
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
@@ -102,6 +104,27 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
return 0;
}
+#ifndef CONFIG_AUTO_IRQ_AFFINITY
+/*
+ * Generic version of the affinity autoselector.
+ */
+int irq_select_affinity(unsigned int irq)
+{
+ cpumask_t mask;
+
+ if (!irq_can_set_affinity(irq))
+ return 0;
+
+ cpus_and(mask, cpu_online_map, irq_default_affinity);
+
+ irq_desc[irq].affinity = mask;
+ irq_desc[irq].chip->set_affinity(irq, mask);
+
+ set_balance_irq_affinity(irq, mask);
+ return 0;
+}
+#endif
+
#endif
/**
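The hunk above adds the generic affinity autoselector: it intersects the system-wide default mask (irq_default_affinity, initialized to CPU_MASK_ALL) with the map of online CPUs and applies the result through the irq chip. As a rough userspace sketch of just that selection step, using plain unsigned long bitmaps as stand-ins for cpumask_t (all names below are illustrative, not kernel API):

    #include <stdio.h>

    /* Illustrative stand-ins for cpumask_t and the kernel globals;
     * none of these names are kernel API. */
    typedef unsigned long cpumask_sim;

    static cpumask_sim online_map_sim  = 0x0fUL; /* CPUs 0-3 online   */
    static cpumask_sim default_aff_sim = ~0UL;   /* like CPU_MASK_ALL */

    /* Mirrors the cpus_and() step inside irq_select_affinity(). */
    static cpumask_sim select_affinity_sim(void)
    {
            return online_map_sim & default_aff_sim;
    }

    int main(void)
    {
            printf("effective affinity mask: %#lx\n", select_affinity_sim());
            return 0;
    }

On this simulated 4-CPU box the result is 0xf: every online CPU stays eligible until the default mask is narrowed via the proc file added in the proc.c hunks below.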
@@ -361,7 +384,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
- if (desc->chip && desc->chip->set_type)
+ if (desc->chip->set_type)
desc->chip->set_type(irq,
new->flags & IRQF_TRIGGER_MASK);
else
@@ -371,8 +394,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
*/
printk(KERN_WARNING "No IRQF_TRIGGER set_type "
"function for IRQ %d (%s)\n", irq,
- desc->chip ? desc->chip->name :
- "unknown");
+ desc->chip->name);
} else
compat_irq_chip_set_default_handler(desc);
@@ -389,6 +411,9 @@ int setup_irq(unsigned int irq, struct irqaction *new)
} else
/* Undo nested disables: */
desc->depth = 1;
+
+ /* Set default affinity mask once everything is setup */
+ irq_select_affinity(irq);
}
/* Reset broken irq detection when installing new handler */
desc->irq_count = 0;
@@ -578,8 +603,6 @@ int request_irq(unsigned int irq, irq_handler_t handler,
action->next = NULL;
action->dev_id = dev_id;
- select_smp_affinity(irq);
-
#ifdef CONFIG_DEBUG_SHIRQ
if (irqflags & IRQF_SHARED) {
/*
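With the manage.c changes, the default affinity is applied inside setup_irq() via irq_select_affinity(), replacing the old select_smp_affinity() call that used to sit in request_irq(), so every registered handler picks up the default automatically. A hedged sketch of an era-appropriate driver registration (the IRQ number, names, and cookie below are hypothetical, not taken from this commit):

    #include <linux/module.h>
    #include <linux/interrupt.h>

    /* Hypothetical device IRQ and token cookie, for illustration only. */
    #define EXAMPLE_IRQ 10
    static int example_cookie;

    static irqreturn_t example_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int __init example_init(void)
    {
            /*
             * After this commit the default affinity mask is applied
             * inside setup_irq(); the driver no longer needs (and can
             * no longer rely on) select_smp_affinity().
             */
            return request_irq(EXAMPLE_IRQ, example_handler, IRQF_SHARED,
                               "example", &example_cookie);
    }

    static void __exit example_exit(void)
    {
            free_irq(EXAMPLE_IRQ, &example_cookie);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");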
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index c2f2ccb0549..6c6d35d68ee 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -44,7 +44,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
unsigned int irq = (int)(long)data, full_count = count, err;
- cpumask_t new_value, tmp;
+ cpumask_t new_value;
if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
irq_balancing_disabled(irq))
@@ -62,17 +62,51 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
* way to make the system unusable accidentally :-) At least
* one online CPU still has to be targeted.
*/
- cpus_and(tmp, new_value, cpu_online_map);
- if (cpus_empty(tmp))
+ if (!cpus_intersects(new_value, cpu_online_map))
/* Special case for empty set - allow the architecture
code to set default SMP affinity. */
- return select_smp_affinity(irq) ? -EINVAL : full_count;
+ return irq_select_affinity(irq) ? -EINVAL : full_count;
irq_set_affinity(irq, new_value);
return full_count;
}
+static int default_affinity_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len = cpumask_scnprintf(page, count, irq_default_affinity);
+ if (count - len < 2)
+ return -EINVAL;
+ len += sprintf(page + len, "\n");
+ return len;
+}
+
+static int default_affinity_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ unsigned int full_count = count, err;
+ cpumask_t new_value;
+
+ err = cpumask_parse_user(buffer, count, new_value);
+ if (err)
+ return err;
+
+ if (!is_affinity_mask_valid(new_value))
+ return -EINVAL;
+
+ /*
+ * Do not allow disabling IRQs completely - it's a too easy
+ * way to make the system unusable accidentally :-) At least
+ * one online CPU still has to be targeted.
+ */
+ if (!cpus_intersects(new_value, cpu_online_map))
+ return -EINVAL;
+
+ irq_default_affinity = new_value;
+
+ return full_count;
+}
#endif
static int irq_spurious_read(char *page, char **start, off_t off,
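The new default_affinity_read()/default_affinity_write() handlers above back a single proc file. A minimal userspace sketch that exercises it (the path comes from the hunk; writing requires root, and a mask that misses every online CPU is rejected with -EINVAL):

    #include <stdio.h>

    /* Minimal sketch: read, then narrow, /proc/irq/default_smp_affinity.
     * Assumes a kernel with this commit and CONFIG_SMP. */
    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/proc/irq/default_smp_affinity", "r");

            if (!f) {
                    perror("read open");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("current default: %s", buf);
            fclose(f);

            /* Restrict IRQs set up from now on to CPUs 0-3. An empty
             * intersection with cpu_online_map fails, mirroring
             * default_affinity_write() above. */
            f = fopen("/proc/irq/default_smp_affinity", "w");
            if (!f) {
                    perror("write open");
                    return 1;
            }
            if (fputs("f\n", f) == EOF)
                    perror("write");
            fclose(f);
            return 0;
    }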
@@ -171,6 +205,21 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
remove_proc_entry(action->dir->name, irq_desc[irq].dir);
}
+static void register_default_affinity_proc(void)
+{
+#ifdef CONFIG_SMP
+ struct proc_dir_entry *entry;
+
+ /* create /proc/irq/default_smp_affinity */
+ entry = create_proc_entry("default_smp_affinity", 0600, root_irq_dir);
+ if (entry) {
+ entry->data = NULL;
+ entry->read_proc = default_affinity_read;
+ entry->write_proc = default_affinity_write;
+ }
+#endif
+}
+
void init_irq_proc(void)
{
int i;
@@ -180,6 +229,8 @@ void init_irq_proc(void)
if (!root_irq_dir)
return;
+ register_default_affinity_proc();
+
/*
* Create entries for all existing IRQs.
*/
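End to end: once init_irq_proc() has registered /proc/irq/default_smp_affinity, narrowing the default there shapes the mask that irq_select_affinity() assigns to IRQs set up afterwards, which can be checked through the existing per-IRQ smp_affinity files. A small checker sketch (the IRQ number 10 is a placeholder, not taken from this commit):

    #include <stdio.h>

    /* Print the effective affinity of one IRQ. */
    int main(void)
    {
            char path[64], buf[64];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/irq/%d/smp_affinity", 10);
            f = fopen(path, "r");
            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("IRQ 10 affinity: %s", buf);
            fclose(f);
            return 0;
    }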