author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-05-26 23:31:23 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-27 10:11:37 +0200
commit		eb1e305f4ef201e549ffd475b7dcbcd4ec36d7dc (patch)
tree		078c5acb7d6ca6e3f24b45c5d4a660abcdc75202 /drivers
parent		d5edbc1f75420935b1ec7e65df10c8f81cea82de (diff)
xen: add rebind_evtchn_irq
Add rebind_evtchn_irq(), which will rebind a device driver's existing irq to a new event channel on restore. Since the new event channel will be masked and bound to vcpu0, we update the state accordingly and unmask the irq once everything is set up.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
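For illustration, a minimal sketch (not part of this patch) of how a frontend driver's restore path might use the new call; struct my_frontend, my_frontend_resume() and new_evtchn are hypothetical names, and only rebind_evtchn_irq() itself is introduced by this commit:

/* Hypothetical resume hook: the irq bound before suspend is kept, but the
 * backend hands out a fresh event channel after restore, so rebind the
 * existing irq to that new channel. */
static int my_frontend_resume(struct my_frontend *dev, int new_evtchn)
{
	rebind_evtchn_irq(new_evtchn, dev->irq);
	return 0;
}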
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/xen/events.c	27
1 file changed, 27 insertions, 0 deletions
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 4f0f22b020e..f64e9798129 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -557,6 +557,33 @@ out:
put_cpu();
}
+/* Rebind a new event channel to an existing irq. */
+void rebind_evtchn_irq(int evtchn, int irq)
+{
+ /* Make sure the irq is masked, since the new event channel
+ will also be masked. */
+ disable_irq(irq);
+
+ spin_lock(&irq_mapping_update_lock);
+
+ /* After resume the irq<->evtchn mappings are all cleared out */
+ BUG_ON(evtchn_to_irq[evtchn] != -1);
+ /* Expect irq to have been bound before,
+ so the bindcount should be non-0 */
+ BUG_ON(irq_bindcount[irq] == 0);
+
+ evtchn_to_irq[evtchn] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ /* new event channels are always bound to cpu 0 */
+ irq_set_affinity(irq, cpumask_of_cpu(0));
+
+ /* Unmask the event channel. */
+ enable_irq(irq);
+}
+
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{