author    Len Brown <len.brown@intel.com>    2009-01-09 03:39:43 -0500
committer Len Brown <len.brown@intel.com>    2009-01-09 03:39:43 -0500
commit    b2576e1d4408e134e2188c967b1f28af39cd79d4
tree      004f3c82faab760f304ce031d6d2f572e7746a50 /virt
parent    3cc8a5f4ba91f67bbdb81a43a99281a26aab8d77
parent    2150edc6c5cf00f7adb54538b9ea2a3e9cedca3f
Merge branch 'linus' into release
Diffstat (limited to 'virt')
-rw-r--r--   virt/kvm/ioapic.c                                  8
-rw-r--r--   virt/kvm/ioapic.h                                  2
-rw-r--r--   virt/kvm/iommu.c (renamed from virt/kvm/vtd.c)   135
-rw-r--r--   virt/kvm/irq_comm.c                               19
-rw-r--r--   virt/kvm/kvm_main.c                              470
-rw-r--r--   virt/kvm/kvm_trace.c                               1
6 files changed, 474 insertions(+), 161 deletions(-)
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 53772bb4632..23b81cf242a 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -150,10 +150,11 @@ static int ioapic_inj_irq(struct kvm_ioapic *ioapic,
static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
{
kvm_inject_nmi(vcpu);
+ kvm_vcpu_kick(vcpu);
}
-static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
- u8 dest_mode)
+u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
+ u8 dest_mode)
{
u32 mask = 0;
int i;
@@ -207,7 +208,8 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
"vector=%x trig_mode=%x\n",
dest, dest_mode, delivery_mode, vector, trig_mode);
- deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode);
+ deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic, dest,
+ dest_mode);
if (!deliver_bitmask) {
ioapic_debug("no target on destination\n");
return 0;
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index cd7ae7691c9..49c9581d258 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -85,5 +85,7 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
int kvm_ioapic_init(struct kvm *kvm);
void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
+u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
+ u8 dest_mode);
#endif
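
[Note: the rename in ioapic.c, together with the extern just added to ioapic.h, turns a file-static helper into a shared interface; the MSI dispatch code introduced in kvm_main.c later in this diff is the first outside caller. A minimal caller looks roughly like the sketch below — illustration only, the wrapper function is hypothetical:]

    /* Sketch: resolve which vcpus an interrupt targets.  The returned
     * mask has one bit per vcpu index in ioapic->kvm->vcpus[]. */
    static void sketch_deliver(struct kvm_ioapic *ioapic, u8 dest, u8 dest_mode)
    {
            u32 deliver_bitmask;

            deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic, dest,
                                                              dest_mode);
            if (!deliver_bitmask)
                    return;         /* no vcpu matched the destination */
    }
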
diff --git a/virt/kvm/vtd.c b/virt/kvm/iommu.c
index a770874f3a3..e9693a29d00 100644
--- a/virt/kvm/vtd.c
+++ b/virt/kvm/iommu.c
@@ -25,6 +25,7 @@
#include <linux/kvm_host.h>
#include <linux/pci.h>
#include <linux/dmar.h>
+#include <linux/iommu.h>
#include <linux/intel-iommu.h>
static int kvm_iommu_unmap_memslots(struct kvm *kvm);
@@ -37,7 +38,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
gfn_t gfn = base_gfn;
pfn_t pfn;
int i, r = 0;
- struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
+ struct iommu_domain *domain = kvm->arch.iommu_domain;
/* check if iommu exists and in use */
if (!domain)
@@ -45,20 +46,17 @@ int kvm_iommu_map_pages(struct kvm *kvm,
for (i = 0; i < npages; i++) {
/* check if already mapped */
- pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
- gfn_to_gpa(gfn));
- if (pfn)
+ if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
continue;
pfn = gfn_to_pfn(kvm, gfn);
- r = intel_iommu_page_mapping(domain,
- gfn_to_gpa(gfn),
- pfn_to_hpa(pfn),
- PAGE_SIZE,
- DMA_PTE_READ |
- DMA_PTE_WRITE);
+ r = iommu_map_range(domain,
+ gfn_to_gpa(gfn),
+ pfn_to_hpa(pfn),
+ PAGE_SIZE,
+ IOMMU_READ | IOMMU_WRITE);
if (r) {
- printk(KERN_ERR "kvm_iommu_map_pages:"
+ printk(KERN_ERR "kvm_iommu_map_address:"
"iommu failed to map pfn=%lx\n", pfn);
goto unmap_pages;
}
@@ -73,7 +71,7 @@ unmap_pages:
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
- int i, r;
+ int i, r = 0;
down_read(&kvm->slots_lock);
for (i = 0; i < kvm->nmemslots; i++) {
@@ -86,50 +84,79 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
return r;
}
-int kvm_iommu_map_guest(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
+int kvm_assign_device(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *assigned_dev)
{
struct pci_dev *pdev = NULL;
+ struct iommu_domain *domain = kvm->arch.iommu_domain;
int r;
- if (!intel_iommu_found()) {
- printk(KERN_ERR "%s: intel iommu not found\n", __func__);
+ /* check if iommu exists and in use */
+ if (!domain)
+ return 0;
+
+ pdev = assigned_dev->dev;
+ if (pdev == NULL)
return -ENODEV;
+
+ r = iommu_attach_device(domain, &pdev->dev);
+ if (r) {
+ printk(KERN_ERR "assign device %x:%x.%x failed",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ return r;
}
- printk(KERN_DEBUG "VT-d direct map: host bdf = %x:%x:%x\n",
- assigned_dev->host_busnr,
- PCI_SLOT(assigned_dev->host_devfn),
- PCI_FUNC(assigned_dev->host_devfn));
+ printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n",
+ assigned_dev->host_busnr,
+ PCI_SLOT(assigned_dev->host_devfn),
+ PCI_FUNC(assigned_dev->host_devfn));
+
+ return 0;
+}
+
+int kvm_deassign_device(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *assigned_dev)
+{
+ struct iommu_domain *domain = kvm->arch.iommu_domain;
+ struct pci_dev *pdev = NULL;
+
+ /* check if iommu exists and in use */
+ if (!domain)
+ return 0;
pdev = assigned_dev->dev;
+ if (pdev == NULL)
+ return -ENODEV;
- if (pdev == NULL) {
- if (kvm->arch.intel_iommu_domain) {
- intel_iommu_domain_exit(kvm->arch.intel_iommu_domain);
- kvm->arch.intel_iommu_domain = NULL;
- }
+ iommu_detach_device(domain, &pdev->dev);
+
+ printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n",
+ assigned_dev->host_busnr,
+ PCI_SLOT(assigned_dev->host_devfn),
+ PCI_FUNC(assigned_dev->host_devfn));
+
+ return 0;
+}
+
+int kvm_iommu_map_guest(struct kvm *kvm)
+{
+ int r;
+
+ if (!iommu_found()) {
+ printk(KERN_ERR "%s: iommu not found\n", __func__);
return -ENODEV;
}
- kvm->arch.intel_iommu_domain = intel_iommu_domain_alloc(pdev);
- if (!kvm->arch.intel_iommu_domain)
- return -ENODEV;
+ kvm->arch.iommu_domain = iommu_domain_alloc();
+ if (!kvm->arch.iommu_domain)
+ return -ENOMEM;
r = kvm_iommu_map_memslots(kvm);
if (r)
goto out_unmap;
- intel_iommu_detach_dev(kvm->arch.intel_iommu_domain,
- pdev->bus->number, pdev->devfn);
-
- r = intel_iommu_context_mapping(kvm->arch.intel_iommu_domain,
- pdev);
- if (r) {
- printk(KERN_ERR "Domain context map for %s failed",
- pci_name(pdev));
- goto out_unmap;
- }
return 0;
out_unmap:
@@ -138,19 +165,26 @@ out_unmap:
}
static void kvm_iommu_put_pages(struct kvm *kvm,
- gfn_t base_gfn, unsigned long npages)
+ gfn_t base_gfn, unsigned long npages)
{
gfn_t gfn = base_gfn;
pfn_t pfn;
- struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
- int i;
+ struct iommu_domain *domain = kvm->arch.iommu_domain;
+ unsigned long i;
+ u64 phys;
+
+ /* check if iommu exists and in use */
+ if (!domain)
+ return;
for (i = 0; i < npages; i++) {
- pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
- gfn_to_gpa(gfn));
+ phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
+ pfn = phys >> PAGE_SHIFT;
kvm_release_pfn_clean(pfn);
gfn++;
}
+
+ iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages);
}
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
@@ -168,24 +202,13 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
- struct kvm_assigned_dev_kernel *entry;
- struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
+ struct iommu_domain *domain = kvm->arch.iommu_domain;
/* check if iommu exists and in use */
if (!domain)
return 0;
- list_for_each_entry(entry, &kvm->arch.assigned_dev_head, list) {
- printk(KERN_DEBUG "VT-d unmap: host bdf = %x:%x:%x\n",
- entry->host_busnr,
- PCI_SLOT(entry->host_devfn),
- PCI_FUNC(entry->host_devfn));
-
- /* detach kvm dmar domain */
- intel_iommu_detach_dev(domain, entry->host_busnr,
- entry->host_devfn);
- }
kvm_iommu_unmap_memslots(kvm);
- intel_iommu_domain_exit(domain);
+ iommu_domain_free(domain);
return 0;
}
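
[Note: taken together, the hunks above replace every intel_iommu_* entry point with its counterpart from the generic <linux/iommu.h> layer, which is why the file moves from vtd.c to iommu.c. Condensed into one per-VM lifecycle sketch — a restatement of the code above with error paths and locking trimmed, not additional API:]

    /* 1. create a domain lazily on first device assignment */
    if (!iommu_found())
            return -ENODEV;
    kvm->arch.iommu_domain = iommu_domain_alloc();

    /* 2. mirror guest physical memory into the domain, page by page */
    r = iommu_map_range(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
                        PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);

    /* 3. bind/unbind the assigned PCI function */
    r = iommu_attach_device(domain, &pdev->dev);   /* kvm_assign_device()   */
    iommu_detach_device(domain, &pdev->dev);       /* kvm_deassign_device() */

    /* 4. teardown: drop the translations, then the domain itself */
    iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages);
    iommu_domain_free(domain);
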
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 55ad76ee2d0..aa5d1e5c497 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -61,10 +61,9 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list);
}
-void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
- struct kvm_irq_ack_notifier *kian)
+void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian)
{
- hlist_del(&kian->link);
+ hlist_del_init(&kian->link);
}
/* The caller must hold kvm->lock mutex */
@@ -73,11 +72,15 @@ int kvm_request_irq_source_id(struct kvm *kvm)
unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
int irq_source_id = find_first_zero_bit(bitmap,
sizeof(kvm->arch.irq_sources_bitmap));
+
if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
- irq_source_id = -EFAULT;
- } else
- set_bit(irq_source_id, bitmap);
+ return -EFAULT;
+ }
+
+ ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
+ set_bit(irq_source_id, bitmap);
+
return irq_source_id;
}
@@ -85,7 +88,9 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
int i;
- if (irq_source_id <= 0 ||
+ ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
+
+ if (irq_source_id < 0 ||
irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
return;
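
[Note: the irq_comm.c hunks tighten the source-id allocator: kvm_request_irq_source_id() now fails outright with -EFAULT when the bitmap is exhausted, and kvm_free_irq_source_id() rejects only ids below zero, with ASSERTs guarding the reserved KVM_USERSPACE_IRQ_SOURCE_ID. The intended pairing, as the device-assignment code later in this diff uses it — sketch, caller holds kvm->lock per the comment above:]

    int id = kvm_request_irq_source_id(kvm);
    if (id < 0)
            return id;              /* allocator exhausted */
    adev->irq_source_id = id;

    /* assert/deassert a guest irq on behalf of this source */
    kvm_set_irq(kvm, adev->irq_source_id, adev->guest_irq, 1);
    kvm_set_irq(kvm, adev->irq_source_id, adev->guest_irq, 0);

    kvm_free_irq_source_id(kvm, adev->irq_source_id);
    adev->irq_source_id = -1;       /* sentinel, as kvm_free_assigned_irq() expects */
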
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a87f45edfae..3a5a08298aa 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -47,6 +47,10 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>
+#ifdef CONFIG_X86
+#include <asm/msidef.h>
+#endif
+
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif
@@ -60,10 +64,13 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+static int msi2intx = 1;
+module_param(msi2intx, bool, 0);
+
DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
-static cpumask_t cpus_hardware_enabled;
+static cpumask_var_t cpus_hardware_enabled;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
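
[Note: the new msi2intx parameter gates the MSI-to-INTx translation implemented by the device-assignment hunks below. At the default msi2intx=1, KVM may drive the host device with MSI even while the guest programs legacy INTx (pci_enable_msi() is probed during irq assignment); with msi2intx=0 the host irq type strictly follows what userspace requested. It is forced to 0 on non-x86 near the end of this file. Being a module parameter, it is set at load time, e.g. modprobe kvm msi2intx=0.]
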
@@ -75,9 +82,60 @@ struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
-bool kvm_rebooting;
+static bool kvm_rebooting;
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+
+#ifdef CONFIG_X86
+static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
+{
+ int vcpu_id;
+ struct kvm_vcpu *vcpu;
+ struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
+ int dest_id = (dev->guest_msi.address_lo & MSI_ADDR_DEST_ID_MASK)
+ >> MSI_ADDR_DEST_ID_SHIFT;
+ int vector = (dev->guest_msi.data & MSI_DATA_VECTOR_MASK)
+ >> MSI_DATA_VECTOR_SHIFT;
+ int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
+ (unsigned long *)&dev->guest_msi.address_lo);
+ int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
+ (unsigned long *)&dev->guest_msi.data);
+ int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
+ (unsigned long *)&dev->guest_msi.data);
+ u32 deliver_bitmask;
+
+ BUG_ON(!ioapic);
+
+ deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
+ dest_id, dest_mode);
+ /* IOAPIC delivery mode value is the same as MSI here */
+ switch (delivery_mode) {
+ case IOAPIC_LOWEST_PRIORITY:
+ vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
+ deliver_bitmask);
+ if (vcpu != NULL)
+ kvm_apic_set_irq(vcpu, vector, trig_mode);
+ else
+ printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
+ break;
+ case IOAPIC_FIXED:
+ for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+ if (!(deliver_bitmask & (1 << vcpu_id)))
+ continue;
+ deliver_bitmask &= ~(1 << vcpu_id);
+ vcpu = ioapic->kvm->vcpus[vcpu_id];
+ if (vcpu)
+ kvm_apic_set_irq(vcpu, vector, trig_mode);
+ }
+ break;
+ default:
+ printk(KERN_INFO "kvm: unsupported MSI delivery mode\n");
+ }
+}
+#else
+static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) {}
+#endif
+
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
int assigned_dev_id)
{
@@ -104,9 +162,16 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
* finer-grained lock, update this
*/
mutex_lock(&assigned_dev->kvm->lock);
- kvm_set_irq(assigned_dev->kvm,
- assigned_dev->irq_source_id,
- assigned_dev->guest_irq, 1);
+ if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_INTX)
+ kvm_set_irq(assigned_dev->kvm,
+ assigned_dev->irq_source_id,
+ assigned_dev->guest_irq, 1);
+ else if (assigned_dev->irq_requested_type &
+ KVM_ASSIGNED_DEV_GUEST_MSI) {
+ assigned_device_msi_dispatch(assigned_dev);
+ enable_irq(assigned_dev->host_irq);
+ assigned_dev->host_irq_disabled = false;
+ }
mutex_unlock(&assigned_dev->kvm->lock);
kvm_put_kvm(assigned_dev->kvm);
}
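
[Note: assigned_device_msi_dispatch() above reproduces what hardware does with a guest MSI write: on x86 the destination APIC id sits in address bits 19:12 and the vector in data bits 7:0, which the MSI_ADDR_DEST_ID_* and MSI_DATA_VECTOR_* macros from <asm/msidef.h> extract. A worked decode — sketch, the register values are made up for illustration:]

    u32 address_lo = 0xfee01000;    /* hypothetical guest-programmed values */
    u32 data = 0x0041;

    int dest_id = (address_lo & MSI_ADDR_DEST_ID_MASK)
                    >> MSI_ADDR_DEST_ID_SHIFT;      /* dest APIC id = 1   */
    int vector = (data & MSI_DATA_VECTOR_MASK)
                    >> MSI_DATA_VECTOR_SHIFT;       /* vector 0x41 = 65   */
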
@@ -117,8 +182,12 @@ static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
(struct kvm_assigned_dev_kernel *) dev_id;
kvm_get_kvm(assigned_dev->kvm);
+
schedule_work(&assigned_dev->interrupt_work);
+
disable_irq_nosync(irq);
+ assigned_dev->host_irq_disabled = true;
+
return IRQ_HANDLED;
}
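
[Note: this handler establishes the mask-and-defer discipline used throughout the assignment code. Hard-irq context cannot take kvm->lock, so the line is masked, host_irq_disabled is recorded, and injection is punted to the work item queued just above. The line is unmasked either by the work handler itself (MSI case, earlier in this diff) or at guest EOI by the ack notifier in the next hunk, which must re-check the flag because a shared guest irq can deliver acks on behalf of a different device. In outline:]

    /* hard irq:   schedule_work(&adev->interrupt_work);
     *             disable_irq_nosync(irq); host_irq_disabled = true;
     * work item:  INTx -> kvm_set_irq(..., 1)         (unmask at guest EOI)
     *             MSI  -> msi dispatch + enable_irq() (unmask immediately)
     * guest EOI:  kvm_set_irq(..., 0);
     *             if (host_irq_disabled) enable_irq() (irq may be shared)
     */
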
@@ -132,19 +201,32 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
dev = container_of(kian, struct kvm_assigned_dev_kernel,
ack_notifier);
+
kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
- enable_irq(dev->host_irq);
+
+ /* The guest irq may be shared so this ack may be
+ * from another device.
+ */
+ if (dev->host_irq_disabled) {
+ enable_irq(dev->host_irq);
+ dev->host_irq_disabled = false;
+ }
}
-static void kvm_free_assigned_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel
- *assigned_dev)
+static void kvm_free_assigned_irq(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *assigned_dev)
{
- if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested)
- free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+ if (!irqchip_in_kernel(kvm))
+ return;
+
+ kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);
- kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
- kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
+ if (assigned_dev->irq_source_id != -1)
+ kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
+ assigned_dev->irq_source_id = -1;
+
+ if (!assigned_dev->irq_requested_type)
+ return;
if (cancel_work_sync(&assigned_dev->interrupt_work))
/* We had pending work. That means we will have to take
@@ -152,6 +234,23 @@ static void kvm_free_assigned_device(struct kvm *kvm,
*/
kvm_put_kvm(kvm);
+ free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+
+ if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
+ pci_disable_msi(assigned_dev->dev);
+
+ assigned_dev->irq_requested_type = 0;
+}
+
+
+static void kvm_free_assigned_device(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel
+ *assigned_dev)
+{
+ kvm_free_assigned_irq(kvm, assigned_dev);
+
+ pci_reset_function(assigned_dev->dev);
+
pci_release_regions(assigned_dev->dev);
pci_disable_device(assigned_dev->dev);
pci_dev_put(assigned_dev->dev);
@@ -174,6 +273,95 @@ void kvm_free_all_assigned_devices(struct kvm *kvm)
}
}
+static int assigned_device_update_intx(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *adev,
+ struct kvm_assigned_irq *airq)
+{
+ adev->guest_irq = airq->guest_irq;
+ adev->ack_notifier.gsi = airq->guest_irq;
+
+ if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX)
+ return 0;
+
+ if (irqchip_in_kernel(kvm)) {
+ if (!msi2intx &&
+ adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) {
+ free_irq(adev->host_irq, (void *)kvm);
+ pci_disable_msi(adev->dev);
+ }
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ if (airq->host_irq)
+ adev->host_irq = airq->host_irq;
+ else
+ adev->host_irq = adev->dev->irq;
+
+ /* Even though this is PCI, we don't want to use shared
+ * interrupts. Sharing host devices with guest-assigned devices
+ * on the same interrupt line is not a happy situation: there
+ * are going to be long delays in accepting, acking, etc.
+ */
+ if (request_irq(adev->host_irq, kvm_assigned_dev_intr,
+ 0, "kvm_assigned_intx_device", (void *)adev))
+ return -EIO;
+ }
+
+ adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX |
+ KVM_ASSIGNED_DEV_HOST_INTX;
+ return 0;
+}
+
+#ifdef CONFIG_X86
+static int assigned_device_update_msi(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *adev,
+ struct kvm_assigned_irq *airq)
+{
+ int r;
+
+ if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
+ /* x86 don't care upper address of guest msi message addr */
+ adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
+ adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
+ adev->guest_msi.address_lo = airq->guest_msi.addr_lo;
+ adev->guest_msi.data = airq->guest_msi.data;
+ adev->ack_notifier.gsi = -1;
+ } else if (msi2intx) {
+ adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
+ adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
+ adev->guest_irq = airq->guest_irq;
+ adev->ack_notifier.gsi = airq->guest_irq;
+ }
+
+ if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
+ return 0;
+
+ if (irqchip_in_kernel(kvm)) {
+ if (!msi2intx) {
+ if (adev->irq_requested_type &
+ KVM_ASSIGNED_DEV_HOST_INTX)
+ free_irq(adev->host_irq, (void *)adev);
+
+ r = pci_enable_msi(adev->dev);
+ if (r)
+ return r;
+ }
+
+ adev->host_irq = adev->dev->irq;
+ if (request_irq(adev->host_irq, kvm_assigned_dev_intr, 0,
+ "kvm_assigned_msi_device", (void *)adev))
+ return -EIO;
+ }
+
+ if (!msi2intx)
+ adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_MSI;
+
+ adev->irq_requested_type |= KVM_ASSIGNED_DEV_HOST_MSI;
+ return 0;
+}
+#endif
+
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
struct kvm_assigned_irq
*assigned_irq)
@@ -190,49 +378,68 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
return -EINVAL;
}
- if (match->irq_requested) {
- match->guest_irq = assigned_irq->guest_irq;
- match->ack_notifier.gsi = assigned_irq->guest_irq;
- mutex_unlock(&kvm->lock);
- return 0;
- }
+ if (!match->irq_requested_type) {
+ INIT_WORK(&match->interrupt_work,
+ kvm_assigned_dev_interrupt_work_handler);
+ if (irqchip_in_kernel(kvm)) {
+ /* Register ack nofitier */
+ match->ack_notifier.gsi = -1;
+ match->ack_notifier.irq_acked =
+ kvm_assigned_dev_ack_irq;
+ kvm_register_irq_ack_notifier(kvm,
+ &match->ack_notifier);
+
+ /* Request IRQ source ID */
+ r = kvm_request_irq_source_id(kvm);
+ if (r < 0)
+ goto out_release;
+ else
+ match->irq_source_id = r;
- INIT_WORK(&match->interrupt_work,
- kvm_assigned_dev_interrupt_work_handler);
+#ifdef CONFIG_X86
+ /* Determine host device irq type, we can know the
+ * result from dev->msi_enabled */
+ if (msi2intx)
+ pci_enable_msi(match->dev);
+#endif
+ }
+ }
- if (irqchip_in_kernel(kvm)) {
- if (!capable(CAP_SYS_RAWIO)) {
- r = -EPERM;
+ if ((!msi2intx &&
+ (assigned_irq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI)) ||
+ (msi2intx && match->dev->msi_enabled)) {
+#ifdef CONFIG_X86
+ r = assigned_device_update_msi(kvm, match, assigned_irq);
+ if (r) {
+ printk(KERN_WARNING "kvm: failed to enable "
+ "MSI device!\n");
goto out_release;
}
-
- if (assigned_irq->host_irq)
- match->host_irq = assigned_irq->host_irq;
- else
- match->host_irq = match->dev->irq;
- match->guest_irq = assigned_irq->guest_irq;
- match->ack_notifier.gsi = assigned_irq->guest_irq;
- match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
- kvm_register_irq_ack_notifier(kvm, &match->ack_notifier);
- r = kvm_request_irq_source_id(kvm);
- if (r < 0)
+#else
+ r = -ENOTTY;
+#endif
+ } else if (assigned_irq->host_irq == 0 && match->dev->irq == 0) {
+ /* Host device IRQ 0 means don't support INTx */
+ if (!msi2intx) {
+ printk(KERN_WARNING
+ "kvm: wait device to enable MSI!\n");
+ r = 0;
+ } else {
+ printk(KERN_WARNING
+ "kvm: failed to enable MSI device!\n");
+ r = -ENOTTY;
goto out_release;
- else
- match->irq_source_id = r;
-
- /* Even though this is PCI, we don't want to use shared
- * interrupts. Sharing host devices with guest-assigned devices
- * on the same interrupt line is not a happy situation: there
- * are going to be long delays in accepting, acking, etc.
- */
- if (request_irq(match->host_irq, kvm_assigned_dev_intr, 0,
- "kvm_assigned_device", (void *)match)) {
- r = -EIO;
+ }
+ } else {
+ /* Non-sharing INTx mode */
+ r = assigned_device_update_intx(kvm, match, assigned_irq);
+ if (r) {
+ printk(KERN_WARNING "kvm: failed to enable "
+ "INTx device!\n");
goto out_release;
}
}
- match->irq_requested = true;
mutex_unlock(&kvm->lock);
return r;
out_release:
@@ -283,17 +490,26 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
__func__);
goto out_disable;
}
+
+ pci_reset_function(dev);
+
match->assigned_dev_id = assigned_dev->assigned_dev_id;
match->host_busnr = assigned_dev->busnr;
match->host_devfn = assigned_dev->devfn;
+ match->flags = assigned_dev->flags;
match->dev = dev;
-
+ match->irq_source_id = -1;
match->kvm = kvm;
list_add(&match->list, &kvm->arch.assigned_dev_head);
if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
- r = kvm_iommu_map_guest(kvm, match);
+ if (!kvm->arch.iommu_domain) {
+ r = kvm_iommu_map_guest(kvm);
+ if (r)
+ goto out_list_del;
+ }
+ r = kvm_assign_device(kvm, match);
if (r)
goto out_list_del;
}
@@ -315,6 +531,35 @@ out_free:
}
#endif
+#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
+static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
+ struct kvm_assigned_pci_dev *assigned_dev)
+{
+ int r = 0;
+ struct kvm_assigned_dev_kernel *match;
+
+ mutex_lock(&kvm->lock);
+
+ match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+ assigned_dev->assigned_dev_id);
+ if (!match) {
+ printk(KERN_INFO "%s: device hasn't been assigned before, "
+ "so cannot be deassigned\n", __func__);
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
+ kvm_deassign_device(kvm, match);
+
+ kvm_free_assigned_device(kvm, match);
+
+out:
+ mutex_unlock(&kvm->lock);
+ return r;
+}
+#endif
+
static inline int valid_vcpu(int n)
{
return likely(n >= 0 && n < KVM_MAX_VCPUS);
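
[Note: KVM_DEASSIGN_PCI_DEVICE, whose handler appears above and whose ioctl dispatch case is added further down, is the userspace-visible half of this work. Invoking it looks roughly like the sketch below — fd and device id are hypothetical, headers <linux/kvm.h> and <sys/ioctl.h> assumed:]

    struct kvm_assigned_pci_dev adev = {
            .assigned_dev_id = dev_id,              /* id used at assign time */
            .flags = KVM_DEV_ASSIGN_ENABLE_IOMMU,   /* also detach from the domain */
    };
    if (ioctl(vm_fd, KVM_DEASSIGN_PCI_DEVICE, &adev) < 0)
            perror("KVM_DEASSIGN_PCI_DEVICE");
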
@@ -355,57 +600,48 @@ static void ack_flush(void *_completed)
{
}
-void kvm_flush_remote_tlbs(struct kvm *kvm)
+static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
int i, cpu, me;
- cpumask_t cpus;
+ cpumask_var_t cpus;
+ bool called = true;
struct kvm_vcpu *vcpu;
+ if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
+ cpumask_clear(cpus);
+
me = get_cpu();
- cpus_clear(cpus);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
vcpu = kvm->vcpus[i];
if (!vcpu)
continue;
- if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+ if (test_and_set_bit(req, &vcpu->requests))
continue;
cpu = vcpu->cpu;
- if (cpu != -1 && cpu != me)
- cpu_set(cpu, cpus);
+ if (cpus != NULL && cpu != -1 && cpu != me)
+ cpumask_set_cpu(cpu, cpus);
}
- if (cpus_empty(cpus))
- goto out;
- ++kvm->stat.remote_tlb_flush;
- smp_call_function_mask(cpus, ack_flush, NULL, 1);
-out:
+ if (unlikely(cpus == NULL))
+ smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
+ else if (!cpumask_empty(cpus))
+ smp_call_function_many(cpus, ack_flush, NULL, 1);
+ else
+ called = false;
put_cpu();
+ free_cpumask_var(cpus);
+ return called;
}
-void kvm_reload_remote_mmus(struct kvm *kvm)
+void kvm_flush_remote_tlbs(struct kvm *kvm)
{
- int i, cpu, me;
- cpumask_t cpus;
- struct kvm_vcpu *vcpu;
-
- me = get_cpu();
- cpus_clear(cpus);
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (!vcpu)
- continue;
- if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
- continue;
- cpu = vcpu->cpu;
- if (cpu != -1 && cpu != me)
- cpu_set(cpu, cpus);
- }
- if (cpus_empty(cpus))
- goto out;
- smp_call_function_mask(cpus, ack_flush, NULL, 1);
-out:
- put_cpu();
+ if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
+ ++kvm->stat.remote_tlb_flush;
}
+void kvm_reload_remote_mmus(struct kvm *kvm)
+{
+ make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
+}
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
@@ -710,6 +946,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
+ if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
+ goto out;
if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
@@ -821,7 +1059,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free;
}
- kvm_free_physmem_slot(&old, &new);
+ kvm_free_physmem_slot(&old, npages ? &new : NULL);
+ /* Slot deletion case: we have to update the current slot */
+ if (!npages)
+ *memslot = old;
#ifdef CONFIG_DMAR
/* map the pages in iommu page table */
r = kvm_iommu_map_pages(kvm, base_gfn, npages);
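
[Note: the new user_alloc check above means the userspace address backing a slot must itself be page-aligned, not just the guest physical address and size. mmap() memory satisfies this by construction; arbitrary heap pointers need not. A minimal userspace sketch of standard KVM_SET_USER_MEMORY_REGION usage, fd names hypothetical:]

    void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  /* page-aligned */
    struct kvm_userspace_memory_region region = {
            .slot = 0,
            .guest_phys_addr = gpa,                 /* page-aligned, checked earlier */
            .memory_size = size,                    /* multiple of PAGE_SIZE */
            .userspace_addr = (unsigned long)mem,   /* newly checked for alignment */
    };
    ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
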
@@ -918,7 +1159,7 @@ int kvm_is_error_hva(unsigned long addr)
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
-static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
int i;
@@ -931,11 +1172,12 @@ static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
}
return NULL;
}
+EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
gfn = unalias_gfn(kvm, gfn);
- return __gfn_to_memslot(kvm, gfn);
+ return gfn_to_memslot_unaliased(kvm, gfn);
}
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
@@ -959,7 +1201,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
struct kvm_memory_slot *slot;
gfn = unalias_gfn(kvm, gfn);
- slot = __gfn_to_memslot(kvm, gfn);
+ slot = gfn_to_memslot_unaliased(kvm, gfn);
if (!slot)
return bad_hva();
return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
@@ -1210,7 +1452,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
struct kvm_memory_slot *memslot;
gfn = unalias_gfn(kvm, gfn);
- memslot = __gfn_to_memslot(kvm, gfn);
+ memslot = gfn_to_memslot_unaliased(kvm, gfn);
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
@@ -1295,7 +1537,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
return 0;
}
-static const struct file_operations kvm_vcpu_fops = {
+static struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
.compat_ioctl = kvm_vcpu_ioctl,
@@ -1651,6 +1893,19 @@ static long kvm_vm_ioctl(struct file *filp,
break;
}
#endif
+#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
+ case KVM_DEASSIGN_PCI_DEVICE: {
+ struct kvm_assigned_pci_dev assigned_dev;
+
+ r = -EFAULT;
+ if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+ goto out;
+ r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
+ if (r)
+ goto out;
+ break;
+ }
+#endif
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
}
@@ -1689,7 +1944,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static const struct file_operations kvm_vm_fops = {
+static struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
.compat_ioctl = kvm_vm_ioctl,
@@ -1711,6 +1966,18 @@ static int kvm_dev_ioctl_create_vm(void)
return fd;
}
+static long kvm_dev_ioctl_check_extension_generic(long arg)
+{
+ switch (arg) {
+ case KVM_CAP_USER_MEMORY:
+ case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+ return 1;
+ default:
+ break;
+ }
+ return kvm_dev_ioctl_check_extension(arg);
+}
+
static long kvm_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -1730,7 +1997,7 @@ static long kvm_dev_ioctl(struct file *filp,
r = kvm_dev_ioctl_create_vm();
break;
case KVM_CHECK_EXTENSION:
- r = kvm_dev_ioctl_check_extension(arg);
+ r = kvm_dev_ioctl_check_extension_generic(arg);
break;
case KVM_GET_VCPU_MMAP_SIZE:
r = -EINVAL;
@@ -1771,9 +2038,9 @@ static void hardware_enable(void *junk)
{
int cpu = raw_smp_processor_id();
- if (cpu_isset(cpu, cpus_hardware_enabled))
+ if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
return;
- cpu_set(cpu, cpus_hardware_enabled);
+ cpumask_set_cpu(cpu, cpus_hardware_enabled);
kvm_arch_hardware_enable(NULL);
}
@@ -1781,9 +2048,9 @@ static void hardware_disable(void *junk)
{
int cpu = raw_smp_processor_id();
- if (!cpu_isset(cpu, cpus_hardware_enabled))
+ if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
return;
- cpu_clear(cpu, cpus_hardware_enabled);
+ cpumask_clear_cpu(cpu, cpus_hardware_enabled);
kvm_arch_hardware_disable(NULL);
}
@@ -2017,9 +2284,14 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
bad_pfn = page_to_pfn(bad_page);
+ if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
+ r = -ENOMEM;
+ goto out_free_0;
+ }
+
r = kvm_arch_hardware_setup();
if (r < 0)
- goto out_free_0;
+ goto out_free_0a;
for_each_online_cpu(cpu) {
smp_call_function_single(cpu,
@@ -2053,6 +2325,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
}
kvm_chardev_ops.owner = module;
+ kvm_vm_fops.owner = module;
+ kvm_vcpu_fops.owner = module;
r = misc_register(&kvm_dev);
if (r) {
@@ -2062,6 +2336,9 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out;
+#ifndef CONFIG_X86
+ msi2intx = 0;
+#endif
return 0;
@@ -2078,6 +2355,8 @@ out_free_2:
on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
kvm_arch_hardware_unsetup();
+out_free_0a:
+ free_cpumask_var(cpus_hardware_enabled);
out_free_0:
__free_page(bad_page);
out:
@@ -2101,6 +2380,7 @@ void kvm_exit(void)
kvm_arch_hardware_unsetup();
kvm_arch_exit();
kvm_exit_debug();
+ free_cpumask_var(cpus_hardware_enabled);
__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);
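
[Note: two kvm_main.c themes worth flagging. First, kvm_flush_remote_tlbs() and kvm_reload_remote_mmus() are folded into one make_all_cpus_request() helper, and every static/on-stack cpumask_t becomes cpumask_var_t so large-NR_CPUS builds do not overflow the stack; the conversion pattern used above is:]

    cpumask_var_t cpus;

    /* GFP_ATOMIC: this runs with preemption disabled.  On allocation
     * failure the code degrades to broadcasting to cpu_online_mask
     * instead of failing the request. */
    if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
            cpumask_clear(cpus);
    ...
    if (cpus != NULL && cpu != -1 && cpu != me)
            cpumask_set_cpu(cpu, cpus);
    ...
    if (!cpumask_empty(cpus))
            smp_call_function_many(cpus, ack_flush, NULL, 1);
    free_cpumask_var(cpus);         /* safe even if allocation failed */

[Second, kvm_vm_fops and kvm_vcpu_fops lose their const so kvm_init() can set .owner = module at runtime, keeping the module pinned while VM and vcpu file descriptors remain open.]
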
diff --git a/virt/kvm/kvm_trace.c b/virt/kvm/kvm_trace.c
index 41dcc845f78..f5987444644 100644
--- a/virt/kvm/kvm_trace.c
+++ b/virt/kvm/kvm_trace.c
@@ -252,6 +252,7 @@ void kvm_trace_cleanup(void)
struct kvm_trace_probe *p = &kvm_trace_probes[i];
marker_probe_unregister(p->name, p->probe_func, p);
}
+ marker_synchronize_unregister();
relay_close(kt->rchan);
debugfs_remove(kt->lost_file);
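
[Note: the kvm_trace.c one-liner closes a teardown race: marker_probe_unregister() does not wait for probe callbacks already executing on other CPUs, so marker_synchronize_unregister() is needed as a barrier before relay_close() frees the relay channel those probes write into.]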