 virt/kvm/kvm_main.c | 382 ++++++++++++++++++++++++++++++++++++++++++------
 1 files changed, 337 insertions(+), 45 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7dd9b0b85e4..cf0ab8ed384 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -51,6 +51,12 @@
#include "coalesced_mmio.h"
#endif
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include "irq.h"
+#endif
+
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
@@ -71,11 +77,253 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
bool kvm_rebooting;
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
+ int assigned_dev_id)
+{
+ struct list_head *ptr;
+ struct kvm_assigned_dev_kernel *match;
+
+ list_for_each(ptr, head) {
+ match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
+ if (match->assigned_dev_id == assigned_dev_id)
+ return match;
+ }
+ return NULL;
+}
+
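Review note: purely stylistic, but the lookup above could drop the extra list
cursor by using list_for_each_entry(); an equivalent sketch against the same
structures:

	static struct kvm_assigned_dev_kernel *
	kvm_find_assigned_dev(struct list_head *head, int assigned_dev_id)
	{
		struct kvm_assigned_dev_kernel *match;

		list_for_each_entry(match, head, list)
			if (match->assigned_dev_id == assigned_dev_id)
				return match;
		return NULL;
	}
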
+static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
+{
+ struct kvm_assigned_dev_kernel *assigned_dev;
+
+ assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
+ interrupt_work);
+
+ /* kvm->lock is taken so the irq can be injected into the guest
+ * safely. Once interrupt injection (or the ioapic code) uses a
+ * finer-grained lock, update this.
+ */
+ mutex_lock(&assigned_dev->kvm->lock);
+ kvm_set_irq(assigned_dev->kvm,
+ assigned_dev->guest_irq, 1);
+ mutex_unlock(&assigned_dev->kvm->lock);
+ kvm_put_kvm(assigned_dev->kvm);
+}
+
+/* FIXME: Implement the OR logic needed to make shared interrupts on
+ * this line behave properly
+ */
+static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
+{
+ struct kvm_assigned_dev_kernel *assigned_dev =
+ (struct kvm_assigned_dev_kernel *) dev_id;
+
+ kvm_get_kvm(assigned_dev->kvm);
+ schedule_work(&assigned_dev->interrupt_work);
+ disable_irq_nosync(irq);
+ return IRQ_HANDLED;
+}
+
+/* Ack the irq line for an assigned device */
+static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
+{
+ struct kvm_assigned_dev_kernel *dev;
+
+ if (kian->gsi == -1)
+ return;
+
+ dev = container_of(kian, struct kvm_assigned_dev_kernel,
+ ack_notifier);
+ kvm_set_irq(dev->kvm, dev->guest_irq, 0);
+ enable_irq(dev->host_irq);
+}
+
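Review note: the three functions above form a mask/defer/unmask cycle. The
hard-IRQ handler masks the host line and defers the actual injection to a
workqueue, because kvm_set_irq() needs kvm->lock (a mutex, which cannot be
taken in interrupt context); the kvm reference pins the VM across the
deferral, and the line is unmasked only once the guest acks the interrupt.
A minimal self-contained sketch of the same defer-to-workqueue pattern, with
hypothetical names (in the patch itself the enable_irq() is deferred further,
to the ack notifier):

	#include <linux/interrupt.h>
	#include <linux/workqueue.h>

	/* hypothetical device, not from this patch */
	struct fwd_dev {
		int host_irq;
		struct work_struct work;	/* INIT_WORK()'d at setup */
	};

	static void fwd_work_fn(struct work_struct *work)
	{
		struct fwd_dev *dev = container_of(work, struct fwd_dev, work);

		/* sleepable context: mutexes may be taken and the event
		 * forwarded; then unmask the line again */
		enable_irq(dev->host_irq);
	}

	static irqreturn_t fwd_intr(int irq, void *dev_id)
	{
		struct fwd_dev *dev = dev_id;

		disable_irq_nosync(irq);	/* mask; cannot sleep here */
		schedule_work(&dev->work);
		return IRQ_HANDLED;
	}
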
+static void kvm_free_assigned_device(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel
+ *assigned_dev)
+{
+ if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested)
+ free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+
+ kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
+
+ if (cancel_work_sync(&assigned_dev->interrupt_work))
+ /* The work item was still pending, which means it never ran
+ * and never dropped the kvm reference it holds; do so here.
+ */
+ kvm_put_kvm(kvm);
+
+ pci_release_regions(assigned_dev->dev);
+ pci_disable_device(assigned_dev->dev);
+ pci_dev_put(assigned_dev->dev);
+
+ list_del(&assigned_dev->list);
+ kfree(assigned_dev);
+}
+
+void kvm_free_all_assigned_devices(struct kvm *kvm)
+{
+ struct list_head *ptr, *ptr2;
+ struct kvm_assigned_dev_kernel *assigned_dev;
+
+ list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
+ assigned_dev = list_entry(ptr,
+ struct kvm_assigned_dev_kernel,
+ list);
+
+ kvm_free_assigned_device(kvm, assigned_dev);
+ }
+}
+
+static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
+ struct kvm_assigned_irq
+ *assigned_irq)
+{
+ int r = 0;
+ struct kvm_assigned_dev_kernel *match;
+
+ mutex_lock(&kvm->lock);
+
+ match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+ assigned_irq->assigned_dev_id);
+ if (!match) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
+
+ if (match->irq_requested) {
+ match->guest_irq = assigned_irq->guest_irq;
+ match->ack_notifier.gsi = assigned_irq->guest_irq;
+ mutex_unlock(&kvm->lock);
+ return 0;
+ }
+
+ INIT_WORK(&match->interrupt_work,
+ kvm_assigned_dev_interrupt_work_handler);
+
+ if (irqchip_in_kernel(kvm)) {
+ if (!capable(CAP_SYS_RAWIO)) {
+ r = -EPERM;
+ goto out_release;
+ }
+
+ if (assigned_irq->host_irq)
+ match->host_irq = assigned_irq->host_irq;
+ else
+ match->host_irq = match->dev->irq;
+ match->guest_irq = assigned_irq->guest_irq;
+ match->ack_notifier.gsi = assigned_irq->guest_irq;
+ match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
+ kvm_register_irq_ack_notifier(kvm, &match->ack_notifier);
+
+ /* Even though this is PCI, we don't want to use shared
+ * interrupts. Sharing host devices with guest-assigned devices
+ * on the same interrupt line is not a happy situation: there
+ * are going to be long delays in accepting, acking, etc.
+ */
+ if (request_irq(match->host_irq, kvm_assigned_dev_intr, 0,
+ "kvm_assigned_device", (void *)match)) {
+ r = -EIO;
+ goto out_release;
+ }
+ }
+
+ match->irq_requested = true;
+ mutex_unlock(&kvm->lock);
+ return r;
+out_release:
+ mutex_unlock(&kvm->lock);
+ kvm_free_assigned_device(kvm, match);
+ return r;
+}
+
+static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
+ struct kvm_assigned_pci_dev *assigned_dev)
+{
+ int r = 0;
+ struct kvm_assigned_dev_kernel *match;
+ struct pci_dev *dev;
+
+ mutex_lock(&kvm->lock);
+
+ match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+ assigned_dev->assigned_dev_id);
+ if (match) {
+ /* device already assigned */
+ r = -EINVAL;
+ goto out;
+ }
+
+ match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
+ if (match == NULL) {
+ printk(KERN_INFO "%s: Couldn't allocate memory\n",
+ __func__);
+ r = -ENOMEM;
+ goto out;
+ }
+ dev = pci_get_bus_and_slot(assigned_dev->busnr,
+ assigned_dev->devfn);
+ if (!dev) {
+ printk(KERN_INFO "%s: host device not found\n", __func__);
+ r = -EINVAL;
+ goto out_free;
+ }
+ if (pci_enable_device(dev)) {
+ printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
+ r = -EBUSY;
+ goto out_put;
+ }
+ r = pci_request_regions(dev, "kvm_assigned_device");
+ if (r) {
+ printk(KERN_INFO "%s: Could not get access to device regions\n",
+ __func__);
+ goto out_disable;
+ }
+ match->assigned_dev_id = assigned_dev->assigned_dev_id;
+ match->host_busnr = assigned_dev->busnr;
+ match->host_devfn = assigned_dev->devfn;
+ match->dev = dev;
+
+ match->kvm = kvm;
+
+ list_add(&match->list, &kvm->arch.assigned_dev_head);
+
+ if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
+ r = kvm_iommu_map_guest(kvm, match);
+ if (r)
+ goto out_list_del;
+ }
+
+out:
+ mutex_unlock(&kvm->lock);
+ return r;
+out_list_del:
+ list_del(&match->list);
+ pci_release_regions(dev);
+out_disable:
+ pci_disable_device(dev);
+out_put:
+ pci_dev_put(dev);
+out_free:
+ kfree(match);
+ mutex_unlock(&kvm->lock);
+ return r;
+}
+#endif
+
static inline int valid_vcpu(int n)
{
return likely(n >= 0 && n < KVM_MAX_VCPUS);
}
+inline int kvm_is_mmio_pfn(pfn_t pfn)
+{
+ if (pfn_valid(pfn))
+ return PageReserved(pfn_to_page(pfn));
+
+ return true;
+}
+
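Review note: spelling out the three cases kvm_is_mmio_pfn() distinguishes,
as implied by its definition:

	/*
	 *  pfn_valid(pfn) && !PageReserved(page) -> ordinary RAM, refcountable
	 *  pfn_valid(pfn) &&  PageReserved(page) -> reserved page, treat as MMIO
	 * !pfn_valid(pfn)                        -> no struct page at all, MMIO
	 */

The pfn_valid() checks in the helpers below are switched to this predicate,
so that reserved pages (such as those behind VM_PFNMAP mappings) are no
longer get/put as ordinary pages.
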
/*
* Switches to specified vcpu, until a matching vcpu_put()
*/
@@ -570,6 +818,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
}
kvm_free_physmem_slot(&old, &new);
+#ifdef CONFIG_DMAR
+ /* map the pages into the iommu page table */
+ r = kvm_iommu_map_pages(kvm, base_gfn, npages);
+ if (r)
+ goto out;
+#endif
return 0;
out_free:
@@ -708,9 +962,6 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
-/*
- * Requires current->mm->mmap_sem to be held
- */
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
struct page *page[1];
@@ -726,21 +977,24 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
return page_to_pfn(bad_page);
}
- npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
- NULL);
+ npages = get_user_pages_fast(addr, 1, 1, page);
if (unlikely(npages != 1)) {
struct vm_area_struct *vma;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
+
if (vma == NULL || addr < vma->vm_start ||
!(vma->vm_flags & VM_PFNMAP)) {
+ up_read(&current->mm->mmap_sem);
get_page(bad_page);
return page_to_pfn(bad_page);
}
pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- BUG_ON(pfn_valid(pfn));
+ up_read(&current->mm->mmap_sem);
+ BUG_ON(!kvm_is_mmio_pfn(pfn));
} else
pfn = page_to_pfn(page[0]);
@@ -754,10 +1008,10 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
pfn_t pfn;
pfn = gfn_to_pfn(kvm, gfn);
- if (pfn_valid(pfn))
+ if (!kvm_is_mmio_pfn(pfn))
return pfn_to_page(pfn);
- WARN_ON(!pfn_valid(pfn));
+ WARN_ON(kvm_is_mmio_pfn(pfn));
get_page(bad_page);
return bad_page;
@@ -773,7 +1027,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
void kvm_release_pfn_clean(pfn_t pfn)
{
- if (pfn_valid(pfn))
+ if (!kvm_is_mmio_pfn(pfn))
put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -799,7 +1053,7 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
void kvm_set_pfn_dirty(pfn_t pfn)
{
- if (pfn_valid(pfn)) {
+ if (!kvm_is_mmio_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
if (!PageReserved(page))
SetPageDirty(page);
@@ -809,14 +1063,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
void kvm_set_pfn_accessed(pfn_t pfn)
{
- if (pfn_valid(pfn))
+ if (!kvm_is_mmio_pfn(pfn))
mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
void kvm_get_pfn(pfn_t pfn)
{
- if (pfn_valid(pfn))
+ if (!kvm_is_mmio_pfn(pfn))
get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
@@ -972,12 +1226,12 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
for (;;) {
prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
- if (kvm_cpu_has_interrupt(vcpu))
- break;
- if (kvm_cpu_has_pending_timer(vcpu))
- break;
- if (kvm_arch_vcpu_runnable(vcpu))
+ if (kvm_cpu_has_interrupt(vcpu) ||
+ kvm_cpu_has_pending_timer(vcpu) ||
+ kvm_arch_vcpu_runnable(vcpu)) {
+ set_bit(KVM_REQ_UNHALT, &vcpu->requests);
break;
+ }
if (signal_pending(current))
break;
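
Review note: the three wakeup checks are folded into a single condition, and
KVM_REQ_UNHALT is now set when the vcpu becomes runnable, presumably so the
caller of kvm_vcpu_block() can distinguish "woken because runnable" from
"woken by a signal" when deciding whether to leave the halted state.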
@@ -1074,12 +1328,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
r = kvm_arch_vcpu_setup(vcpu);
if (r)
- goto vcpu_destroy;
+ return r;
mutex_lock(&kvm->lock);
if (kvm->vcpus[n]) {
r = -EEXIST;
- mutex_unlock(&kvm->lock);
goto vcpu_destroy;
}
kvm->vcpus[n] = vcpu;
@@ -1095,8 +1348,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
unlink:
mutex_lock(&kvm->lock);
kvm->vcpus[n] = NULL;
- mutex_unlock(&kvm->lock);
vcpu_destroy:
+ mutex_unlock(&kvm->lock);
kvm_arch_vcpu_destroy(vcpu);
return r;
}
@@ -1118,6 +1371,8 @@ static long kvm_vcpu_ioctl(struct file *filp,
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
int r;
+ struct kvm_fpu *fpu = NULL;
+ struct kvm_sregs *kvm_sregs = NULL;
if (vcpu->kvm->mm != current->mm)
return -EIO;
@@ -1165,25 +1420,28 @@ out_free2:
break;
}
case KVM_GET_SREGS: {
- struct kvm_sregs kvm_sregs;
-
- memset(&kvm_sregs, 0, sizeof kvm_sregs);
- r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
+ kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!kvm_sregs)
+ goto out;
+ r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
if (r)
goto out;
r = -EFAULT;
- if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
+ if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
goto out;
r = 0;
break;
}
case KVM_SET_SREGS: {
- struct kvm_sregs kvm_sregs;
-
+ kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!kvm_sregs)
+ goto out;
r = -EFAULT;
- if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
+ if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
goto out;
- r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
+ r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
if (r)
goto out;
r = 0;
@@ -1264,25 +1522,28 @@ out_free2:
break;
}
case KVM_GET_FPU: {
- struct kvm_fpu fpu;
-
- memset(&fpu, 0, sizeof fpu);
- r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
+ fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!fpu)
+ goto out;
+ r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
if (r)
goto out;
r = -EFAULT;
- if (copy_to_user(argp, &fpu, sizeof fpu))
+ if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
goto out;
r = 0;
break;
}
case KVM_SET_FPU: {
- struct kvm_fpu fpu;
-
+ fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!fpu)
+ goto out;
r = -EFAULT;
- if (copy_from_user(&fpu, argp, sizeof fpu))
+ if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
goto out;
- r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
+ r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
if (r)
goto out;
r = 0;
@@ -1292,6 +1553,8 @@ out_free2:
r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
}
out:
+ kfree(fpu);
+ kfree(kvm_sregs);
return r;
}
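
Review note: KVM_GET_SREGS/KVM_SET_SREGS and KVM_GET_FPU/KVM_SET_FPU
previously kept struct kvm_sregs and struct kvm_fpu on the kernel stack;
both are moved to kmalloc'd/kzalloc'd memory, presumably to shrink the
stack footprint of the ioctl path. The shared cleanup at the out: label is
safe because both pointers start out NULL and kfree(NULL) is a no-op.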
@@ -1360,6 +1623,30 @@ static long kvm_vm_ioctl(struct file *filp,
break;
}
#endif
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+ case KVM_ASSIGN_PCI_DEVICE: {
+ struct kvm_assigned_pci_dev assigned_dev;
+
+ r = -EFAULT;
+ if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+ goto out;
+ r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
+ if (r)
+ goto out;
+ break;
+ }
+ case KVM_ASSIGN_IRQ: {
+ struct kvm_assigned_irq assigned_irq;
+
+ r = -EFAULT;
+ if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
+ goto out;
+ r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
+ if (r)
+ goto out;
+ break;
+ }
+#endif
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
}
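
Review note: for reference, a sketch of how userspace could drive the two
new ioctls; the field names come from the structs handled above, while the
concrete values, PCI_DEVFN() usage, and vm_fd are illustrative assumptions
(error handling kept minimal):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>
	#include <linux/pci.h>		/* PCI_DEVFN() */

	/* vm_fd: a VM file descriptor from KVM_CREATE_VM (assumed) */

	struct kvm_assigned_pci_dev dev = {
		.assigned_dev_id = 1,			/* caller-chosen handle */
		.busnr           = 0,			/* host PCI bus number */
		.devfn           = PCI_DEVFN(3, 0),	/* host slot/function */
		.flags           = KVM_DEV_ASSIGN_ENABLE_IOMMU,
	};
	struct kvm_assigned_irq irq = {
		.assigned_dev_id = 1,
		.host_irq        = 0,	/* 0: fall back to the device's own irq */
		.guest_irq       = 10,	/* gsi injected into the guest */
	};

	if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev) < 0)
		perror("KVM_ASSIGN_PCI_DEVICE");
	if (ioctl(vm_fd, KVM_ASSIGN_IRQ, &irq) < 0)
		perror("KVM_ASSIGN_IRQ");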
@@ -1369,17 +1656,22 @@ out:
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
+ struct page *page[1];
+ unsigned long addr;
+ int npages;
+ gfn_t gfn = vmf->pgoff;
struct kvm *kvm = vma->vm_file->private_data;
- struct page *page;
- if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
+ addr = gfn_to_hva(kvm, gfn);
+ if (kvm_is_error_hva(addr))
return VM_FAULT_SIGBUS;
- page = gfn_to_page(kvm, vmf->pgoff);
- if (is_error_page(page)) {
- kvm_release_page_clean(page);
+
+ npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
+ NULL);
+ if (unlikely(npages != 1))
return VM_FAULT_SIGBUS;
- }
- vmf->page = page;
+
+ vmf->page = page[0];
return 0;
}