about summary refs log tree commit diff
path: root/linux-core/drm_vm.c
diff options
context:
space:
mode:
Diffstat (limited to 'linux-core/drm_vm.c')
-rw-r--r--  linux-core/drm_vm.c  345
1 files changed, 181 insertions, 164 deletions
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 63cf6f56..93d1c0b8 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -41,8 +41,9 @@
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
-static void drm_vm_ttm_close(struct vm_area_struct *vma);
-static void drm_vm_ttm_open(struct vm_area_struct *vma);
+static int drm_bo_mmap_locked(struct vm_area_struct *vma,
+ struct file *filp,
+ drm_local_map_t *map);
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
@@ -158,93 +159,6 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
}
#endif /* __OS_HAS_AGP */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \
- LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
-static
-#endif
-struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
- struct fault_data *data)
-{
- unsigned long address = data->address;
- drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
- unsigned long page_offset;
- struct page *page;
- drm_ttm_t *ttm;
- drm_buffer_manager_t *bm;
- drm_device_t *dev;
- unsigned long pfn;
- int err;
- pgprot_t pgprot;
-
- if (!map) {
- data->type = VM_FAULT_OOM;
- return NULL;
- }
-
- if (address > vma->vm_end) {
- data->type = VM_FAULT_SIGBUS;
- return NULL;
- }
-
- ttm = (drm_ttm_t *) map->offset;
-
- dev = ttm->dev;
-
- /*
- * Perhaps retry here?
- */
-
- mutex_lock(&dev->struct_mutex);
- drm_fixup_ttm_caching(ttm);
-
- bm = &dev->bm;
- page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
- page = ttm->pages[page_offset];
-
- if (!page) {
- if (drm_alloc_memctl(PAGE_SIZE)) {
- data->type = VM_FAULT_OOM;
- goto out;
- }
- page = ttm->pages[page_offset] =
- alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
- if (!page) {
- drm_free_memctl(PAGE_SIZE);
- data->type = VM_FAULT_OOM;
- goto out;
- }
- ++bm->cur_pages;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
- SetPageLocked(page);
-#else
- SetPageReserved(page);
-#endif
- }
-
- if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) &&
- !(ttm->be->flags & DRM_BE_FLAG_CMA)) {
-
- pfn = ttm->aper_offset + page_offset +
- (ttm->be->aperture_base >> PAGE_SHIFT);
- pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
- } else {
- pfn = page_to_pfn(page);
- pgprot = vma->vm_page_prot;
- }
-
- err = vm_insert_pfn(vma, address, pfn, pgprot);
-
- if (!err || err == -EBUSY)
- data->type = VM_FAULT_MINOR;
- else
- data->type = VM_FAULT_OOM;
- out:
- mutex_unlock(&dev->struct_mutex);
- return NULL;
-}
-#endif
-
/**
* \c nopage method for shared virtual memory.
*
@@ -504,20 +418,6 @@ static struct vm_operations_struct drm_vm_sg_ops = {
.close = drm_vm_close,
};
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
-static struct vm_operations_struct drm_vm_ttm_ops = {
- .nopage = drm_vm_ttm_nopage,
- .open = drm_vm_ttm_open,
- .close = drm_vm_ttm_close,
-};
-#else
-static struct vm_operations_struct drm_vm_ttm_ops = {
- .fault = drm_vm_ttm_fault,
- .open = drm_vm_ttm_open,
- .close = drm_vm_ttm_close,
-};
-#endif
-
/**
* \c open method for shared virtual memory.
*
@@ -555,28 +455,6 @@ static void drm_vm_open(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
-static void drm_vm_ttm_open_locked(struct vm_area_struct *vma) {
-
- drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
- drm_ttm_t *ttm;
-
- drm_vm_open_locked(vma);
- ttm = (drm_ttm_t *) map->offset;
- atomic_inc(&ttm->vma_count);
-#ifdef DRM_ODD_MM_COMPAT
- drm_ttm_add_vma(ttm, vma);
-#endif
-}
-
-static void drm_vm_ttm_open(struct vm_area_struct *vma) {
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_vm_ttm_open_locked(vma);
- mutex_unlock(&dev->struct_mutex);
-}
-
/**
* \c close method for all virtual memory types.
*
@@ -611,34 +489,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
}
-static void drm_vm_ttm_close(struct vm_area_struct *vma)
-{
- drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
- drm_ttm_t *ttm;
- drm_device_t *dev;
- int ret;
-
- drm_vm_close(vma);
- if (map) {
- ttm = (drm_ttm_t *) map->offset;
- dev = ttm->dev;
- mutex_lock(&dev->struct_mutex);
-#ifdef DRM_ODD_MM_COMPAT
- drm_ttm_delete_vma(ttm, vma);
-#endif
- if (atomic_dec_and_test(&ttm->vma_count)) {
- if (ttm->destroy) {
- ret = drm_destroy_ttm(ttm);
- BUG_ON(ret);
- drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
- }
- }
- mutex_unlock(&dev->struct_mutex);
- }
- return;
-}
-
-
/**
* mmap DMA memory.
*
@@ -834,17 +684,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = (void *)map;
vma->vm_flags |= VM_RESERVED;
break;
- case _DRM_TTM: {
- vma->vm_ops = &drm_vm_ttm_ops;
- vma->vm_private_data = (void *) map;
- vma->vm_file = filp;
- vma->vm_flags |= VM_RESERVED | VM_IO;
-#ifdef DRM_ODD_MM_COMPAT
- drm_ttm_map_bound(vma);
-#endif
- drm_vm_ttm_open_locked(vma);
- return 0;
- }
+ case _DRM_TTM:
+ return drm_bo_mmap_locked(vma, filp, map);
default:
return -EINVAL; /* This should never happen. */
}
@@ -868,3 +709,179 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
EXPORT_SYMBOL(drm_mmap);
+
+/**
+ * buffer object vm functions.
+ */
+
+/**
+ * \c Pagefault method for buffer objects.
+ *
+ * \param vma Virtual memory area.
+ * \param data Fault data on failure or refault.
+ * \return Always NULL as we insert pfns directly.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
+static
+#endif
+struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+ struct fault_data *data)
+{
+ unsigned long address = data->address;
+ drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+ drm_local_map_t *map;
+ unsigned long page_offset;
+ struct page *page = NULL;
+ drm_ttm_t *ttm;
+ drm_buffer_manager_t *bm;
+ drm_device_t *dev;
+ unsigned long pfn;
+ int err;
+ pgprot_t pgprot;
+ unsigned long bus_base;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+
+
+ mutex_lock(&bo->mutex);
+ map = bo->map_list.map;
+
+ if (!map) {
+ data->type = VM_FAULT_OOM;
+ goto out_unlock;
+ }
+
+ if (address > vma->vm_end) {
+ data->type = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ dev = bo->dev;
+ err = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size);
+
+ if (err) {
+ data->type = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+ if (bus_size) {
+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+ pgprot = drm_io_prot(_DRM_AGP, vma);
+ } else {
+ bm = &dev->bm;
+ ttm = bo->ttm;
+
+ page = ttm->pages[page_offset];
+ if (!page) {
+ page = drm_ttm_alloc_page();
+ if (!page) {
+ data->type = VM_FAULT_OOM;
+ goto out_unlock;
+ }
+ ttm->pages[page_offset] = page;
+ ++bm->cur_pages;
+ }
+ pfn = page_to_pfn(page);
+ pgprot = vma->vm_page_prot;
+ }
+
+ err = vm_insert_pfn(vma, address, pfn, pgprot);
+
+ if (!err || err == -EBUSY)
+ data->type = VM_FAULT_MINOR;
+ else
+ data->type = VM_FAULT_OOM;
+out_unlock:
+ mutex_unlock(&bo->mutex);
+ return NULL;
+}
+#endif
+
+static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
+{
+ drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+
+ drm_vm_open_locked(vma);
+ atomic_inc(&bo->usage);
+#ifdef DRM_MM_ODD_COMPAT
+ drm_bo_vm_add_vma(bo, vma);
+#endif
+}
+
+/**
+ * \c vma open method for buffer objects.
+ *
+ * \param vma virtual memory area.
+ */
+
+static void drm_bo_vm_open(struct vm_area_struct *vma)
+{
+ drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+ drm_device_t *dev = bo->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ drm_bo_vm_open_locked(vma);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c vma close method for buffer objects.
+ *
+ * \param vma virtual memory area.
+ */
+
+static void drm_bo_vm_close(struct vm_area_struct *vma)
+{
+ drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+ drm_device_t *dev = bo->dev;
+
+ drm_vm_close(vma);
+ if (bo) {
+ mutex_lock(&dev->struct_mutex);
+#ifdef DRM_MM_ODD_COMPAT
+ drm_bo_vm_delete_vma(bo, vma);
+#endif
+ drm_bo_usage_deref_locked(bo);
+ mutex_unlock(&dev->struct_mutex);
+ }
+ return;
+}
+
+static struct vm_operations_struct drm_bo_vm_ops = {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
+ .nopage = drm_bo_vm_nopage,
+#else
+ .fault = drm_bo_vm_fault,
+#endif
+ .open = drm_bo_vm_open,
+ .close = drm_bo_vm_close,
+};
+
+/**
+ * mmap buffer object memory.
+ *
+ * \param vma virtual memory area.
+ * \param filp file pointer.
+ * \param map The buffer object drm map.
+ * \return zero on success or a negative number on failure.
+ */
+
+int drm_bo_mmap_locked(struct vm_area_struct *vma,
+ struct file *filp,
+ drm_local_map_t *map)
+{
+ vma->vm_ops = &drm_bo_vm_ops;
+ vma->vm_private_data = map->handle;
+ vma->vm_file = filp;
+ vma->vm_flags |= VM_RESERVED | VM_IO;
+ drm_bo_vm_open_locked(vma);
+#ifdef DRM_ODD_MM_COMPAT
+ drm_ttm_map_bound(vma);
+#endif
+ return 0;
+}