Fix build for 2.6.21-rc1.
author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
           Mon, 26 Feb 2007 17:17:54 +0000 (18:17 +0100)
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
           Mon, 26 Feb 2007 17:17:54 +0000 (18:17 +0100)
The vm subsystem of 2.6.21 is fully compatible with the buffer object
vm code.
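
For reference, a minimal sketch (not the driver code itself) of the nopfn-style
fault pattern the buffer-object vm code settles on with this change. It assumes
a 2.6.21-era kernel where vm_insert_pfn() and the NOPFN_* return codes are
available; struct my_object and my_resolve_pfn() are purely illustrative names,
not part of the DRM tree.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mutex.h>

struct my_object {
	struct mutex mutex;
	/* description of the backing storage lives here */
};

/*
 * Hypothetical helper: translate a page offset within the vma into a
 * page frame number.  A real driver would consult its memory manager.
 */
static int my_resolve_pfn(struct my_object *obj, unsigned long page_offset,
			  unsigned long *pfn)
{
	return -EINVAL;	/* placeholder: no backing storage in this sketch */
}

static unsigned long my_vm_nopfn(struct vm_area_struct *vma,
				 unsigned long address)
{
	struct my_object *obj = vma->vm_private_data;
	unsigned long page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
	unsigned long pfn;
	unsigned long ret = NOPFN_REFAULT;

	if (address > vma->vm_end)
		return NOPFN_SIGBUS;

	/*
	 * Insert the pfn while holding the object mutex so we cannot race
	 * with teardown paths such as unmap_mapping_range().
	 */
	mutex_lock(&obj->mutex);
	if (my_resolve_pfn(obj, page_offset, &pfn)) {
		ret = NOPFN_SIGBUS;
		goto out;
	}
	if (vm_insert_pfn(vma, address, pfn))
		ret = NOPFN_OOM;
out:
	mutex_unlock(&obj->mutex);
	return ret;
}

static struct vm_operations_struct my_vm_ops = {
	.nopfn = my_vm_nopfn,
};

Because the handler fills in the pte itself and returns NOPFN_REFAULT, the core
mm simply retries the access. That is also why drm_vm.c can switch drm_bo_vm_ops
to a static drm_bo_vm_nopfn() below, and why drm_compat.h no longer needs to
export drm_bo_vm_fault() and vm_insert_pfn().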

linux-core/drm_compat.c
linux-core/drm_compat.h
linux-core/drm_vm.c

diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 4825f0c..2344181 100644
@@ -94,6 +94,11 @@ static struct {
 } drm_np_retry = 
 {SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
 
+
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, 
+                                   struct fault_data *data);
+
+
 struct page * get_nopage_retry(void)
 {
        if (atomic_read(&drm_np_retry.present) == 0) {
@@ -180,7 +185,7 @@ static int drm_pte_is_clear(struct vm_area_struct *vma,
        return ret;
 }
 
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                  unsigned long pfn)
 {
        int ret;
@@ -190,14 +195,106 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
        return ret;
 }
+
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, 
+                                   struct fault_data *data)
+{
+       unsigned long address = data->address;
+       drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+       unsigned long page_offset;
+       struct page *page = NULL;
+       drm_ttm_t *ttm; 
+       drm_device_t *dev;
+       unsigned long pfn;
+       int err;
+       unsigned long bus_base;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+       
+
+       mutex_lock(&bo->mutex);
+
+       err = drm_bo_wait(bo, 0, 1, 0);
+       if (err) {
+               data->type = (err == -EAGAIN) ? 
+                       VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+       
+       
+       /*
+        * If buffer happens to be in a non-mappable location,
+        * move it to a mappable.
+        */
+
+       if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+               unsigned long _end = jiffies + 3*DRM_HZ;
+               uint32_t new_mask = bo->mem.mask |
+                       DRM_BO_FLAG_MAPPABLE |
+                       DRM_BO_FLAG_FORCE_MAPPABLE;
+
+               do {
+                       err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+               } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
+
+               if (err) {
+                       DRM_ERROR("Timeout moving buffer to mappable location.\n");
+                       data->type = VM_FAULT_SIGBUS;
+                       goto out_unlock;
+               }
+       }
+
+       if (address > vma->vm_end) {
+               data->type = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+       dev = bo->dev;
+       err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, 
+                               &bus_size);
+
+       if (err) {
+               data->type = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
+       page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+       if (bus_size) {
+               drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
+
+               pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+               vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+       } else {
+               ttm = bo->ttm;
+
+               drm_ttm_fixup_caching(ttm);
+               page = drm_ttm_get_page(ttm, page_offset);
+               if (!page) {
+                       data->type = VM_FAULT_OOM;
+                       goto out_unlock;
+               }
+               pfn = page_to_pfn(page);
+               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       }
+       
+       err = vm_insert_pfn(vma, address, pfn);
+
+       if (!err || err == -EBUSY) 
+               data->type = VM_FAULT_MINOR; 
+       else
+               data->type = VM_FAULT_OOM;
+out_unlock:
+       mutex_unlock(&bo->mutex);
+       return NULL;
+}
+
 #endif
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) && !defined(DRM_FULL_MM_COMPAT))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
+  !defined(DRM_FULL_MM_COMPAT)
 
 /**
- * While waiting for the fault() handler to appear in
- * we accomplish approximately
- * the same wrapping it with nopfn.
  */
 
 unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 7741714..bf5899f 100644
@@ -212,19 +212,10 @@ extern void free_nopage_retry(void);
 #define NOPAGE_REFAULT get_nopage_retry()
 #endif
 
-#if !defined(DRM_FULL_MM_COMPAT) && \
-  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
-   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
 
-struct fault_data;
-extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
-                                   struct fault_data *data);
-
-#endif
 #ifndef DRM_FULL_MM_COMPAT
 
 /*
- * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
  * For now, just return a dummy page that we've allocated out of 
  * static space. The page will be put by do_nopage() since we've already
  * filled out the pte.
@@ -239,15 +230,12 @@ struct fault_data {
        int type;
 };
 
-
-extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 
-                        unsigned long pfn);
-
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
 extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                                     unsigned long address, 
                                     int *type);
-#else
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
+  !defined(DRM_FULL_MM_COMPAT)
 extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
                                     unsigned long address);
 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index f3b1088..a4a55e3 100644
@@ -718,28 +718,23 @@ EXPORT_SYMBOL(drm_mmap);
  * \c Pagefault method for buffer objects.
  *
  * \param vma Virtual memory area.
- * \param data Fault data on failure or refault.
- * \return Always NULL as we insert pfns directly.
+ * \param address File offset.
+ * \return Error or refault. The pfn is manually inserted.
  *
  * It's important that pfns are inserted while holding the bo->mutex lock.
  * otherwise we might race with unmap_mapping_range() which is always
  * called with the bo->mutex lock held.
  *
- * It's not pretty to modify the vma->vm_page_prot variable while not
- * holding the mm semaphore in write mode. However, we have it i read mode,
- * so we won't be racing with any other writers, and we only actually modify
- * it when no ptes are present so it shouldn't be a big deal.
+ * We're modifying the page attribute bits of the vma->vm_page_prot field,
+ * without holding the mmap_sem in write mode; we hold it only in read mode.
+ * These bits are not used by the mm subsystem code, and we consider them
+ * protected by the bo->mutex lock.
  */
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) ||   \
-     LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 #ifdef DRM_FULL_MM_COMPAT
-static
-#endif
-struct page *drm_bo_vm_fault(struct vm_area_struct *vma, 
-                            struct fault_data *data)
+static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, 
+                                    unsigned long address)
 {
-       unsigned long address = data->address;
        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page = NULL;
@@ -750,66 +745,43 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;
+       int ret = NOPFN_REFAULT;
        
-
-       mutex_lock(&bo->mutex);
+       if (address > vma->vm_end) 
+               return NOPFN_SIGBUS;
+               
+       err = mutex_lock_interruptible(&bo->mutex);
+       if (err)
+               return NOPFN_REFAULT;
 
        err = drm_bo_wait(bo, 0, 0, 0);
        if (err) {
-               data->type = (err == -EAGAIN) ? 
-                       VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+               ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
                goto out_unlock;
        }
-       
-       
+
        /*
         * If buffer happens to be in a non-mappable location,
         * move it to a mappable.
         */
 
-#ifdef DRM_BO_FULL_COMPAT
        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
                uint32_t new_mask = bo->mem.mask | 
                        DRM_BO_FLAG_MAPPABLE | 
                        DRM_BO_FLAG_FORCE_MAPPABLE;
                err = drm_bo_move_buffer(bo, new_mask, 0, 0);
-               
                if (err) {
-                       data->type = (err == -EAGAIN) ? 
-                               VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+                       ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
                        goto out_unlock;
                }
        }
-#else
-       if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
-               unsigned long _end = jiffies + 3*DRM_HZ;
-               uint32_t new_mask = bo->mem.mask |
-                       DRM_BO_FLAG_MAPPABLE |
-                       DRM_BO_FLAG_FORCE_MAPPABLE;
-
-               do {
-                       err = drm_bo_move_buffer(bo, new_mask, 0, 0);
-               } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
-
-               if (err) {
-                       DRM_ERROR("Timeout moving buffer to mappable location.\n");
-                       data->type = VM_FAULT_SIGBUS;
-                       goto out_unlock;
-               }
-       }
-#endif
-
-       if (address > vma->vm_end) {
-               data->type = VM_FAULT_SIGBUS;
-               goto out_unlock;
-       }
 
        dev = bo->dev;
        err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, 
                                &bus_size);
 
        if (err) {
-               data->type = VM_FAULT_SIGBUS;
+               ret = NOPFN_SIGBUS;
                goto out_unlock;
        }
 
@@ -826,7 +798,7 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                drm_ttm_fixup_caching(ttm);
                page = drm_ttm_get_page(ttm, page_offset);
                if (!page) {
-                       data->type = VM_FAULT_OOM;
+                       ret = NOPFN_OOM;
                        goto out_unlock;
                }
                pfn = page_to_pfn(page);
@@ -834,14 +806,13 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
        }
        
        err = vm_insert_pfn(vma, address, pfn);
-
-       if (!err || err == -EBUSY) 
-               data->type = VM_FAULT_MINOR; 
-       else
-               data->type = VM_FAULT_OOM;
+       if (err) {
+               ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
+               goto out_unlock;
+       }
 out_unlock:
        mutex_unlock(&bo->mutex);
-       return NULL;
+       return ret;
 }
 #endif
 
@@ -897,7 +868,7 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)
 
 static struct vm_operations_struct drm_bo_vm_ops = {
 #ifdef DRM_FULL_MM_COMPAT
-       .fault = drm_bo_vm_fault,
+       .nopfn = drm_bo_vm_nopfn,
 #else
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
        .nopfn = drm_bo_vm_nopfn,