
drm/ttm: Export the ttm_k[un]map_atomic_prot API.
author    Thomas Hellstrom <thellstrom@vmware.com>	Tue, 16 Jan 2018 08:12:05 +0000 (09:12 +0100)
committer Thomas Hellstrom <thellstrom@vmware.com>	Thu, 22 Mar 2018 10:10:06 +0000 (11:10 +0100)
It will be used by the vmwgfx CPU blit code.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
drivers/gpu/drm/ttm/ttm_bo_util.c
include/drm/ttm/ttm_bo_api.h

diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 11f27db..2ebbae6 100644
@@ -263,24 +263,45 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 #define __ttm_kunmap_atomic(__addr) vunmap(__addr)
 #endif
 
-static void *ttm_kmap_atomic_prot(struct page *page,
-                                 pgprot_t prot)
+
+/**
+ * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
+ * specified page protection.
+ *
+ * @page: The page to map.
+ * @prot: The page protection.
+ *
+ * This function maps a TTM page using the kmap_atomic api if available,
+ * otherwise falls back to vmap. The user must make sure that the
+ * specified page does not have an aliased mapping with a different caching
+ * policy unless the architecture explicitly allows it. Also mapping and
+ * unmapping using this api must be correctly nested. Unmapping should
+ * occur in the reverse order of mapping.
+ */
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
                return kmap_atomic(page);
        else
                return __ttm_kmap_atomic_prot(page, prot);
 }
+EXPORT_SYMBOL(ttm_kmap_atomic_prot);
 
-
-static void ttm_kunmap_atomic_prot(void *addr,
-                                  pgprot_t prot)
+/**
+ * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
+ * ttm_kmap_atomic_prot.
+ *
+ * @addr: The virtual address from the map.
+ * @prot: The page protection.
+ */
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
 {
        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
                kunmap_atomic(addr);
        else
                __ttm_kunmap_atomic(addr);
 }
+EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
 
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 8e2fb1a..c67977a 100644
@@ -709,6 +709,10 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev);
 
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
+
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
+
 /**
  * ttm_bo_io
  *