
drm/msm: Allocate secure buffer objects
author    Jordan Crouse <jcrouse@codeaurora.org>
          Thu, 4 May 2017 19:48:22 +0000 (13:48 -0600)
committer Jordan Crouse <jcrouse@codeaurora.org>
          Thu, 4 May 2017 20:07:07 +0000 (14:07 -0600)
Allow the user to allocate and use secure buffer objects. Secure
buffer objects are suitable for use as a write target while the GPU
is in secure mode. They work exactly like regular buffers, except
that secure buffers cannot be mmap()ed.

Change-Id: Ic0dedbadd8135fd8472b38ddf61e2bc70983b12f
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
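
For context, a userspace client would request a secure BO at allocation
time through the MSM GEM_NEW ioctl. The sketch below is not part of this
patch: it assumes the downstream uapi header exposes an MSM_BO_SECURE flag
alongside the existing caching flags in struct drm_msm_gem_new, and that
the msm uapi header is installed at the path shown.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>   /* assumed install path for the msm uapi header */

/* Allocate a write-combined, secure BO; fills *handle on success. */
static int alloc_secure_bo(int drm_fd, uint64_t size, uint32_t *handle)
{
        struct drm_msm_gem_new req;

        memset(&req, 0, sizeof(req));
        req.size  = size;
        req.flags = MSM_BO_WC | MSM_BO_SECURE;  /* MSM_BO_SECURE: assumed downstream uapi flag */

        if (ioctl(drm_fd, DRM_IOCTL_MSM_GEM_NEW, &req))
                return -1;

        *handle = req.handle;
        return 0;
}

/*
 * Note: attempting to mmap() such a BO (via the offset reported by
 * DRM_IOCTL_MSM_GEM_INFO) is expected to fail with -EACCES, matching the
 * msm_gem_mmap_obj() check added by this patch.
 */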
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8d6f31..4674c54 100644
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
+#include <soc/qcom/secure_buffer.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
+static int protect_pages(struct msm_gem_object *msm_obj)
+{
+       int perm = PERM_READ | PERM_WRITE;
+       int src = VMID_HLOS;
+       int dst = VMID_CP_PIXEL;
+
+       return hyp_assign_table(msm_obj->sgt, &src, 1, &dst, &perm, 1);
+}
+
+static int unprotect_pages(struct msm_gem_object *msm_obj)
+{
+       int perm = PERM_READ | PERM_WRITE | PERM_EXEC;
+       int src = VMID_CP_PIXEL;
+       int dst = VMID_HLOS;
+
+       return hyp_assign_table(msm_obj->sgt, &src, 1, &dst, &perm, 1);
+}
+
 static void *get_dmabuf_ptr(struct drm_gem_object *obj)
 {
        return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
@@ -109,6 +128,20 @@ static struct page **get_pages(struct drm_gem_object *obj)
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
                                msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+
+               /* Secure the pages if we need to */
+               if (use_pages(obj) && msm_obj->flags & MSM_BO_SECURE) {
+                       int ret = protect_pages(msm_obj);
+
+                       if (ret)
+                               return ERR_PTR(ret);
+
+                       /*
+                        * Set a flag to indicate the pages are locked by us and
+                        * need to be unlocked when the pages get freed
+                        */
+                       msm_obj->flags |= MSM_BO_LOCKED;
+               }
        }
 
        return msm_obj->pages;
@@ -119,12 +152,17 @@ static void put_pages(struct drm_gem_object *obj)
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
        if (msm_obj->pages) {
+               if (msm_obj->flags & MSM_BO_LOCKED) {
+                       unprotect_pages(msm_obj);
+                       msm_obj->flags &= ~MSM_BO_LOCKED;
+               }
+
                sg_free_table(msm_obj->sgt);
                kfree(msm_obj->sgt);
 
-               if (use_pages(obj))
+               if (use_pages(obj)) {
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
-               else {
+               } else {
                        drm_mm_remove_node(msm_obj->vram_node);
                        drm_free_large(msm_obj->pages);
                }
@@ -153,6 +191,12 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+       /* We can't mmap secure objects */
+       if (msm_obj->flags & MSM_BO_SECURE) {
+               drm_gem_vm_close(vma);
+               return -EACCES;
+       }
+
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9f89f42..d520422 100644
@@ -24,6 +24,7 @@
 
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
+#define MSM_BO_LOCKED        0x20000000    /* Pages have been securely locked */
 
 struct msm_gem_address_space {
        const char *name;