
Merge "msm/drm: Move msm_drm_config configuration into the GPUs"
author     Linux Build Service Account <lnxbuild@quicinc.com>
           Tue, 2 May 2017 16:07:18 +0000 (09:07 -0700)
committer  Gerrit - the friendly Code Review server <code-review@localhost>
           Tue, 2 May 2017 16:07:17 +0000 (09:07 -0700)
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/sde/sde_kms.c

@@@ -133,30 -133,10 +133,30 @@@ static int a5xx_submit(struct msm_gpu *
        OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
        OUT_RING(ring, 0x02);
  
 +      /* Record the always on counter before command execution */
 +      if (submit->profile_buf_iova) {
 +              uint64_t gpuaddr = submit->profile_buf_iova +
 +                      offsetof(struct drm_msm_gem_submit_profile_buffer,
 +                                      ticks_submitted);
 +
 +              /*
 +               * Set bit [30] to make this command a 64-bit write operation.
 +               * Bits [18-29] specify the number of consecutive registers to
 +               * copy, so set that field to 2, since we want to copy data
 +               * from REG_A5XX_RBBM_ALWAYSON_COUNTER_LO and [HI].
 +               */
 +              OUT_PKT7(ring, CP_REG_TO_MEM, 3);
 +              OUT_RING(ring, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO |
 +                              (1 << 30) | (2 << 18));
 +              OUT_RING(ring, lower_32_bits(gpuaddr));
 +              OUT_RING(ring, upper_32_bits(gpuaddr));
 +      }
 +
        /* Submit the commands */
        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 +              case MSM_SUBMIT_CMD_PROFILE_BUF:
                        break;
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
        OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
        OUT_RING(ring, 0x01);
  
 +      /* Record the always on counter after command execution */
 +      if (submit->profile_buf_iova) {
 +              uint64_t gpuaddr = submit->profile_buf_iova +
 +                      offsetof(struct drm_msm_gem_submit_profile_buffer,
 +                                      ticks_retired);
 +
 +              OUT_PKT7(ring, CP_REG_TO_MEM, 3);
 +              OUT_RING(ring, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO |
 +                              (1 << 30) | (2 << 18));
 +              OUT_RING(ring, lower_32_bits(gpuaddr));
 +              OUT_RING(ring, upper_32_bits(gpuaddr));
 +      }
 +
        /* Write the fence to the scratch register */
        OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
        OUT_RING(ring, submit->fence);
        /* Set bit 0 to trigger an interrupt on preempt complete */
        OUT_RING(ring, 0x01);
  
 +      if (submit->profile_buf_iova) {
 +              unsigned long flags;
 +              uint64_t ktime;
 +              struct drm_msm_gem_submit_profile_buffer *profile_buf =
 +                      submit->profile_buf_vaddr;
 +
 +              /*
 +               * With this profiling, we are trying to create the closest
 +               * possible mapping between the CPU time domain (monotonic
 +               * clock) and the GPU time domain (ticks). To make this
 +               * happen, briefly turn off interrupts so that no interrupt
 +               * runs between collecting the two samples.
 +               */
 +              local_irq_save(flags);
 +
 +              profile_buf->ticks_queued = gpu_read64(gpu,
 +                      REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
 +                      REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
 +
 +              ktime = ktime_get_raw_ns();
 +
 +              local_irq_restore(flags);
 +
 +              do_div(ktime, NSEC_PER_SEC);
 +
 +              profile_buf->queue_time = ktime;
 +              profile_buf->submit_time = ktime;
 +      }
 +
        a5xx_flush(gpu, ring);
  
        /* Check to see if we need to start preemption */
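
The CP_REG_TO_MEM packets added above copy the RBBM always-on counter into the
profile buffer around the submitted IBs. As a rough sketch of how the first
payload dword is packed (the helper name and the standalone form are
illustrative assumptions, not part of the patch):

	#include <stdint.h>

	/*
	 * Build the first CP_REG_TO_MEM dword as used in a5xx_submit():
	 * bit 30 requests a 64-bit memory write and bits [18:29] hold the
	 * number of consecutive registers to copy.
	 */
	static inline uint32_t cp_reg_to_mem_0(uint32_t reg_offset,
			uint32_t count, int is_64bit)
	{
		return reg_offset | ((is_64bit ? 1u : 0u) << 30) |
			((count & 0xfffu) << 18);
	}

	/*
	 * Copying REG_A5XX_RBBM_ALWAYSON_COUNTER_LO and _HI (two registers)
	 * as one 64-bit write then matches the OUT_RING() value above:
	 * cp_reg_to_mem_0(REG_A5XX_RBBM_ALWAYSON_COUNTER_LO, 2, 1)
	 */
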
@@@ -1368,6 -1306,7 +1368,7 @@@ struct msm_gpu *a5xx_gpu_init(struct dr
        struct a5xx_gpu *a5xx_gpu = NULL;
        struct adreno_gpu *adreno_gpu;
        struct msm_gpu *gpu;
+       struct msm_gpu_config a5xx_config = { 0 };
        int ret;
  
        if (!pdev) {
        /* Check the efuses for some configuration */
        a5xx_efuses_read(pdev, adreno_gpu);
  
-       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+       a5xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+       a5xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+       /* Set the number of rings to 4 - yay preemption */
+       a5xx_config.nr_rings = 4;
+       /*
+        * Set the user domain range to fall into the TTBR1 region for global
+        * objects
+        */
+       a5xx_config.va_start = 0x800000000;
+       a5xx_config.va_end = 0x8ffffffff;
+       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a5xx_config);
        if (ret) {
                a5xx_destroy(&(a5xx_gpu->base.base));
                return ERR_PTR(ret);
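
With the configuration moved out of adreno_gpu.c, each target now describes its
own register block, IRQ, ring count and GPU virtual address range. A minimal
sketch of what an older 32-bit target's setup might look like under the new
scheme, reusing the defaults this patch removes from the generic code further
below (the function name and the ring count are assumptions for illustration):

	/* Hypothetical per-target setup mirroring the a5xx hunk above */
	static void axxx_fill_gpu_config(struct msm_gpu_config *config)
	{
		config->ioname = MSM_GPU_DEFAULT_IONAME;   /* presumably "kgsl_3d0_reg_memory" */
		config->irqname = MSM_GPU_DEFAULT_IRQNAME; /* presumably "kgsl_3d0_irq" */
		config->nr_rings = 1;   /* assumed: single ring, no preemption */
		/* 32-bit GPU VA range, as in the defaults removed from adreno_gpu.c */
		config->va_start = 0x300000;
		config->va_end = 0xffffffff;
	}
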
@@@ -183,7 -183,6 +183,7 @@@ int adreno_submit(struct msm_gpu *gpu, 
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        /* ignore IB-targets */
                        break;
 +              case MSM_SUBMIT_CMD_PROFILE_BUF:
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                                break;
                case MSM_SUBMIT_CMD_BUF:
@@@ -405,10 -404,6 +405,6 @@@ void adreno_wait_ring(struct msm_ringbu
                        ring->gpu->name, ring->id);
  }
  
- static const char *iommu_ports[] = {
-               "gfx3d_user",
- };
  /* Read the set of powerlevels */
  static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
  {
@@@ -524,10 -519,10 +520,10 @@@ static int adreno_of_parse(struct platf
  
  int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct adreno_gpu *adreno_gpu,
-               const struct adreno_gpu_funcs *funcs, int nr_rings)
+               const struct adreno_gpu_funcs *funcs,
+               struct msm_gpu_config *gpu_config)
  {
        struct adreno_platform_config *config = pdev->dev.platform_data;
-       struct msm_gpu_config adreno_gpu_config  = { 0 };
        struct msm_gpu *gpu = &adreno_gpu->base;
        struct msm_mmu *mmu;
        int ret;
        /* Get the rest of the target configuration from the device tree */
        adreno_of_parse(pdev, gpu);
  
-       adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
-       adreno_gpu_config.irqname = "kgsl_3d0_irq";
-       adreno_gpu_config.nr_rings = nr_rings;
-       adreno_gpu_config.va_start = SZ_16M;
-       adreno_gpu_config.va_end = 0xffffffff;
-       if (adreno_gpu->revn >= 500) {
-               /* 5XX targets use a 64 bit region */
-               adreno_gpu_config.va_start = 0x800000000;
-               adreno_gpu_config.va_end = 0x8ffffffff;
-       } else {
-               adreno_gpu_config.va_start = 0x300000;
-               adreno_gpu_config.va_end = 0xffffffff;
-       }
-       adreno_gpu_config.nr_rings = nr_rings;
        ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
-                       adreno_gpu->info->name, &adreno_gpu_config);
+                       adreno_gpu->info->name, gpu_config);
        if (ret)
                return ret;
  
  
        mmu = gpu->aspace->mmu;
        if (mmu) {
-               ret = mmu->funcs->attach(mmu, iommu_ports,
-                               ARRAY_SIZE(iommu_ports));
+               ret = mmu->funcs->attach(mmu, NULL, 0);
                if (ret)
                        return ret;
        }
@@@ -722,7 -698,7 +699,7 @@@ static struct adreno_counter_group *get
                return ERR_PTR(-ENODEV);
  
        if (groupid >= adreno_gpu->nr_counter_groups)
-               return ERR_PTR(-EINVAL);
+               return ERR_PTR(-ENODEV);
  
        return (struct adreno_counter_group *)
                adreno_gpu->counter_groups[groupid];
@@@ -745,7 -721,7 +722,7 @@@ u64 adreno_read_counter(struct msm_gpu 
        struct adreno_counter_group *group =
                get_counter_group(gpu, groupid);
  
-       if (!IS_ERR(group) && group->funcs.read)
+       if (!IS_ERR_OR_NULL(group) && group->funcs.read)
                return group->funcs.read(gpu, group, counterid);
  
        return 0;
@@@ -756,6 -732,6 +733,6 @@@ void adreno_put_counter(struct msm_gpu 
        struct adreno_counter_group *group =
                get_counter_group(gpu, groupid);
  
-       if (!IS_ERR(group) && group->funcs.put)
+       if (!IS_ERR_OR_NULL(group) && group->funcs.put)
                group->funcs.put(gpu, group, counterid);
  }
@@@ -148,7 -148,6 +148,7 @@@ enum msm_mdp_conn_property 
        CONNECTOR_PROP_DST_Y,
        CONNECTOR_PROP_DST_W,
        CONNECTOR_PROP_DST_H,
 +      CONNECTOR_PROP_PLL_DELTA,
  
        /* enum/bitmask properties */
        CONNECTOR_PROP_TOPOLOGY_NAME,
@@@ -413,7 -412,7 +413,7 @@@ void msm_gem_address_space_put(struct m
  /* For GPU and legacy display */
  struct msm_gem_address_space *
  msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
-               const char *name);
+               int type, const char *name);
  struct msm_gem_address_space *
  msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
                uint64_t start, uint64_t end);
  /* Additional internal-use only BO flags: */
  #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
  
- struct msm_gem_aspace_ops {
-       int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
-               struct sg_table *sgt, void *priv, unsigned int flags);
-       void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
-               struct sg_table *sgt, void *priv);
-       void (*destroy)(struct msm_gem_address_space *);
- };
  struct msm_gem_address_space {
        const char *name;
        struct msm_mmu *mmu;
-       const struct msm_gem_aspace_ops *ops;
        struct kref kref;
+       struct drm_mm mm;
+       u64 va_len;
  };
  
  struct msm_gem_vma {
@@@ -125,8 -116,6 +116,8 @@@ struct msm_gem_submit 
        uint32_t fence;
        int ring;
        bool valid;
 +      uint64_t profile_buf_iova;
 +      void *profile_buf_vaddr;
        unsigned int nr_cmds;
        unsigned int nr_bos;
        struct {
@@@ -48,9 -48,6 +48,9 @@@ static struct msm_gem_submit *submit_cr
                submit->nr_bos = 0;
                submit->nr_cmds = 0;
  
 +              submit->profile_buf_vaddr = NULL;
 +              submit->profile_buf_iova = 0;
 +
                INIT_LIST_HEAD(&submit->bo_list);
                ww_acquire_init(&submit->ticket, &reservation_ww_class);
        }
@@@ -82,13 -79,16 +82,16 @@@ static int submit_lookup_objects(struc
                void __user *userptr =
                        to_user_ptr(args->bos + (i * sizeof(submit_bo)));
  
-               ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
-               if (unlikely(ret)) {
+               if (copy_from_user_inatomic(&submit_bo, userptr,
+                       sizeof(submit_bo))) {
                        pagefault_enable();
                        spin_unlock(&file->table_lock);
-                       ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
-                       if (ret)
+                       if (copy_from_user(&submit_bo, userptr,
+                               sizeof(submit_bo))) {
+                               ret = -EFAULT;
                                goto out;
+                       }
                        spin_lock(&file->table_lock);
                        pagefault_disable();
                }
@@@ -283,8 -283,8 +286,8 @@@ static int submit_reloc(struct msm_gem_
                uint32_t off;
                bool valid;
  
-               ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
-               if (ret)
+               if (copy_from_user(&submit_reloc, userptr,
+                       sizeof(submit_reloc)))
                        return -EFAULT;
  
                if (submit_reloc.submit_offset % 4) {
@@@ -396,7 -396,6 +399,7 @@@ int msm_ioctl_gem_submit(struct drm_dev
                case MSM_SUBMIT_CMD_BUF:
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 +              case MSM_SUBMIT_CMD_PROFILE_BUF:
                        break;
                default:
                        DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
                submit->cmd[i].iova = iova + submit_cmd.submit_offset;
                submit->cmd[i].idx  = submit_cmd.submit_idx;
  
 +              if (submit_cmd.type == MSM_SUBMIT_CMD_PROFILE_BUF) {
 +                      submit->profile_buf_iova = submit->cmd[i].iova;
 +                      submit->profile_buf_vaddr =
 +                              msm_gem_vaddr_locked(&msm_obj->base);
 +              }
 +
                if (submit->valid)
                        continue;
  
  #define CREATE_TRACE_POINTS
  #include "sde_trace.h"
  
- static const char * const iommu_ports[] = {
-               "mdp_0",
- };
  /**
   * Controls size of event log buffer. Specified as a power of 2.
   */
@@@ -598,7 -594,6 +594,7 @@@ static int _sde_kms_setup_displays(stru
                .get_modes =  sde_hdmi_connector_get_modes,
                .mode_valid = sde_hdmi_mode_valid,
                .get_info =   sde_hdmi_get_info,
 +              .set_property = sde_hdmi_set_property,
        };
        struct msm_display_info info = {0};
        struct drm_encoder *encoder;
@@@ -1077,8 -1072,7 +1073,7 @@@ static int _sde_kms_mmu_init(struct sde
  
                sde_kms->aspace[i] = aspace;
  
-               ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
-                               ARRAY_SIZE(iommu_ports));
+               ret = mmu->funcs->attach(mmu, NULL, 0);
                if (ret) {
                        SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
                        msm_gem_address_space_put(aspace);