OSDN Git Service

Merge android-4.4.143 (7bbfac1) into msm-4.4
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers / gpu / drm / msm / msm_gem_submit.c
1 /*
2  * Copyright (C) 2013 Red Hat
3  * Author: Rob Clark <robdclark@gmail.com>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #include "msm_drv.h"
19 #include "msm_gpu.h"
20 #include "msm_gem.h"
21 #include "msm_trace.h"
22
23 /*
24  * Cmdstream submission:
25  */
26
27 /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
28 #define BO_VALID    0x8000
29 #define BO_LOCKED   0x4000
30 #define BO_PINNED   0x2000
31
/*
 * Allocate and minimally initialize a submit object, sized so that the
 * nr_bos bo entries and nr_cmds cmd entries live in one allocation
 * directly after the struct itself.
 *
 * Returns NULL on allocation failure, or if the total size would
 * exceed SIZE_MAX (only reachable on 32-bit, since sz is computed in
 * 64 bits from two 32-bit counts and so cannot itself wrap).
 *
 * Ownership of @queue's reference passes to the returned submit and is
 * dropped in msm_gem_submit_free().  The ww acquire ticket initialized
 * here is released in submit_cleanup().
 */
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gem_address_space *aspace,
		uint32_t nr_bos, uint32_t nr_cmds,
		struct msm_gpu_submitqueue *queue)
{
	struct msm_gem_submit *submit;
	/* compute in 64 bits so the user-controlled multiply/add cannot
	 * overflow before the SIZE_MAX check below */
	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return NULL;

	/* NOWARN/NORETRY: a huge userspace-requested allocation should
	 * just fail fast instead of invoking the OOM killer */
	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (submit) {
		submit->dev = dev;
		submit->aspace = aspace;
		submit->queue = queue;

		/* initially, until copy_from_user() and bo lookup succeeds: */
		submit->nr_bos = 0;
		submit->nr_cmds = 0;

		submit->profile_buf = NULL;
		submit->profile_buf_iova = 0;
		/* cmd[] array starts immediately after the bos[] array */
		submit->cmd = (void *)&submit->bos[nr_bos];

		submit->secure = false;

		/*
		 * Initialize node so we can safely list_del() on it if
		 * we fail in the submit path
		 */
		INIT_LIST_HEAD(&submit->node);
		INIT_LIST_HEAD(&submit->bo_list);
		ww_acquire_init(&submit->ticket, &reservation_ww_class);
	}

	return submit;
}
71
72 static inline unsigned long __must_check
73 copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
74 {
75         if (access_ok(VERIFY_READ, from, n))
76                 return __copy_from_user_inatomic(to, from, n);
77         return -EFAULT;
78 }
79
80 void msm_gem_submit_free(struct msm_gem_submit *submit)
81 {
82         if (!submit)
83                 return;
84
85         msm_submitqueue_put(submit->queue);
86         list_del(&submit->node);
87         kfree(submit);
88 }
89
/*
 * Copy the bo table in from userspace and resolve every handle to a
 * GEM object, taking a reference on each object and chaining it onto
 * submit->bo_list.
 *
 * The idr lookups are done in bulk under a single grab of
 * file->table_lock; user copies are first attempted with pagefaults
 * disabled, and on a fault the lock is dropped, the copy retried with
 * faults allowed, and the lock re-taken.
 *
 * On return (success or failure) submit->nr_bos reflects how many
 * entries were fully set up, so submit_cleanup() can unwind exactly
 * those.
 */
static int submit_lookup_objects(struct msm_gpu *gpu,
		struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);
	pagefault_disable();

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* fast path: atomic copy while holding the lock; if it
		 * faults, drop the lock and redo the copy with pagefaults
		 * enabled, then re-acquire and continue */
		if (copy_from_user_inatomic(&submit_bo, userptr,
			sizeof(submit_bo))) {
			pagefault_enable();
			spin_unlock(&file->table_lock);
			if (copy_from_user(&submit_bo, userptr,
				sizeof(submit_bo))) {
				ret = -EFAULT;
				goto out;
			}

			spin_lock(&file->table_lock);
			pagefault_disable();
		}

		/* reject unknown flag bits, and require at least one known
		 * flag to be set */
		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = submit_bo.flags;
		/* in validate_objects() we figure out if this is true: */
		submit->bos[i].iova  = submit_bo.presumed;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit_bo.handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		msm_obj = to_msm_bo(obj);

		/*
		 * If the buffer is marked as secure make sure that we can
		 * handle secure buffers and then mark the submission as secure
		 */
		if (msm_obj->flags & MSM_BO_SECURE) {
			if (!gpu->secure_aspace) {
				DRM_ERROR("Cannot handle secure buffers\n");
				ret = -EINVAL;
				goto out_unlock;
			}

			submit->secure = true;
		}

		/* a non-empty submit_entry means this bo is already part of
		 * some submit (including a duplicate handle in this one) */
		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/* reference held for the lifetime of the submit; dropped in
		 * submit_cleanup() */
		drm_gem_object_reference(obj);

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out_unlock:
	pagefault_enable();
	spin_unlock(&file->table_lock);

out:
	/* record how far we got; entry i itself was never completed, so
	 * it is excluded from later cleanup */
	submit->nr_bos = i;

	return ret;
}
181
182 static void submit_unlock_unpin_bo(struct msm_gpu *gpu,
183                 struct msm_gem_submit *submit, int i)
184 {
185         struct msm_gem_object *msm_obj = submit->bos[i].obj;
186         struct msm_gem_address_space *aspace;
187
188         aspace = (msm_obj->flags & MSM_BO_SECURE) ?
189                         gpu->secure_aspace : submit->aspace;
190
191         if (submit->bos[i].flags & BO_PINNED)
192                 msm_gem_put_iova(&msm_obj->base, aspace);
193
194         if (submit->bos[i].flags & BO_LOCKED)
195                 ww_mutex_unlock(&msm_obj->resv->lock);
196
197         if (!(submit->bos[i].flags & BO_VALID))
198                 submit->bos[i].iova = 0;
199
200         submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
201 }
202
/* This is where we make sure all the bo's are reserved and pin'd: */
/*
 * Take every bo's reservation lock under submit->ticket using the
 * ww_mutex wound/wait protocol.  On -EDEADLK we back off completely:
 * drop all locks held so far, take a blocking "slow" lock on the
 * contended bo, and retry from the top.  'slow_locked' remembers which
 * bo is already held across a retry so the loop neither re-locks it
 * nor leaks it in the fail path.
 *
 * Also rejects the submit if any SVM object in the list has been
 * invalidated.
 */
static int submit_validate_objects(struct msm_gpu *gpu,
		struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		/* once the loop reaches the slow-locked bo it is covered by
		 * the normal fail-path unwind, so stop tracking it */
		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}

		/*
		 * An invalid SVM object is part of
		 * this submit's buffer list, fail.
		 */
		if (msm_obj->flags & MSM_BO_SVM) {
			struct msm_gem_svm_object *msm_svm_obj =
				to_msm_svm_obj(msm_obj);
			if (msm_svm_obj->invalid) {
				ret = -EINVAL;
				goto fail;
			}
		}
	}

	/* all reservations acquired; no further locks may join the ticket */
	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	/* unwind bos 0..i; including bo i is harmless when its lock attempt
	 * failed, since BO_LOCKED was never set for it */
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(gpu, submit, i);

	/* a slow-locked bo beyond i was not covered by the loop above
	 * (slow_locked can only still be >= 1 here: index 0 is cleared on
	 * the first retry iteration before any failure can occur) */
	if (slow_locked > 0)
		submit_unlock_unpin_bo(gpu, submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
265
266 static int submit_bo(struct msm_gpu *gpu,
267                 struct msm_gem_submit *submit, uint32_t idx,
268                 struct msm_gem_object **obj, uint64_t *iova, bool *valid)
269 {
270         struct msm_gem_object *msm_obj;
271         struct msm_gem_address_space *aspace;
272         int ret;
273
274         if (idx >= submit->nr_bos) {
275                 DRM_ERROR("invalid buffer index: %u (out of %u)\n",
276                                 idx, submit->nr_bos);
277                 return -EINVAL;
278         }
279
280         if (obj)
281                 *obj = submit->bos[idx].obj;
282
283         /* Only map and pin if the caller needs either the iova or valid */
284         if (!iova && !valid)
285                 return 0;
286
287         if (!(submit->bos[idx].flags & BO_PINNED)) {
288                 uint64_t buf_iova;
289
290                 msm_obj = submit->bos[idx].obj;
291                 aspace = (msm_obj->flags & MSM_BO_SECURE) ?
292                         gpu->secure_aspace : submit->aspace;
293
294                 ret = msm_gem_get_iova(&msm_obj->base, aspace, &buf_iova);
295
296                 /* this would break the logic in the fail path.. there is no
297                  * reason for this to happen, but just to be on the safe side
298                  * let's notice if this starts happening in the future:
299                  */
300                 WARN_ON(ret == -EDEADLK);
301
302                 if (ret)
303                         return ret;
304
305                 submit->bos[idx].flags |= BO_PINNED;
306
307                 if (buf_iova == submit->bos[idx].iova) {
308                         submit->bos[idx].flags |= BO_VALID;
309                 } else {
310                         submit->bos[idx].iova = buf_iova;
311                         submit->bos[idx].flags &= ~BO_VALID;
312                 }
313         }
314
315         if (iova)
316                 *iova = submit->bos[idx].iova;
317         if (valid)
318                 *valid = !!(submit->bos[idx].flags & BO_VALID);
319
320         return 0;
321 }
322
323 /* process the reloc's and patch up the cmdstream as needed: */
324 static int submit_reloc(struct msm_gpu *gpu,
325                 struct msm_gem_submit *submit,
326                 struct msm_gem_object *obj, uint32_t offset,
327                 uint32_t nr_relocs, uint64_t relocs)
328 {
329         uint32_t i, last_offset = 0;
330         uint32_t *ptr;
331         int ret;
332
333         if (offset % 4) {
334                 DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
335                 return -EINVAL;
336         }
337
338         if (obj->flags & MSM_BO_SECURE) {
339                 DRM_ERROR("cannot do relocs on a secure buffer\n");
340                 return -EINVAL;
341         }
342
343         if (nr_relocs == 0)
344                 return 0;
345
346         /* For now, just map the entire thing.  Eventually we probably
347          * to do it page-by-page, w/ kmap() if not vmap()d..
348          */
349         ptr = msm_gem_vaddr(&obj->base);
350         if (!ptr) {
351                 DRM_ERROR("Invalid format");
352                 return -EINVAL;
353         }
354
355         if (IS_ERR(ptr)) {
356                 ret = PTR_ERR(ptr);
357                 DBG("failed to map: %d", ret);
358                 return ret;
359         }
360
361         for (i = 0; i < nr_relocs; i++) {
362                 struct drm_msm_gem_submit_reloc submit_reloc;
363                 void __user *userptr =
364                         u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
365                 uint64_t iova;
366                 uint32_t off;
367                 bool valid;
368
369                 if (copy_from_user(&submit_reloc, userptr,
370                         sizeof(submit_reloc)))
371                         return -EFAULT;
372
373                 if (submit_reloc.submit_offset % 4) {
374                         DRM_ERROR("non-aligned reloc offset: %u\n",
375                                         submit_reloc.submit_offset);
376                         return -EINVAL;
377                 }
378
379                 /* offset in dwords: */
380                 off = submit_reloc.submit_offset / 4;
381
382                 if ((off >= (obj->base.size / 4)) ||
383                                 (off < last_offset)) {
384                         DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
385                         return -EINVAL;
386                 }
387
388                 ret = submit_bo(gpu, submit, submit_reloc.reloc_idx,
389                                 NULL, &iova, &valid);
390                 if (ret)
391                         return ret;
392
393                 if (valid)
394                         continue;
395
396                 iova += submit_reloc.reloc_offset;
397
398                 if (submit_reloc.shift < 0)
399                         iova >>= -submit_reloc.shift;
400                 else
401                         iova <<= submit_reloc.shift;
402
403                 ptr[off] = iova | submit_reloc.or;
404
405                 last_offset = off;
406         }
407
408         return 0;
409 }
410
411 static void submit_cleanup(struct msm_gpu *gpu, struct msm_gem_submit *submit,
412                 bool fail)
413 {
414         unsigned i;
415
416         if (!submit)
417                 return;
418
419         for (i = 0; i < submit->nr_bos; i++) {
420                 struct msm_gem_object *msm_obj = submit->bos[i].obj;
421                 submit_unlock_unpin_bo(gpu, submit, i);
422                 list_del_init(&msm_obj->submit_entry);
423                 drm_gem_object_unreference(&msm_obj->base);
424         }
425
426         ww_acquire_fini(&submit->ticket);
427 }
428
429 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
430                 struct drm_file *file)
431 {
432         struct msm_drm_private *priv = dev->dev_private;
433         struct drm_msm_gem_submit *args = data;
434         struct msm_file_private *ctx = file->driver_priv;
435         struct msm_gem_submit *submit;
436         struct msm_gpu_submitqueue *queue;
437         struct msm_gpu *gpu;
438         unsigned i;
439         int ret;
440
441         /* for now, we just have 3d pipe.. eventually this would need to
442          * be more clever to dispatch to appropriate gpu module:
443          */
444         if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
445                 return -EINVAL;
446
447         gpu = priv->gpu;
448         if (!gpu || !ctx)
449                 return -ENXIO;
450
451         queue = msm_submitqueue_get(ctx, args->queueid);
452         if (!queue)
453                 return -ENOENT;
454
455         mutex_lock(&dev->struct_mutex);
456
457         submit = submit_create(dev, ctx->aspace, args->nr_bos, args->nr_cmds,
458                 queue);
459         if (!submit) {
460                 ret = -ENOMEM;
461                 goto out;
462         }
463
464         ret = submit_lookup_objects(gpu, submit, args, file);
465         if (ret)
466                 goto out;
467
468         ret = submit_validate_objects(gpu, submit);
469         if (ret)
470                 goto out;
471
472         for (i = 0; i < args->nr_cmds; i++) {
473                 struct drm_msm_gem_submit_cmd submit_cmd;
474                 void __user *userptr =
475                         u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
476                 struct msm_gem_object *msm_obj;
477                 uint64_t iova;
478                 size_t size;
479
480                 ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
481                 if (ret) {
482                         ret = -EFAULT;
483                         goto out;
484                 }
485
486                 /* validate input from userspace: */
487                 switch (submit_cmd.type) {
488                 case MSM_SUBMIT_CMD_BUF:
489                 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
490                 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
491                 case MSM_SUBMIT_CMD_PROFILE_BUF:
492                         break;
493                 default:
494                         DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
495                         ret = -EINVAL;
496                         goto out;
497                 }
498
499                 ret = submit_bo(gpu, submit, submit_cmd.submit_idx,
500                                 &msm_obj, &iova, NULL);
501                 if (ret)
502                         goto out;
503
504                 if (submit_cmd.size % 4) {
505                         DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
506                                         submit_cmd.size);
507                         ret = -EINVAL;
508                         goto out;
509                 }
510
511                 size = submit_cmd.size + submit_cmd.submit_offset;
512
513                 if (!submit_cmd.size || (size < submit_cmd.size) ||
514                         (size > msm_obj->base.size)) {
515                         DRM_ERROR("invalid cmdstream offset/size: %u/%u\n",
516                                 submit_cmd.submit_offset, submit_cmd.size);
517                         ret = -EINVAL;
518                         goto out;
519                 }
520
521                 submit->cmd[i].type = submit_cmd.type;
522                 submit->cmd[i].size = submit_cmd.size / 4;
523                 submit->cmd[i].iova = iova + submit_cmd.submit_offset;
524                 submit->cmd[i].idx  = submit_cmd.submit_idx;
525
526                 if (submit_cmd.type == MSM_SUBMIT_CMD_PROFILE_BUF) {
527                         submit->profile_buf_iova = submit->cmd[i].iova;
528                         submit->profile_buf = msm_gem_vaddr(&msm_obj->base)
529                                 + submit_cmd.submit_offset;
530                 }
531
532                 ret = submit_reloc(gpu, submit, msm_obj,
533                                 submit_cmd.submit_offset, submit_cmd.nr_relocs,
534                                 submit_cmd.relocs);
535                 if (ret)
536                         goto out;
537         }
538
539         submit->nr_cmds = i;
540
541         /* Clamp the user submitted ring to the range of available rings */
542         submit->ring = clamp_t(uint32_t, queue->prio, 0, gpu->nr_rings - 1);
543
544         ret = msm_gpu_submit(gpu, submit);
545
546         args->fence = submit->fence;
547
548 out:
549         submit_cleanup(gpu, submit, !!ret);
550         if (ret)
551                 msm_gem_submit_free(submit);
552         mutex_unlock(&dev->struct_mutex);
553         return ret;
554 }