OSDN Git Service

Merge branch 'master' into modesetting-101
[android-x86/external-libdrm.git] / linux-core / i915_execbuf.c
1 /*
2  * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the
14  * next paragraph) shall be included in all copies or substantial portions
15  * of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24  *
25  * Authors:
26  *     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
27  *     Dave Airlie
28  *     Keith Packard
29  *     ... ?
30  */
31
32 #include "drmP.h"
33 #include "drm.h"
34 #include "i915_drm.h"
35 #include "i915_drv.h"
36
37 #if DRM_DEBUG_CODE
38 #define DRM_DEBUG_RELOCATION    (drm_debug != 0)
39 #else
40 #define DRM_DEBUG_RELOCATION    0
41 #endif
42
/*
 * Lazily-determined idle state of a relocation target buffer.
 * Checked at most once per buffer; see i915_update_relocatee().
 */
enum i915_buf_idle {
	I915_RELOC_UNCHECKED,	/* idle status not yet determined */
	I915_RELOC_IDLE,	/* buffer idle; safe to patch with the CPU */
	I915_RELOC_BUSY		/* buffer busy; patch via the ring instead */
};
48
/*
 * State for the buffer currently having relocations applied (the
 * "relocatee"): which buffer it is, which of its pages is mapped,
 * and whether it is idle enough for direct CPU writes.
 */
struct i915_relocatee_info {
	struct drm_buffer_object *buf;	/* destination buffer object */
	unsigned long offset;	/* offset of the last relocation handled */
	uint32_t *data_page;	/* kernel mapping of the current page */
	unsigned page_offset;	/* page-aligned base of the mapped page */
	struct drm_bo_kmap_obj kmap;	/* kmap handle backing data_page */
	int is_iomem;	/* nonzero if data_page is I/O memory (use iowrite32) */
	int dst;	/* index of current destination buffer; ~0 = none */
	int idle;	/* enum i915_buf_idle state of buf */
	int performed_ring_relocs;	/* set once any reloc went via the ring */
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
	unsigned long pfn;	/* pfn used for the atomic mapping */
	pgprot_t pg_prot;	/* page protection for the atomic mapping */
#endif
};
64
/* Per-buffer bookkeeping for one validate/relocate pass. */
struct drm_i915_validate_buffer {
	struct drm_buffer_object *buffer;	/* validated buffer object */
	int presumed_offset_correct;	/* client's presumed offset was right */
	void __user *data;	/* user-space op_arg this entry came from */
	int ret;	/* per-buffer status copied back to user-space */
	enum i915_buf_idle idle;	/* cached idle state */
};
72
73 /*
74  * I'd like to use MI_STORE_DATA_IMM here, but I can't make
75  * it work. Seems like GART writes are broken with that
76  * instruction. Also I'm not sure that MI_FLUSH will
77  * act as a memory barrier for that instruction. It will
78  * for this single dword 2D blit.
79  */
80
/*
 * Write a single relocation dword at GTT offset 'offset' using a
 * one-dword 2D blit on the ring (see the comment above on why
 * MI_STORE_DATA_IMM is not used here).
 */
static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset,
				 uint32_t value)
{
	struct drm_i915_private *dev_priv =
	    (struct drm_i915_private *)dev->dev_private;

	RING_LOCALS;
	i915_kernel_lost_context(dev);
	BEGIN_LP_RING(6);
	/* NOTE(review): looks like a color-fill blit of a single dword
	 * destination -- confirm the opcode bits against the blitter docs. */
	OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3));
	OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40));
	OUT_RING((0x1 << 16) | (0x4));
	OUT_RING(offset);	/* destination address */
	OUT_RING(value);	/* fill value == relocation value */
	OUT_RING(0);
	ADVANCE_LP_RING();
}
98
99 static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer
100                                             *buffers, unsigned num_buffers)
101 {
102         while (num_buffers--)
103                 drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
104 }
105
/*
 * Apply a single pre-validation (type 0) relocation to the relocatee
 * command buffer.
 *
 * reloc[0]: byte offset within the command buffer to patch.
 * reloc[1]: value added to the target buffer's validated offset.
 * reloc[2]: user handle (hash key) of the target buffer.
 *
 * Returns 0 on success or a negative error code.
 */
int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
		     struct drm_i915_validate_buffer *buffers,
		     struct i915_relocatee_info *relocatee, uint32_t * reloc)
{
	unsigned index;
	unsigned long new_cmd_offset;
	u32 val;
	int ret, i;
	int buf_index = -1;

	/*
	 * FIXME: O(relocs * buffers) complexity.
	 */

	/*
	 * NOTE(review): the loop bound is "<=", so buffers[num_buffers] is
	 * read as well. This appears to rely on the caller having set that
	 * slot's .buffer to NULL before validation -- verify all callers.
	 */
	for (i = 0; i <= num_buffers; i++)
		if (buffers[i].buffer)
			if (reloc[2] == buffers[i].buffer->base.hash.key)
				buf_index = i;

	if (buf_index == -1) {
		DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
		return -EINVAL;
	}

	/*
	 * Short-circuit relocations that were correctly
	 * guessed by the client
	 */
	if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
		return 0;

	new_cmd_offset = reloc[0];
	if (!relocatee->data_page ||
	    !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
		struct drm_bo_mem_reg *mem = &relocatee->buf->mem;

		/* Crossed a page boundary: remap the relevant page. */
		drm_bo_kunmap(&relocatee->kmap);
		relocatee->data_page = NULL;
		relocatee->offset = new_cmd_offset;

		/* Idle-check the relocatee at most once before CPU writes. */
		if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) {
			ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0);
			if (ret)
				return ret;
			relocatee->idle = I915_RELOC_IDLE;
		}

		/* Don't patch through a stale cached CPU mapping. */
		if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) &&
			     (mem->flags & DRM_BO_FLAG_CACHED_MAPPED)))
			drm_bo_evict_cached(relocatee->buf);

		ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
				  1, &relocatee->kmap);
		if (ret) {
			DRM_ERROR
			    ("Could not map command buffer to apply relocs\n %08lx",
			     new_cmd_offset);
			return ret;
		}
		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
						       &relocatee->is_iomem);
		relocatee->page_offset = (relocatee->offset & PAGE_MASK);
	}

	val = buffers[buf_index].buffer->offset;
	/* Dword index of the patch location within the mapped page. */
	index = (reloc[0] - relocatee->page_offset) >> 2;

	/* add in validate */
	val = val + reloc[1];

	if (DRM_DEBUG_RELOCATION) {
		/* Report offsets the client presumed correctly but weren't. */
		if (buffers[buf_index].presumed_offset_correct &&
		    relocatee->data_page[index] != val) {
			DRM_DEBUG
			    ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
			     reloc[0], reloc[1], buf_index,
			     relocatee->data_page[index], val);
		}
	}

	/* iowrite32 for I/O memory mappings; plain store otherwise. */
	if (relocatee->is_iomem)
		iowrite32(val, relocatee->data_page + index);
	else
		relocatee->data_page[index] = val;
	return 0;
}
192
193 int i915_process_relocs(struct drm_file *file_priv,
194                         uint32_t buf_handle,
195                         uint32_t __user ** reloc_user_ptr,
196                         struct i915_relocatee_info *relocatee,
197                         struct drm_i915_validate_buffer *buffers,
198                         uint32_t num_buffers)
199 {
200         int ret, reloc_stride;
201         uint32_t cur_offset;
202         uint32_t reloc_count;
203         uint32_t reloc_type;
204         uint32_t reloc_buf_size;
205         uint32_t *reloc_buf = NULL;
206         int i;
207
208         /* do a copy from user from the user ptr */
209         ret = get_user(reloc_count, *reloc_user_ptr);
210         if (ret) {
211                 DRM_ERROR("Could not map relocation buffer.\n");
212                 goto out;
213         }
214
215         ret = get_user(reloc_type, (*reloc_user_ptr) + 1);
216         if (ret) {
217                 DRM_ERROR("Could not map relocation buffer.\n");
218                 goto out;
219         }
220
221         if (reloc_type != 0) {
222                 DRM_ERROR("Unsupported relocation type requested\n");
223                 ret = -EINVAL;
224                 goto out;
225         }
226
227         reloc_buf_size =
228             (I915_RELOC_HEADER +
229              (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
230         reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
231         if (!reloc_buf) {
232                 DRM_ERROR("Out of memory for reloc buffer\n");
233                 ret = -ENOMEM;
234                 goto out;
235         }
236
237         if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
238                 ret = -EFAULT;
239                 goto out;
240         }
241
242         /* get next relocate buffer handle */
243         *reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2];
244
245         reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);   /* may be different for other types of relocs */
246
247         DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count,
248                   *reloc_user_ptr);
249
250         for (i = 0; i < reloc_count; i++) {
251                 cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);
252
253                 ret = i915_apply_reloc(file_priv, num_buffers, buffers,
254                                        relocatee, reloc_buf + cur_offset);
255                 if (ret)
256                         goto out;
257         }
258
259       out:
260         if (reloc_buf)
261                 kfree(reloc_buf);
262
263         if (relocatee->data_page) {
264                 drm_bo_kunmap(&relocatee->kmap);
265                 relocatee->data_page = NULL;
266         }
267
268         return ret;
269 }
270
/*
 * Apply all pre-validation (type 0) relocation lists for one buffer.
 * Looks up the relocatee by handle and walks the chained user-space
 * relocation lists until the chain terminates with NULL.
 *
 * Returns 0 on success or a negative error code.
 */
static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
			   uint32_t __user * reloc_user_ptr,
			   struct drm_i915_validate_buffer *buffers,
			   uint32_t buf_count)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct i915_relocatee_info relocatee;
	int ret = 0;
	int b;

	/*
	 * Short circuit relocations when all previous
	 * buffers offsets were correctly guessed by
	 * the client
	 */
	if (!DRM_DEBUG_RELOCATION) {
		for (b = 0; b < buf_count; b++)
			if (!buffers[b].presumed_offset_correct)
				break;

		if (b == buf_count)
			return 0;
	}

	memset(&relocatee, 0, sizeof(relocatee));
	relocatee.idle = I915_RELOC_UNCHECKED;

	/* struct_mutex is held across the buffer-object lookup only. */
	mutex_lock(&dev->struct_mutex);
	relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
	mutex_unlock(&dev->struct_mutex);
	if (!relocatee.buf) {
		DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
		ret = -EINVAL;
		goto out_err;
	}

	/* Hold the relocatee's mutex while patching its contents. */
	mutex_lock(&relocatee.buf->mutex);
	/* i915_process_relocs() advances reloc_user_ptr along the chain. */
	while (reloc_user_ptr) {
		ret =
		    i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr,
					&relocatee, buffers, buf_count);
		if (ret) {
			DRM_ERROR("process relocs failed\n");
			goto out_err1;
		}
	}

      out_err1:
	mutex_unlock(&relocatee.buf->mutex);
	drm_bo_usage_deref_unlocked(&relocatee.buf);
      out_err:
	return ret;
}
324
/*
 * Drop the cached destination-page mapping and reset the relocatee
 * state so the next i915_update_relocatee() call starts fresh.
 */
static void i915_clear_relocatee(struct i915_relocatee_info *relocatee)
{
	if (relocatee->data_page) {
#ifndef DRM_KMAP_ATOMIC_PROT_PFN
		drm_bo_kunmap(&relocatee->kmap);
#else
		/* Atomic-mapping build: undo kmap_atomic_prot_pfn(). */
		kunmap_atomic(relocatee->data_page, KM_USER0);
#endif
		relocatee->data_page = NULL;
	}
	relocatee->buf = NULL;
	relocatee->dst = ~0;	/* no current destination buffer */
}
338
/*
 * Point the relocatee state at destination buffer index 'dst' and make
 * sure the page containing dst_offset is mapped for CPU writes.
 *
 * If the destination buffer is busy, no mapping is set up and 0 is
 * returned with relocatee->idle == I915_RELOC_BUSY; the caller then
 * emits the relocation through the ring instead.
 *
 * Called with preemption disabled (see i915_post_relocs()); preemption
 * is temporarily re-enabled around the potentially sleeping idle check.
 *
 * Returns 0 on success or a negative error code.
 */
static int i915_update_relocatee(struct i915_relocatee_info *relocatee,
				 struct drm_i915_validate_buffer *buffers,
				 unsigned int dst, unsigned long dst_offset)
{
	int ret;

	/* Switching destination buffers: reset the cached state. */
	if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) {
		i915_clear_relocatee(relocatee);
		relocatee->dst = dst;
		relocatee->buf = buffers[dst].buffer;
		relocatee->idle = buffers[dst].idle;

		/*
		 * Check for buffer idle. If the buffer is busy, revert to
		 * ring relocations.
		 */

		if (relocatee->idle == I915_RELOC_UNCHECKED) {
			preempt_enable();
			mutex_lock(&relocatee->buf->mutex);

			ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0);
			if (ret == 0)
				relocatee->idle = I915_RELOC_IDLE;
			else {
				relocatee->idle = I915_RELOC_BUSY;
				relocatee->performed_ring_relocs = 1;
			}
			mutex_unlock(&relocatee->buf->mutex);
			preempt_disable();
			/* Cache the result so we only ever check once. */
			buffers[dst].idle = relocatee->idle;
		}
	}

	/* Busy buffers are patched via the ring; no mapping needed. */
	if (relocatee->idle == I915_RELOC_BUSY)
		return 0;

	if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) {
		DRM_ERROR("Relocation destination out of bounds.\n");
		return -EINVAL;
	}
	/* (Re)map when crossing a page boundary or on first use. */
	if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) ||
		     NULL == relocatee->data_page)) {
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
		if (NULL != relocatee->data_page) {
			kunmap_atomic(relocatee->data_page, KM_USER0);
			relocatee->data_page = NULL;
		}
		ret = drm_bo_pfn_prot(relocatee->buf, dst_offset,
				      &relocatee->pfn, &relocatee->pg_prot);
		if (ret) {
			DRM_ERROR("Can't map relocation destination.\n");
			return -EINVAL;
		}
		relocatee->data_page =
		    kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0,
					 relocatee->pg_prot);
#else
		if (NULL != relocatee->data_page) {
			drm_bo_kunmap(&relocatee->kmap);
			relocatee->data_page = NULL;
		}

		ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT,
				  1, &relocatee->kmap);
		if (ret) {
			DRM_ERROR("Can't map relocation destination.\n");
			return ret;
		}

		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
						       &relocatee->is_iomem);
#endif
		relocatee->page_offset = dst_offset & PAGE_MASK;
	}
	return 0;
}
416
417 static int i915_apply_post_reloc(uint32_t reloc[],
418                                  struct drm_i915_validate_buffer *buffers,
419                                  uint32_t num_buffers,
420                                  struct i915_relocatee_info *relocatee)
421 {
422         uint32_t reloc_buffer = reloc[2];
423         uint32_t dst_buffer = reloc[3];
424         uint32_t val;
425         uint32_t index;
426         int ret;
427
428         if (likely(buffers[reloc_buffer].presumed_offset_correct))
429                 return 0;
430         if (unlikely(reloc_buffer >= num_buffers)) {
431                 DRM_ERROR("Invalid reloc buffer index.\n");
432                 return -EINVAL;
433         }
434         if (unlikely(dst_buffer >= num_buffers)) {
435                 DRM_ERROR("Invalid dest buffer index.\n");
436                 return -EINVAL;
437         }
438
439         ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]);
440         if (unlikely(ret))
441                 return ret;
442
443         val = buffers[reloc_buffer].buffer->offset;
444         index = (reloc[0] - relocatee->page_offset) >> 2;
445         val = val + reloc[1];
446
447         if (relocatee->idle == I915_RELOC_BUSY) {
448                 i915_emit_ring_reloc(relocatee->buf->dev,
449                                      relocatee->buf->offset + reloc[0], val);
450                 return 0;
451         }
452 #ifdef DRM_KMAP_ATOMIC_PROT_PFN
453         relocatee->data_page[index] = val;
454 #else
455         if (likely(relocatee->is_iomem))
456                 iowrite32(val, relocatee->data_page + index);
457         else
458                 relocatee->data_page[index] = val;
459 #endif
460
461         return 0;
462 }
463
464 static int i915_post_relocs(struct drm_file *file_priv,
465                             uint32_t __user * new_reloc_ptr,
466                             struct drm_i915_validate_buffer *buffers,
467                             unsigned int num_buffers)
468 {
469         uint32_t *reloc;
470         uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);
471         uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t);
472         struct i915_relocatee_info relocatee;
473         uint32_t reloc_type;
474         uint32_t num_relocs;
475         uint32_t count;
476         int ret = 0;
477         int i;
478         int short_circuit = 1;
479         uint32_t __user *reloc_ptr;
480         uint64_t new_reloc_data;
481         uint32_t reloc_buf_size;
482         uint32_t *reloc_buf;
483
484         for (i = 0; i < num_buffers; ++i) {
485                 if (unlikely(!buffers[i].presumed_offset_correct)) {
486                         short_circuit = 0;
487                         break;
488                 }
489         }
490
491         if (likely(short_circuit))
492                 return 0;
493
494         memset(&relocatee, 0, sizeof(relocatee));
495
496         while (new_reloc_ptr) {
497                 reloc_ptr = new_reloc_ptr;
498
499                 ret = get_user(num_relocs, reloc_ptr);
500                 if (unlikely(ret))
501                         goto out;
502                 if (unlikely(!access_ok(VERIFY_READ, reloc_ptr,
503                                         header_size +
504                                         num_relocs * reloc_stride)))
505                         return -EFAULT;
506
507                 ret = __get_user(reloc_type, reloc_ptr + 1);
508                 if (unlikely(ret))
509                         goto out;
510
511                 if (unlikely(reloc_type != 1)) {
512                         DRM_ERROR("Unsupported relocation type requested.\n");
513                         ret = -EINVAL;
514                         goto out;
515                 }
516
517                 ret = __get_user(new_reloc_data, reloc_ptr + 2);
518                 new_reloc_ptr = (uint32_t __user *) (unsigned long)
519                     new_reloc_data;
520
521                 reloc_ptr += I915_RELOC_HEADER;
522
523                 if (num_relocs == 0)
524                         goto out;
525
526                 reloc_buf_size =
527                     (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t);
528                 reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
529                 if (!reloc_buf) {
530                         DRM_ERROR("Out of memory for reloc buffer\n");
531                         ret = -ENOMEM;
532                         goto out;
533                 }
534
535                 if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) {
536                         ret = -EFAULT;
537                         goto out;
538                 }
539                 reloc = reloc_buf;
540                 preempt_disable();
541                 for (count = 0; count < num_relocs; ++count) {
542                         ret = i915_apply_post_reloc(reloc, buffers,
543                                                     num_buffers, &relocatee);
544                         if (unlikely(ret)) {
545                                 preempt_enable();
546                                 goto out;
547                         }
548                         reloc += I915_RELOC0_STRIDE;
549                 }
550                 preempt_enable();
551
552                 if (reloc_buf) {
553                         kfree(reloc_buf);
554                         reloc_buf = NULL;
555                 }
556                 i915_clear_relocatee(&relocatee);
557         }
558
559       out:
560         /*
561          * Flush ring relocs so the command parser will pick them up.
562          */
563
564         if (relocatee.performed_ring_relocs)
565                 (void)i915_emit_mi_flush(file_priv->minor->dev, 0);
566
567         i915_clear_relocatee(&relocatee);
568         if (reloc_buf) {
569                 kfree(reloc_buf);
570                 reloc_buf = NULL;
571         }
572
573         return ret;
574 }
575
576 static int i915_check_presumed(struct drm_i915_op_arg *arg,
577                                struct drm_buffer_object *bo,
578                                uint32_t __user * data, int *presumed_ok)
579 {
580         struct drm_bo_op_req *req = &arg->d.req;
581         uint32_t hint_offset;
582         uint32_t hint = req->bo_req.hint;
583
584         *presumed_ok = 0;
585
586         if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
587                 return 0;
588         if (bo->offset == req->bo_req.presumed_offset) {
589                 *presumed_ok = 1;
590                 return 0;
591         }
592
593         /*
594          * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
595          * the user-space IOCTL argument list, since the buffer has moved,
596          * we're about to apply relocations and we might subsequently
597          * hit an -EAGAIN. In that case the argument list will be reused by
598          * user-space, but the presumed offset is no longer valid.
599          *
600          * Needless to say, this is a bit ugly.
601          */
602
603         hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg;
604         hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
605         return __put_user(hint, data + hint_offset);
606 }
607
608 /*
609  * Validate, add fence and relocate a block of bos from a userspace list
610  */
611 int i915_validate_buffer_list(struct drm_file *file_priv,
612                               unsigned int fence_class, uint64_t data,
613                               struct drm_i915_validate_buffer *buffers,
614                               uint32_t * num_buffers,
615                               uint32_t __user ** post_relocs)
616 {
617         struct drm_i915_op_arg arg;
618         struct drm_bo_op_req *req = &arg.d.req;
619         int ret = 0;
620         unsigned buf_count = 0;
621         uint32_t buf_handle;
622         uint32_t __user *reloc_user_ptr;
623         struct drm_i915_validate_buffer *item = buffers;
624         *post_relocs = NULL;
625
626         do {
627                 if (buf_count >= *num_buffers) {
628                         DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
629                         ret = -EINVAL;
630                         goto out_err;
631                 }
632                 item = buffers + buf_count;
633                 item->buffer = NULL;
634                 item->presumed_offset_correct = 0;
635                 item->idle = I915_RELOC_UNCHECKED;
636
637                 if (copy_from_user
638                     (&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
639                         ret = -EFAULT;
640                         goto out_err;
641                 }
642
643                 ret = 0;
644                 if (req->op != drm_bo_validate) {
645                         DRM_ERROR
646                             ("Buffer object operation wasn't \"validate\".\n");
647                         ret = -EINVAL;
648                         goto out_err;
649                 }
650                 item->ret = 0;
651                 item->data = (void __user *)(unsigned long)data;
652
653                 buf_handle = req->bo_req.handle;
654                 reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr;
655
656                 /*
657                  * Switch mode to post-validation relocations?
658                  */
659
660                 if (unlikely((buf_count == 0) && (*post_relocs == NULL) &&
661                              (reloc_user_ptr != NULL))) {
662                         uint32_t reloc_type;
663
664                         ret = get_user(reloc_type, reloc_user_ptr + 1);
665                         if (ret)
666                                 goto out_err;
667
668                         if (reloc_type == 1)
669                                 *post_relocs = reloc_user_ptr;
670
671                 }
672
673                 if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) {
674                         ret =
675                             i915_exec_reloc(file_priv, buf_handle,
676                                             reloc_user_ptr, buffers, buf_count);
677                         if (ret)
678                                 goto out_err;
679                         DRM_MEMORYBARRIER();
680                 }
681
682                 ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
683                                              req->bo_req.flags,
684                                              req->bo_req.mask, req->bo_req.hint,
685                                              req->bo_req.fence_class,
686                                              NULL, &item->buffer);
687                 if (ret) {
688                         DRM_ERROR("error on handle validate %d\n", ret);
689                         goto out_err;
690                 }
691
692                 buf_count++;
693
694                 ret = i915_check_presumed(&arg, item->buffer,
695                                           (uint32_t __user *)
696                                           (unsigned long)data,
697                                           &item->presumed_offset_correct);
698                 if (ret)
699                         goto out_err;
700
701                 data = arg.next;
702         } while (data != 0);
703       out_err:
704         *num_buffers = buf_count;
705         item->ret = (ret != -EAGAIN) ? ret : 0;
706         return ret;
707 }
708
709 /*
710  * Remove all buffers from the unfenced list.
711  * If the execbuffer operation was aborted, for example due to a signal,
712  * this also make sure that buffers retain their original state and
713  * fence pointers.
714  * Copy back buffer information to user-space unless we were interrupted
715  * by a signal. In which case the IOCTL must be rerun.
716  */
717
718 static int i915_handle_copyback(struct drm_device *dev,
719                                 struct drm_i915_validate_buffer *buffers,
720                                 unsigned int num_buffers, int ret)
721 {
722         int err = ret;
723         int i;
724         struct drm_i915_op_arg arg;
725         struct drm_buffer_object *bo;
726
727         if (ret)
728                 drm_putback_buffer_objects(dev);
729
730         if (ret != -EAGAIN) {
731                 for (i = 0; i < num_buffers; ++i) {
732                         arg.handled = 1;
733                         arg.d.rep.ret = buffers->ret;
734                         bo = buffers->buffer;
735                         mutex_lock(&bo->mutex);
736                         drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
737                         mutex_unlock(&bo->mutex);
738                         if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
739                                 err = -EFAULT;
740                         buffers++;
741                 }
742         }
743
744         return err;
745 }
746
747 /*
748  * Create a fence object, and if that fails, pretend that everything is
749  * OK and just idle the GPU.
750  */
751
/*
 * @fence_flags: DRM_FENCE_FLAG_* controlling user-object creation and
 *               shareability.
 * @fence_arg: filled in with the fence handle, or with handle == ~0
 *             and an error code when fence creation failed.
 * @fence_p: if non-NULL, receives a reference to the fence (NULL on
 *           failure); otherwise the local reference is dropped.
 */
void i915_fence_or_sync(struct drm_file *file_priv,
			uint32_t fence_flags,
			struct drm_fence_arg *fence_arg,
			struct drm_fence_object **fence_p)
{
	struct drm_device *dev = file_priv->minor->dev;
	int ret;
	struct drm_fence_object *fence;

	ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence);

	if (ret) {

		/*
		 * Fence creation failed.
		 * Fall back to synchronous operation and idle the engine.
		 */

		(void)i915_emit_mi_flush(dev, MI_READ_FLUSH);
		(void)i915_quiescent(dev);

		if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {

			/*
			 * Communicate to user-space that
			 * fence creation has failed and that
			 * the engine is idle.
			 */

			fence_arg->handle = ~0;
			fence_arg->error = ret;
		}
		drm_putback_buffer_objects(dev);
		if (fence_p)
			*fence_p = NULL;
		return;
	}

	if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {

		ret = drm_fence_add_user_object(file_priv, fence,
						fence_flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (!ret)
			drm_fence_fill_arg(fence, fence_arg);
		else {
			/*
			 * Fence user object creation failed.
			 * We must idle the engine here as well, as user-
			 * space expects a fence object to wait on. Since we
			 * have a fence object we wait for it to signal
			 * to indicate engine "sufficiently" idle.
			 */

			(void)drm_fence_object_wait(fence, 0, 1, fence->type);
			drm_fence_usage_deref_unlocked(&fence);
			fence_arg->handle = ~0;
			fence_arg->error = ret;
		}
	}

	/* Hand the reference to the caller, or drop it. */
	if (fence_p)
		*fence_p = fence;
	else if (fence)
		drm_fence_usage_deref_unlocked(&fence);
}
818
/*
 * i915_execbuffer - ioctl handler for batchbuffer execution.
 *
 * Validates the user-supplied buffer-object list, applies relocations,
 * dispatches the batchbuffer to the hardware and creates (or syncs on)
 * a fence for the submission.  The whole validate-submit-fence sequence
 * is made atomic with respect to other submitters by dev_priv->cmdbuf_mutex
 * and runs under the buffer-manager read lock.
 *
 * @dev:       DRM device.
 * @data:      ioctl payload, a struct drm_i915_execbuffer.
 * @file_priv: per-open-file DRM data for the caller.
 *
 * Returns 0 on success or a negative errno code.  Note that even on the
 * error paths, relocation/fence results are copied back to user space
 * via i915_handle_copyback(), which may also rewrite the return value.
 */
int i915_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = (struct drm_i915_private *) 
                dev->dev_private;
        struct drm_i915_master_private *master_priv = 
                (struct drm_i915_master_private *)
                dev->primary->master->driver_priv;
        struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
                master_priv->sarea_priv;
        struct drm_i915_execbuffer *exec_buf = data;
        struct drm_i915_batchbuffer *batch = &exec_buf->batch;
        struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
        int num_buffers;
        int ret;
        /* Set by i915_validate_buffer_list(); non-NULL selects the
         * "post relocations" submission variant below. */
        uint32_t __user *post_relocs;

        /* Batchbuffer submission may be disabled (e.g. by module option). */
        if (!dev_priv->allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }

        /* Verify the user-space cliprect array is readable before use. */
        if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
                                                        batch->num_cliprects *
                                                        sizeof(struct
                                                               drm_clip_rect)))
                return -EFAULT;

        /* Cap the request to the driver-wide limit; val_bufs below is
         * sized for max_validate_buffers entries. */
        if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
                return -EINVAL;

        ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
        if (ret)
                return ret;

        /*
         * The cmdbuf_mutex makes sure the validate-submit-fence
         * operation is atomic.
         */

        /* An interrupted lock attempt is reported as -EAGAIN so that
         * user space simply restarts the ioctl. */
        ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
        if (ret) {
                drm_bo_read_unlock(&dev->bm.bm_lock);
                return -EAGAIN;
        }

        num_buffers = exec_buf->num_buffers;

        /* Lazily allocate the (large) validate-buffer scratch array once;
         * it is reused across submissions while cmdbuf_mutex is held and
         * presumably freed at driver teardown — not here. */
        if (!dev_priv->val_bufs) {
                dev_priv->val_bufs =
                    vmalloc(sizeof(struct drm_i915_validate_buffer) *
                            dev_priv->max_validate_buffers);
        }
        if (!dev_priv->val_bufs) {
                drm_bo_read_unlock(&dev->bm.bm_lock);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                return -ENOMEM;
        }

        /* validate buffer list + fixup relocations */
        ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
                                        dev_priv->val_bufs, &num_buffers,
                                        &post_relocs);
        if (ret)
                goto out_err0;

        if (post_relocs) {
                ret = i915_post_relocs(file_priv, post_relocs,
                                       dev_priv->val_bufs, num_buffers);
                if (ret)
                        goto out_err0;
        }

        /* make sure all previous memory operations have passed */
        DRM_MEMORYBARRIER();

        if (!post_relocs) {
                /* Classic path: the batchbuffer is the last buffer in the
                 * validated list; execute it from its final GTT offset. */
                drm_agp_chipset_flush(dev);
                batch->start =
                    dev_priv->val_bufs[num_buffers - 1].buffer->offset;
        } else {
                /* Post-reloc path: batch->start is a relative offset into
                 * the first validated buffer. */
                batch->start += dev_priv->val_bufs[0].buffer->offset;
        }

        DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
                  batch->start, batch->used, batch->num_cliprects);

        ret = i915_dispatch_batchbuffer(dev, batch);
        if (ret)
                goto out_err0;
        /* Publish the breadcrumb of the just-dispatched batch so clients
         * polling the sarea can see submission progress. */
        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
        i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);

      out_err0:
        /* Copy relocation/fence results back to user space; this runs on
         * success and failure alike and may adjust the return value. */
        ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
        mutex_lock(&dev->struct_mutex);
        i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        drm_bo_read_unlock(&dev->bm.bm_lock);
        return ret;
}