intel-gem: Another checkpatch.pl pass.
linux-core/i915_gem.c (android-x86/external-libdrm.git)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "drm_compat.h"
31 #include "i915_drm.h"
32 #include "i915_drv.h"
33 #include <linux/swap.h>
34
35 static int
36 i915_gem_object_set_domain(struct drm_gem_object *obj,
37                             uint32_t read_domains,
38                             uint32_t write_domain);
39 static int
40 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
41                                  uint64_t offset,
42                                  uint64_t size,
43                                  uint32_t read_domains,
44                                  uint32_t write_domain);
45 int
46 i915_gem_set_domain(struct drm_gem_object *obj,
47                     struct drm_file *file_priv,
48                     uint32_t read_domains,
49                     uint32_t write_domain);
50 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
51 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
52 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
53
54 int
55 i915_gem_init_ioctl(struct drm_device *dev, void *data,
56                     struct drm_file *file_priv)
57 {
58         drm_i915_private_t *dev_priv = dev->dev_private;
59         struct drm_i915_gem_init *args = data;
60
61         mutex_lock(&dev->struct_mutex);
62
63         if (args->gtt_start >= args->gtt_end ||
64             (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
65             (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
66                 mutex_unlock(&dev->struct_mutex);
67                 return -EINVAL;
68         }
69
70         drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
71             args->gtt_end - args->gtt_start);
72
73         dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
74
75         mutex_unlock(&dev->struct_mutex);
76
77         return 0;
78 }
79
81 /**
82  * Creates a new mm object and returns a handle to it.
83  */
84 int
85 i915_gem_create_ioctl(struct drm_device *dev, void *data,
86                       struct drm_file *file_priv)
87 {
88         struct drm_i915_gem_create *args = data;
89         struct drm_gem_object *obj;
90         int handle, ret;
91
92         args->size = roundup(args->size, PAGE_SIZE);
93
94         /* Allocate the new object */
95         obj = drm_gem_object_alloc(dev, args->size);
96         if (obj == NULL)
97                 return -ENOMEM;
98
99         ret = drm_gem_handle_create(file_priv, obj, &handle);
100         mutex_lock(&dev->struct_mutex);
101         drm_gem_object_handle_unreference(obj);
102         mutex_unlock(&dev->struct_mutex);
103
104         if (ret)
105                 return ret;
106
107         args->handle = handle;
108
109         return 0;
110 }
111
112 /**
113  * Reads data from the object referenced by handle.
114  *
115  * On error, the contents of *data are undefined.
116  */
117 int
118 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
119                      struct drm_file *file_priv)
120 {
121         struct drm_i915_gem_pread *args = data;
122         struct drm_gem_object *obj;
123         struct drm_i915_gem_object *obj_priv;
124         ssize_t read;
125         loff_t offset;
126         int ret;
127
128         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
129         if (obj == NULL)
130                 return -EBADF;
131         obj_priv = obj->driver_private;
132
133         /* Bounds check source.
134          *
135          * XXX: This could use review for overflow issues...
136          */
137         if (args->offset > obj->size || args->size > obj->size ||
138             args->offset + args->size > obj->size) {
139                 drm_gem_object_unreference(obj);
140                 return -EINVAL;
141         }
142
143         mutex_lock(&dev->struct_mutex);
144
145         ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
146                                                I915_GEM_DOMAIN_CPU, 0);
147         if (ret != 0) {
148                 drm_gem_object_unreference(obj);
149                 mutex_unlock(&dev->struct_mutex);
                    return ret;
150         }
151
152         offset = args->offset;
153
154         read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
155                         args->size, &offset);
156         if (read != args->size) {
157                 drm_gem_object_unreference(obj);
158                 mutex_unlock(&dev->struct_mutex);
159                 if (read < 0)
160                         return read;
161                 else
162                         return -EINVAL;
163         }
164
165         drm_gem_object_unreference(obj);
166         mutex_unlock(&dev->struct_mutex);
167
168         return 0;
169 }
170
173 static int
174 i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
175                     struct drm_i915_gem_pwrite *args,
176                     struct drm_file *file_priv)
177 {
178         struct drm_i915_gem_object *obj_priv = obj->driver_private;
179         ssize_t remain;
180         loff_t offset;
181         char __user *user_data;
182         char *vaddr;
183         int i, o, l;
184         int ret = 0;
185         unsigned long pfn;
186         unsigned long unwritten;
187
188         user_data = (char __user *) (uintptr_t) args->data_ptr;
189         remain = args->size;
190         if (!access_ok(VERIFY_READ, user_data, remain))
191                 return -EFAULT;
192
194         mutex_lock(&dev->struct_mutex);
195         ret = i915_gem_object_pin(obj, 0);
196         if (ret) {
197                 mutex_unlock(&dev->struct_mutex);
198                 return ret;
199         }
200         ret = i915_gem_set_domain(obj, file_priv,
201                                   I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
202         if (ret)
203                 goto fail;
204
205         obj_priv = obj->driver_private;
206         offset = obj_priv->gtt_offset + args->offset;
207         obj_priv->dirty = 1;
208
209         while (remain > 0) {
210                 /* Operation in this page
211                  *
212                  * i = page number
213                  * o = offset within page
214                  * l = bytes to copy
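                     *
                     * For example (with 4 KiB pages), offset = 0x1ff0 and
                     * remain = 0x20 give i = 1, o = 0xff0 and l = 0x10, so
                     * the copy is split at the page boundary and the rest
                     * is written on the next iteration.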
215                  */
216                 i = offset >> PAGE_SHIFT;
217                 o = offset & (PAGE_SIZE-1);
218                 l = remain;
219                 if ((o + l) > PAGE_SIZE)
220                         l = PAGE_SIZE - o;
221
222                 pfn = (dev->agp->base >> PAGE_SHIFT) + i;
223
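                    /* Try the atomic kmap fast path below when available;
                     * if that copy faults, or DRM_KMAP_ATOMIC_PROT_PFN is
                     * not provided at all, the braced block falls back to
                     * a slow ioremap() copy of this page.
                     */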
224 #ifdef DRM_KMAP_ATOMIC_PROT_PFN
225                 /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
226                  */
227                 vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
228                                              __pgprot(__PAGE_KERNEL));
229 #if WATCH_PWRITE
230                 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
231                          i, o, l, pfn, vaddr);
232 #endif
233                 unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
234                                                               user_data, l);
235                 kunmap_atomic(vaddr, KM_USER0);
236
237                 if (unwritten)
238 #endif
239                 {
240                         vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
241 #if WATCH_PWRITE
242                         DRM_INFO("pwrite slow i %d o %d l %d "
243                                  "pfn %ld vaddr %p\n",
244                                  i, o, l, pfn, vaddr);
245 #endif
246                         if (vaddr == NULL) {
247                                 ret = -EFAULT;
248                                 goto fail;
249                         }
250                         unwritten = __copy_from_user(vaddr + o, user_data, l);
251 #if WATCH_PWRITE
252                         DRM_INFO("unwritten %ld\n", unwritten);
253 #endif
254                         iounmap(vaddr);
255                         if (unwritten) {
256                                 ret = -EFAULT;
257                                 goto fail;
258                         }
259                 }
260
261                 remain -= l;
262                 user_data += l;
263                 offset += l;
264         }
265 #if WATCH_PWRITE && 1
266         i915_gem_clflush_object(obj);
267         i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
268         i915_gem_clflush_object(obj);
269 #endif
270
271 fail:
272         i915_gem_object_unpin(obj);
273         mutex_unlock(&dev->struct_mutex);
274
275         return ret;
276 }
277
278 int
279 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
280                       struct drm_i915_gem_pwrite *args,
281                       struct drm_file *file_priv)
282 {
283         int ret;
284         loff_t offset;
285         ssize_t written;
286
287         mutex_lock(&dev->struct_mutex);
288
289         ret = i915_gem_set_domain(obj, file_priv,
290                                   I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
291         if (ret) {
292                 mutex_unlock(&dev->struct_mutex);
293                 return ret;
294         }
295
296         offset = args->offset;
297
298         written = vfs_write(obj->filp,
299                             (char __user *)(uintptr_t) args->data_ptr,
300                             args->size, &offset);
301         if (written != args->size) {
302                 mutex_unlock(&dev->struct_mutex);
303                 if (written < 0)
304                         return written;
305                 else
306                         return -EINVAL;
307         }
308
309         mutex_unlock(&dev->struct_mutex);
310
311         return 0;
312 }
313
314 /**
315  * Writes data to the object referenced by handle.
316  *
317  * On error, the contents of the buffer that were to be modified are undefined.
318  */
319 int
320 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
321                       struct drm_file *file_priv)
322 {
323         struct drm_i915_gem_pwrite *args = data;
324         struct drm_gem_object *obj;
325         struct drm_i915_gem_object *obj_priv;
326         int ret = 0;
327
328         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
329         if (obj == NULL)
330                 return -EBADF;
331         obj_priv = obj->driver_private;
332
333         /* Bounds check destination.
334          *
335          * XXX: This could use review for overflow issues...
336          */
337         if (args->offset > obj->size || args->size > obj->size ||
338             args->offset + args->size > obj->size) {
339                 drm_gem_object_unreference(obj);
340                 return -EINVAL;
341         }
342
343         /* We can only do the GTT pwrite on untiled buffers, as otherwise
344          * it would end up going through the fenced access, and we'll get
345          * different detiling behavior between reading and writing.
346          * pread/pwrite currently are reading and writing from the CPU
347          * perspective, requiring manual detiling by the client.
348          */
349         if (obj_priv->tiling_mode == I915_TILING_NONE &&
350             dev->gtt_total != 0)
351                 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
352         else
353                 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
354
355 #if WATCH_PWRITE
356         if (ret)
357                 DRM_INFO("pwrite failed %d\n", ret);
358 #endif
359
360         drm_gem_object_unreference(obj);
361
362         return ret;
363 }
364
365 /**
366  * Called when user space prepares to use an object
367  */
368 int
369 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
370                           struct drm_file *file_priv)
371 {
372         struct drm_i915_gem_set_domain *args = data;
373         struct drm_gem_object *obj;
374         int ret;
375
376         if (!(dev->driver->driver_features & DRIVER_GEM))
377                 return -ENODEV;
378
379         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
380         if (obj == NULL)
381                 return -EBADF;
382
383         mutex_lock(&dev->struct_mutex);
384         ret = i915_gem_set_domain(obj, file_priv,
385                                   args->read_domains, args->write_domain);
386         drm_gem_object_unreference(obj);
387         mutex_unlock(&dev->struct_mutex);
388         return ret;
389 }
390
391 /**
392  * Called when user space has done writes to this buffer
393  */
394 int
395 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
396                          struct drm_file *file_priv)
397 {
398         struct drm_i915_gem_sw_finish *args = data;
399         struct drm_gem_object *obj;
400         struct drm_i915_gem_object *obj_priv;
401         int ret = 0;
402
403         if (!(dev->driver->driver_features & DRIVER_GEM))
404                 return -ENODEV;
405
406         mutex_lock(&dev->struct_mutex);
407         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
408         if (obj == NULL) {
409                 mutex_unlock(&dev->struct_mutex);
410                 return -EBADF;
411         }
412
413 #if WATCH_BUF
414         DRM_INFO("%s: sw_finish %d (%p)\n",
415                  __func__, args->handle, obj);
416 #endif
417         obj_priv = obj->driver_private;
418
419         /* Pinned buffers may be scanout, so flush the cache */
420         if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
421                 i915_gem_clflush_object(obj);
422                 drm_agp_chipset_flush(dev);
423         }
424         drm_gem_object_unreference(obj);
425         mutex_unlock(&dev->struct_mutex);
426         return ret;
427 }
428
429 /**
430  * Maps the contents of an object, returning the address it is mapped
431  * into.
432  *
433  * While the mapping holds a reference on the contents of the object, it doesn't
434  * imply a ref on the object itself.
435  */
436 int
437 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
438                     struct drm_file *file_priv)
439 {
440         struct drm_i915_gem_mmap *args = data;
441         struct drm_gem_object *obj;
442         loff_t offset;
443         unsigned long addr;
444
445         if (!(dev->driver->driver_features & DRIVER_GEM))
446                 return -ENODEV;
447
448         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
449         if (obj == NULL)
450                 return -EBADF;
451
452         offset = args->offset;
453
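            /* The object's pages live in the shmem file at obj->filp, so an
             * ordinary shared mapping of that file gives userspace CPU
             * access to the same storage that may later be bound into the
             * GTT.
             */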
454         down_write(&current->mm->mmap_sem);
455         addr = do_mmap(obj->filp, 0, args->size,
456                        PROT_READ | PROT_WRITE, MAP_SHARED,
457                        args->offset);
458         up_write(&current->mm->mmap_sem);
459         mutex_lock(&dev->struct_mutex);
460         drm_gem_object_unreference(obj);
461         mutex_unlock(&dev->struct_mutex);
462         if (IS_ERR((void *)addr))
463                 return addr;
464
465         args->addr_ptr = (uint64_t) addr;
466
467         return 0;
468 }
469
470 static void
471 i915_gem_object_free_page_list(struct drm_gem_object *obj)
472 {
473         struct drm_i915_gem_object *obj_priv = obj->driver_private;
474         int page_count = obj->size / PAGE_SIZE;
475         int i;
476
477         if (obj_priv->page_list == NULL)
478                 return;
479
481         for (i = 0; i < page_count; i++)
482                 if (obj_priv->page_list[i] != NULL) {
483                         if (obj_priv->dirty)
484                                 set_page_dirty(obj_priv->page_list[i]);
485                         mark_page_accessed(obj_priv->page_list[i]);
486                         page_cache_release(obj_priv->page_list[i]);
487                 }
488         obj_priv->dirty = 0;
489
490         drm_free(obj_priv->page_list,
491                  page_count * sizeof(struct page *),
492                  DRM_MEM_DRIVER);
493         obj_priv->page_list = NULL;
494 }
495
496 static void
497 i915_gem_object_move_to_active(struct drm_gem_object *obj)
498 {
499         struct drm_device *dev = obj->dev;
500         drm_i915_private_t *dev_priv = dev->dev_private;
501         struct drm_i915_gem_object *obj_priv = obj->driver_private;
502
503         /* Add a reference if we're newly entering the active list. */
504         if (!obj_priv->active) {
505                 drm_gem_object_reference(obj);
506                 obj_priv->active = 1;
507         }
508         /* Move from whatever list we were on to the tail of execution. */
509         list_move_tail(&obj_priv->list,
510                        &dev_priv->mm.active_list);
511 }
512
514 static void
515 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
516 {
517         struct drm_device *dev = obj->dev;
518         drm_i915_private_t *dev_priv = dev->dev_private;
519         struct drm_i915_gem_object *obj_priv = obj->driver_private;
520
521         i915_verify_inactive(dev, __FILE__, __LINE__);
522         if (obj_priv->pin_count != 0)
523                 list_del_init(&obj_priv->list);
524         else
525                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
526
527         if (obj_priv->active) {
528                 obj_priv->active = 0;
529                 drm_gem_object_unreference(obj);
530         }
531         i915_verify_inactive(dev, __FILE__, __LINE__);
532 }
533
534 /**
535  * Creates a new sequence number, emitting a write of it to the status page
536  * plus an interrupt, which will trigger i915_user_interrupt_handler.
537  *
538  * Must be called with struct_lock held.
539  *
540  * Returned sequence numbers are nonzero on success.
541  */
542 static uint32_t
543 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
544 {
545         drm_i915_private_t *dev_priv = dev->dev_private;
546         struct drm_i915_gem_request *request;
547         uint32_t seqno;
548         int was_empty;
549         RING_LOCALS;
550
551         request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
552         if (request == NULL)
553                 return 0;
554
555         /* Grab the seqno we're going to make this request be, and bump the
556          * next (skipping 0 so it can be the reserved no-seqno value).
557          */
558         seqno = dev_priv->mm.next_gem_seqno;
559         dev_priv->mm.next_gem_seqno++;
560         if (dev_priv->mm.next_gem_seqno == 0)
561                 dev_priv->mm.next_gem_seqno++;
562
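            /* Write the new seqno into the hardware status page and follow
             * it with a user interrupt, so that waiters sleeping in
             * i915_wait_request() are woken once the ring reaches this
             * point.
             */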
563         BEGIN_LP_RING(4);
564         OUT_RING(CMD_STORE_DWORD_IDX);
565         OUT_RING(I915_GEM_HWS_INDEX << STORE_DWORD_INDEX_SHIFT);
566         OUT_RING(seqno);
567
568         OUT_RING(GFX_OP_USER_INTERRUPT);
569         ADVANCE_LP_RING();
570
571         DRM_DEBUG("%d\n", seqno);
572
573         request->seqno = seqno;
574         request->emitted_jiffies = jiffies;
575         request->flush_domains = flush_domains;
576         was_empty = list_empty(&dev_priv->mm.request_list);
577         list_add_tail(&request->list, &dev_priv->mm.request_list);
578
579         if (was_empty)
580                 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
581         return seqno;
582 }
583
584 /**
585  * Command execution barrier
586  *
587  * Ensures that all commands in the ring are finished
588  * before signalling the CPU
589  */
590 uint32_t
591 i915_retire_commands(struct drm_device *dev)
592 {
593         drm_i915_private_t *dev_priv = dev->dev_private;
594         uint32_t cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
595         uint32_t flush_domains = 0;
596         RING_LOCALS;
597
598         /* The sampler always gets flushed on i965 (sigh) */
599         if (IS_I965G(dev))
600                 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
601         BEGIN_LP_RING(2);
602         OUT_RING(cmd);
603         OUT_RING(0); /* noop */
604         ADVANCE_LP_RING();
605         return flush_domains;
606 }
607
608 /**
609  * Moves buffers associated only with the given active seqno from the active
610  * to inactive list, potentially freeing them.
611  */
612 static void
613 i915_gem_retire_request(struct drm_device *dev,
614                         struct drm_i915_gem_request *request)
615 {
616         drm_i915_private_t *dev_priv = dev->dev_private;
617
618         if (request->flush_domains != 0) {
619                 struct drm_i915_gem_object *obj_priv, *next;
620
621                 /* First clear any buffers that were only waiting for a flush
622                  * matching the one just retired.
623                  */
624
625                 list_for_each_entry_safe(obj_priv, next,
626                                          &dev_priv->mm.flushing_list, list) {
627                         struct drm_gem_object *obj = obj_priv->obj;
628
629                         if (obj->write_domain & request->flush_domains) {
630                                 obj->write_domain = 0;
631                                 i915_gem_object_move_to_inactive(obj);
632                         }
633                 }
634
635         }
636
637         /* Move any buffers on the active list that are no longer referenced
638          * by the ringbuffer to the flushing/inactive lists as appropriate.
639          */
640         while (!list_empty(&dev_priv->mm.active_list)) {
641                 struct drm_gem_object *obj;
642                 struct drm_i915_gem_object *obj_priv;
643
644                 obj_priv = list_first_entry(&dev_priv->mm.active_list,
645                                             struct drm_i915_gem_object,
646                                             list);
647                 obj = obj_priv->obj;
648
649                 /* If the seqno being retired doesn't match the oldest in the
650                  * list, then the oldest in the list must still be newer than
651                  * this seqno.
652                  */
653                 if (obj_priv->last_rendering_seqno != request->seqno)
654                         return;
655 #if WATCH_LRU
656                 DRM_INFO("%s: retire %d moves to inactive list %p\n",
657                          __func__, request->seqno, obj);
658 #endif
659
660                 if (obj->write_domain != 0) {
661                         list_move_tail(&obj_priv->list,
662                                        &dev_priv->mm.flushing_list);
663                 } else {
664                         i915_gem_object_move_to_inactive(obj);
665                 }
666         }
667 }
668
669 /**
670  * Returns true if seq1 is later than seq2.
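     *
     * The signed subtraction makes the comparison robust across 32-bit
     * wraparound: for example, seq1 = 0x00000002 and seq2 = 0xfffffffd
     * give (int32_t)(seq1 - seq2) = 5 >= 0, so seq1 is still treated as
     * later even though it is numerically smaller.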
671  */
672 static int
673 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
674 {
675         return (int32_t)(seq1 - seq2) >= 0;
676 }
677
678 uint32_t
679 i915_get_gem_seqno(struct drm_device *dev)
680 {
681         drm_i915_private_t *dev_priv = dev->dev_private;
682
683         return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
684 }
685
686 /**
687  * This function clears the request list as sequence numbers are passed.
688  */
689 void
690 i915_gem_retire_requests(struct drm_device *dev)
691 {
692         drm_i915_private_t *dev_priv = dev->dev_private;
693         uint32_t seqno;
694
695         seqno = i915_get_gem_seqno(dev);
696
697         while (!list_empty(&dev_priv->mm.request_list)) {
698                 struct drm_i915_gem_request *request;
699                 uint32_t retiring_seqno;
700
701                 request = list_first_entry(&dev_priv->mm.request_list,
702                                            struct drm_i915_gem_request,
703                                            list);
704                 retiring_seqno = request->seqno;
705
706                 if (i915_seqno_passed(seqno, retiring_seqno) ||
707                     dev_priv->mm.wedged) {
708                         i915_gem_retire_request(dev, request);
709
710                         list_del(&request->list);
711                         drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
712                 } else
713                         break;
714         }
715 }
716
717 void
718 i915_gem_retire_work_handler(struct work_struct *work)
719 {
720         drm_i915_private_t *dev_priv;
721         struct drm_device *dev;
722
723         dev_priv = container_of(work, drm_i915_private_t,
724                                 mm.retire_work.work);
725         dev = dev_priv->dev;
726
727         mutex_lock(&dev->struct_mutex);
728         i915_gem_retire_requests(dev);
729         if (!list_empty(&dev_priv->mm.request_list))
730                 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
731         mutex_unlock(&dev->struct_mutex);
732 }
733
734 /**
735  * Waits for a sequence number to be signaled, and cleans up the
736  * request and object lists appropriately for that event.
737  */
738 int
739 i915_wait_request(struct drm_device *dev, uint32_t seqno)
740 {
741         drm_i915_private_t *dev_priv = dev->dev_private;
742         int ret = 0;
743
744         BUG_ON(seqno == 0);
745
746         if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
747                 dev_priv->mm.waiting_gem_seqno = seqno;
748                 i915_user_irq_on(dev_priv);
749                 ret = wait_event_interruptible(dev_priv->irq_queue,
750                                                i915_seqno_passed(i915_get_gem_seqno(dev),
751                                                                  seqno) ||
752                                                dev_priv->mm.wedged);
753                 i915_user_irq_off(dev_priv);
754                 dev_priv->mm.waiting_gem_seqno = 0;
755         }
756         if (dev_priv->mm.wedged)
757                 ret = -EIO;
758
759         if (ret)
760                 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
761                           __func__, ret, seqno, i915_get_gem_seqno(dev));
762
763         /* Directly dispatch request retiring.  While we have the work queue
764          * to handle this, the waiter on a request often wants an associated
765          * buffer to have made it to the inactive list, and we would need
766          * a separate wait queue to handle that.
767          */
768         if (ret == 0)
769                 i915_gem_retire_requests(dev);
770
771         return ret;
772 }
773
774 static void
775 i915_gem_flush(struct drm_device *dev,
776                uint32_t invalidate_domains,
777                uint32_t flush_domains)
778 {
779         drm_i915_private_t *dev_priv = dev->dev_private;
780         uint32_t cmd;
781         RING_LOCALS;
782
783 #if WATCH_EXEC
784         DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
785                   invalidate_domains, flush_domains);
786 #endif
787
788         if (flush_domains & I915_GEM_DOMAIN_CPU)
789                 drm_agp_chipset_flush(dev);
790
791         if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
792                                                      I915_GEM_DOMAIN_GTT)) {
793                 /*
794                  * read/write caches:
795                  *
796                  * I915_GEM_DOMAIN_RENDER is always invalidated, but is
797                  * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
798                  * also flushed at 2d versus 3d pipeline switches.
799                  *
800                  * read-only caches:
801                  *
802                  * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
803                  * MI_READ_FLUSH is set, and is always flushed on 965.
804                  *
805                  * I915_GEM_DOMAIN_COMMAND may not exist?
806                  *
807                  * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
808                  * invalidated when MI_EXE_FLUSH is set.
809                  *
810                  * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
811                  * invalidated with every MI_FLUSH.
812                  *
813                  * TLBs:
814                  *
815                  * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
816                  * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
817                  * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
818                  * are flushed at any MI_FLUSH.
819                  */
820
821                 cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
822                 if ((invalidate_domains|flush_domains) &
823                     I915_GEM_DOMAIN_RENDER)
824                         cmd &= ~MI_NO_WRITE_FLUSH;
825                 if (!IS_I965G(dev)) {
826                         /*
827                          * On the 965, the sampler cache always gets flushed
828                          * and this bit is reserved.
829                          */
830                         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
831                                 cmd |= MI_READ_FLUSH;
832                 }
833                 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
834                         cmd |= MI_EXE_FLUSH;
835
836 #if WATCH_EXEC
837                 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
838 #endif
839                 BEGIN_LP_RING(2);
840                 OUT_RING(cmd);
841                 OUT_RING(0); /* noop */
842                 ADVANCE_LP_RING();
843         }
844 }
845
846 /**
847  * Ensures that all rendering to the object has completed and the object is
848  * safe to unbind from the GTT or access from the CPU.
849  */
850 static int
851 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
852 {
853         struct drm_device *dev = obj->dev;
854         struct drm_i915_gem_object *obj_priv = obj->driver_private;
855         int ret;
856
857         /* If there are writes queued to the buffer, flush and
858          * create a new seqno to wait for.
859          */
860         if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
861                 uint32_t write_domain = obj->write_domain;
862 #if WATCH_BUF
863                 DRM_INFO("%s: flushing object %p from write domain %08x\n",
864                           __func__, obj, write_domain);
865 #endif
866                 i915_gem_flush(dev, 0, write_domain);
867                 obj->write_domain = 0;
868
869                 i915_gem_object_move_to_active(obj);
870                 obj_priv->last_rendering_seqno = i915_add_request(dev,
871                                                                   write_domain);
872                 BUG_ON(obj_priv->last_rendering_seqno == 0);
873 #if WATCH_LRU
874                 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
875 #endif
876         }
877         /* If there is rendering queued on the buffer being evicted, wait for
878          * it.
879          */
880         if (obj_priv->active) {
881 #if WATCH_BUF
882                 DRM_INFO("%s: object %p wait for seqno %08x\n",
883                           __func__, obj, obj_priv->last_rendering_seqno);
884 #endif
885                 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
886                 if (ret != 0)
887                         return ret;
888         }
889
890         return 0;
891 }
892
893 /**
894  * Unbinds an object from the GTT aperture.
895  */
896 static int
897 i915_gem_object_unbind(struct drm_gem_object *obj)
898 {
899         struct drm_device *dev = obj->dev;
900         struct drm_i915_gem_object *obj_priv = obj->driver_private;
901         int ret = 0;
902
903 #if WATCH_BUF
904         DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
905         DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
906 #endif
907         if (obj_priv->gtt_space == NULL)
908                 return 0;
909
910         if (obj_priv->pin_count != 0) {
911                 DRM_ERROR("Attempting to unbind pinned buffer\n");
912                 return -EINVAL;
913         }
914
915         /* Wait for any rendering to complete
916          */
917         ret = i915_gem_object_wait_rendering(obj);
918         if (ret) {
919                 DRM_ERROR("wait_rendering failed: %d\n", ret);
920                 return ret;
921         }
922
923         /* Move the object to the CPU domain to ensure that
924          * any possible CPU writes while it's not in the GTT
925          * are flushed when we go to remap it. This will
926          * also ensure that all pending GPU writes are finished
927          * before we unbind.
928          */
929         ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
930                                          I915_GEM_DOMAIN_CPU);
931         if (ret) {
932                 DRM_ERROR("set_domain failed: %d\n", ret);
933                 return ret;
934         }
935
936         if (obj_priv->agp_mem != NULL) {
937                 drm_unbind_agp(obj_priv->agp_mem);
938                 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
939                 obj_priv->agp_mem = NULL;
940         }
941
942         BUG_ON(obj_priv->active);
943
944         i915_gem_object_free_page_list(obj);
945
946         if (obj_priv->gtt_space) {
947                 atomic_dec(&dev->gtt_count);
948                 atomic_sub(obj->size, &dev->gtt_memory);
949
950                 drm_memrange_put_block(obj_priv->gtt_space);
951                 obj_priv->gtt_space = NULL;
952         }
953
954         /* Remove ourselves from the LRU list if present. */
955         if (!list_empty(&obj_priv->list))
956                 list_del_init(&obj_priv->list);
957
958         return 0;
959 }
960
961 static int
962 i915_gem_evict_something(struct drm_device *dev)
963 {
964         drm_i915_private_t *dev_priv = dev->dev_private;
965         struct drm_gem_object *obj;
966         struct drm_i915_gem_object *obj_priv;
967         int ret = 0;
968
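            /* Strategy: take the first inactive buffer if there is one;
             * otherwise wait for the oldest outstanding request to retire;
             * otherwise emit a flush so buffers on the flushing list can
             * become inactive, and then try again.
             */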
969         for (;;) {
970                 /* If there's an inactive buffer available now, grab it
971                  * and be done.
972                  */
973                 if (!list_empty(&dev_priv->mm.inactive_list)) {
974                         obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
975                                                     struct drm_i915_gem_object,
976                                                     list);
977                         obj = obj_priv->obj;
978                         BUG_ON(obj_priv->pin_count != 0);
979 #if WATCH_LRU
980                         DRM_INFO("%s: evicting %p\n", __func__, obj);
981 #endif
982                         BUG_ON(obj_priv->active);
983
984                         /* Wait on the rendering and unbind the buffer. */
985                         ret = i915_gem_object_unbind(obj);
986                         break;
987                 }
988
989                 /* If we didn't get anything, but the ring is still processing
990                  * things, wait for one of those things to finish and hopefully
991                  * leave us a buffer to evict.
992                  */
993                 if (!list_empty(&dev_priv->mm.request_list)) {
994                         struct drm_i915_gem_request *request;
995
996                         request = list_first_entry(&dev_priv->mm.request_list,
997                                                    struct drm_i915_gem_request,
998                                                    list);
999
1000                         ret = i915_wait_request(dev, request->seqno);
1001                         if (ret)
1002                                 break;
1003
1004                         /* if waiting caused an object to become inactive,
1005                          * then loop around and wait for it. Otherwise, we
1006                          * assume that waiting freed and unbound something,
1007                          * so there should now be some space in the GTT
1008                          */
1009                         if (!list_empty(&dev_priv->mm.inactive_list))
1010                                 continue;
1011                         break;
1012                 }
1013
1014                 /* If we didn't have anything on the request list but there
1015                  * are buffers awaiting a flush, emit one and try again.
1016                  * When we wait on it, those buffers waiting for that flush
1017                  * will get moved to inactive.
1018                  */
1019                 if (!list_empty(&dev_priv->mm.flushing_list)) {
1020                         obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1021                                                     struct drm_i915_gem_object,
1022                                                     list);
1023                         obj = obj_priv->obj;
1024
1025                         i915_gem_flush(dev,
1026                                        obj->write_domain,
1027                                        obj->write_domain);
1028                         i915_add_request(dev, obj->write_domain);
1029
1030                         obj = NULL;
1031                         continue;
1032                 }
1033
1034                 DRM_ERROR("inactive empty %d request empty %d "
1035                           "flushing empty %d\n",
1036                           list_empty(&dev_priv->mm.inactive_list),
1037                           list_empty(&dev_priv->mm.request_list),
1038                           list_empty(&dev_priv->mm.flushing_list));
1039                 /* If we didn't do any of the above, there's nothing to be done
1040                  * and we just can't fit it in.
1041                  */
1042                 return -ENOMEM;
1043         }
1044         return ret;
1045 }
1046
1047 static int
1048 i915_gem_object_get_page_list(struct drm_gem_object *obj)
1049 {
1050         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1051         int page_count, i;
1052         struct address_space *mapping;
1053         struct inode *inode;
1054         struct page *page;
1055         int ret;
1056
1057         if (obj_priv->page_list)
1058                 return 0;
1059
1060         /* Get the list of pages out of our struct file.  They'll be pinned
1061          * at this point until we release them.
1062          */
1063         page_count = obj->size / PAGE_SIZE;
1064         BUG_ON(obj_priv->page_list != NULL);
1065         obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1066                                          DRM_MEM_DRIVER);
1067         if (obj_priv->page_list == NULL) {
1068                 DRM_ERROR("Failed to allocate page list\n");
1069                 return -ENOMEM;
1070         }
1071
1072         inode = obj->filp->f_path.dentry->d_inode;
1073         mapping = inode->i_mapping;
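             /* Look each page up in the page cache first; anything missing
              * or not up to date is read in through shmem_getpage(), which
              * returns the page locked.
              */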
1074         for (i = 0; i < page_count; i++) {
1075                 page = find_get_page(mapping, i);
1076                 if (page == NULL || !PageUptodate(page)) {
1077                         if (page) {
1078                                 page_cache_release(page);
1079                                 page = NULL;
1080                         }
1081                         ret = shmem_getpage(inode, i, &page, SGP_DIRTY, NULL);
1082
1083                         if (ret) {
1084                                 DRM_ERROR("shmem_getpage failed: %d\n", ret);
1085                                 i915_gem_object_free_page_list(obj);
1086                                 return ret;
1087                         }
1088                         unlock_page(page);
1089                 }
1090                 obj_priv->page_list[i] = page;
1091         }
1092         return 0;
1093 }
1094
1095 /**
1096  * Finds free space in the GTT aperture and binds the object there.
1097  */
1098 static int
1099 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1100 {
1101         struct drm_device *dev = obj->dev;
1102         drm_i915_private_t *dev_priv = dev->dev_private;
1103         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1104         struct drm_memrange_node *free_space;
1105         int page_count, ret;
1106
1107         if (alignment == 0)
1108                 alignment = PAGE_SIZE;
1109         if (alignment & (PAGE_SIZE - 1)) {
1110                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1111                 return -EINVAL;
1112         }
1113
1114  search_free:
1115         free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
1116                                               obj->size,
1117                                               alignment, 0);
1118         if (free_space != NULL) {
1119                 obj_priv->gtt_space =
1120                         drm_memrange_get_block(free_space, obj->size,
1121                                                alignment);
1122                 if (obj_priv->gtt_space != NULL) {
1123                         obj_priv->gtt_space->private = obj;
1124                         obj_priv->gtt_offset = obj_priv->gtt_space->start;
1125                 }
1126         }
1127         if (obj_priv->gtt_space == NULL) {
1128                 /* If the gtt is empty and we're still having trouble
1129                  * fitting our object in, we're out of memory.
1130                  */
1131 #if WATCH_LRU
1132                 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1133 #endif
1134                 if (list_empty(&dev_priv->mm.inactive_list) &&
1135                     list_empty(&dev_priv->mm.flushing_list) &&
1136                     list_empty(&dev_priv->mm.active_list)) {
1137                         DRM_ERROR("GTT full, but LRU list empty\n");
1138                         return -ENOMEM;
1139                 }
1140
1141                 ret = i915_gem_evict_something(dev);
1142                 if (ret != 0) {
1143                         DRM_ERROR("Failed to evict a buffer %d\n", ret);
1144                         return ret;
1145                 }
1146                 goto search_free;
1147         }
1148
1149 #if WATCH_BUF
1150         DRM_INFO("Binding object of size %d at 0x%08x\n",
1151                  obj->size, obj_priv->gtt_offset);
1152 #endif
1153         ret = i915_gem_object_get_page_list(obj);
1154         if (ret) {
1155                 drm_memrange_put_block(obj_priv->gtt_space);
1156                 obj_priv->gtt_space = NULL;
1157                 return ret;
1158         }
1159
1160         page_count = obj->size / PAGE_SIZE;
1161         /* Create an AGP memory structure pointing at our pages, and bind it
1162          * into the GTT.
1163          */
1164         obj_priv->agp_mem = drm_agp_bind_pages(dev,
1165                                                obj_priv->page_list,
1166                                                page_count,
1167                                                obj_priv->gtt_offset);
1168         if (obj_priv->agp_mem == NULL) {
1169                 i915_gem_object_free_page_list(obj);
1170                 drm_memrange_put_block(obj_priv->gtt_space);
1171                 obj_priv->gtt_space = NULL;
1172                 return -ENOMEM;
1173         }
1174         atomic_inc(&dev->gtt_count);
1175         atomic_add(obj->size, &dev->gtt_memory);
1176
1177         /* Assert that the object is not currently in any GPU domain. As it
1178          * wasn't in the GTT, there shouldn't be any way it could have been in
1179          * a GPU cache
1180          */
1181         BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1182         BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1183
1184         return 0;
1185 }
1186
1187 void
1188 i915_gem_clflush_object(struct drm_gem_object *obj)
1189 {
1190         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
1191
1192         /* If we don't have a page list set up, then we're not pinned
1193          * to GPU, and we can ignore the cache flush because it'll happen
1194          * again at bind time.
1195          */
1196         if (obj_priv->page_list == NULL)
1197                 return;
1198
1199         drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
1200 }
1201
1202 /*
1203  * Set the next domain for the specified object. This
1204  * may not actually perform the necessary flushing/invalidating though,
1205  * as that may want to be batched with other set_domain operations
1206  *
1207  * This is (we hope) the only really tricky part of gem. The goal
1208  * is fairly simple -- track which caches hold bits of the object
1209  * and make sure they remain coherent. A few concrete examples may
1210  * help to explain how it works. For shorthand, we use the notation
1211  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
1212  * a pair of read and write domain masks.
1213  *
1214  * Case 1: the batch buffer
1215  *
1216  *      1. Allocated
1217  *      2. Written by CPU
1218  *      3. Mapped to GTT
1219  *      4. Read by GPU
1220  *      5. Unmapped from GTT
1221  *      6. Freed
1222  *
1223  *      Let's take these a step at a time
1224  *
1225  *      1. Allocated
1226  *              Pages allocated from the kernel may still have
1227  *              cache contents, so we set them to (CPU, CPU) always.
1228  *      2. Written by CPU (using pwrite)
1229  *              The pwrite function calls set_domain (CPU, CPU) and
1230  *              this function does nothing (as nothing changes)
1231  *      3. Mapped by GTT
1232  *              This function asserts that the object is not
1233  *              currently in any GPU-based read or write domains
1234  *      4. Read by GPU
1235  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
1236  *              As write_domain is zero, this function adds in the
1237  *              current read domains (CPU+COMMAND, 0).
1238  *              flush_domains is set to CPU.
1239  *              invalidate_domains is set to COMMAND
1240  *              clflush is run to get data out of the CPU caches
1241  *              then i915_dev_set_domain calls i915_gem_flush to
1242  *              emit an MI_FLUSH and drm_agp_chipset_flush
1243  *      5. Unmapped from GTT
1244  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
1245  *              flush_domains and invalidate_domains end up both zero
1246  *              so no flushing/invalidating happens
1247  *      6. Freed
1248  *              yay, done
1249  *
1250  * Case 2: The shared render buffer
1251  *
1252  *      1. Allocated
1253  *      2. Mapped to GTT
1254  *      3. Read/written by GPU
1255  *      4. set_domain to (CPU,CPU)
1256  *      5. Read/written by CPU
1257  *      6. Read/written by GPU
1258  *
1259  *      1. Allocated
1260  *              Same as last example, (CPU, CPU)
1261  *      2. Mapped to GTT
1262  *              Nothing changes (assertions find that it is not in the GPU)
1263  *      3. Read/written by GPU
1264  *              execbuffer calls set_domain (RENDER, RENDER)
1265  *              flush_domains gets CPU
1266  *              invalidate_domains gets GPU
1267  *              clflush (obj)
1268  *              MI_FLUSH and drm_agp_chipset_flush
1269  *      4. set_domain (CPU, CPU)
1270  *              flush_domains gets GPU
1271  *              invalidate_domains gets CPU
1272  *              wait_rendering (obj) to make sure all drawing is complete.
1273  *              This will include an MI_FLUSH to get the data from GPU
1274  *              to memory
1275  *              clflush (obj) to invalidate the CPU cache
1276  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1277  *      5. Read/written by CPU
1278  *              cache lines are loaded and dirtied
1279  *      6. Read/written by GPU
1280  *              Same as last GPU access
1281  *
1282  * Case 3: The constant buffer
1283  *
1284  *      1. Allocated
1285  *      2. Written by CPU
1286  *      3. Read by GPU
1287  *      4. Updated (written) by CPU again
1288  *      5. Read by GPU
1289  *
1290  *      1. Allocated
1291  *              (CPU, CPU)
1292  *      2. Written by CPU
1293  *              (CPU, CPU)
1294  *      3. Read by GPU
1295  *              (CPU+RENDER, 0)
1296  *              flush_domains = CPU
1297  *              invalidate_domains = RENDER
1298  *              clflush (obj)
1299  *              MI_FLUSH
1300  *              drm_agp_chipset_flush
1301  *      4. Updated (written) by CPU again
1302  *              (CPU, CPU)
1303  *              flush_domains = 0 (no previous write domain)
1304  *              invalidate_domains = 0 (no new read domains)
1305  *      5. Read by GPU
1306  *              (CPU+RENDER, 0)
1307  *              flush_domains = CPU
1308  *              invalidate_domains = RENDER
1309  *              clflush (obj)
1310  *              MI_FLUSH
1311  *              drm_agp_chipset_flush
1312  */
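     /* Condensed, the bookkeeping below roughly amounts to:
      *
      *      flush_domains      |= obj->write_domain  (if the new read set
      *                                                differs from it)
      *      invalidate_domains |= read_domains & ~obj->read_domains
      *
      * with a clflush whenever the CPU domain appears in either set; the
      * accumulated device-wide domains are flushed later by
      * i915_gem_dev_set_domain().
      */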
1313 static int
1314 i915_gem_object_set_domain(struct drm_gem_object *obj,
1315                             uint32_t read_domains,
1316                             uint32_t write_domain)
1317 {
1318         struct drm_device               *dev = obj->dev;
1319         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
1320         uint32_t                        invalidate_domains = 0;
1321         uint32_t                        flush_domains = 0;
1322         int                             ret;
1323
1324 #if WATCH_BUF
1325         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1326                  __func__, obj,
1327                  obj->read_domains, read_domains,
1328                  obj->write_domain, write_domain);
1329 #endif
1330         /*
1331          * If the object isn't moving to a new write domain,
1332          * let the object stay in multiple read domains
1333          */
1334         if (write_domain == 0)
1335                 read_domains |= obj->read_domains;
1336         else
1337                 obj_priv->dirty = 1;
1338
1339         /*
1340          * Flush the current write domain if
1341          * the new read domains don't match. Invalidate
1342          * any read domains which differ from the old
1343          * write domain
1344          */
1345         if (obj->write_domain && obj->write_domain != read_domains) {
1346                 flush_domains |= obj->write_domain;
1347                 invalidate_domains |= read_domains & ~obj->write_domain;
1348         }
1349         /*
1350          * Invalidate any read caches which may have
1351          * stale data. That is, any new read domains.
1352          */
1353         invalidate_domains |= read_domains & ~obj->read_domains;
1354         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
1355 #if WATCH_BUF
1356                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1357                          __func__, flush_domains, invalidate_domains);
1358 #endif
1359                 /*
1360                  * If we're invalidating the CPU cache and flushing a GPU cache,
 1361                  * then pause for rendering so that the GPU caches will be
 1362                  * flushed before the CPU cache is invalidated
1363                  */
1364                 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1365                     (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1366                                        I915_GEM_DOMAIN_GTT))) {
1367                         ret = i915_gem_object_wait_rendering(obj);
1368                         if (ret)
1369                                 return ret;
1370                 }
1371                 i915_gem_clflush_object(obj);
1372         }
1373
1374         if ((write_domain | flush_domains) != 0)
1375                 obj->write_domain = write_domain;
1376
1377         /* If we're invalidating the CPU domain, clear the per-page CPU
1378          * domain list as well.
1379          */
1380         if (obj_priv->page_cpu_valid != NULL &&
1381             (obj->read_domains & I915_GEM_DOMAIN_CPU) &&
1382             ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) {
1383                 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
1384         }
1385         obj->read_domains = read_domains;
1386
1387         dev->invalidate_domains |= invalidate_domains;
1388         dev->flush_domains |= flush_domains;
1389 #if WATCH_BUF
1390         DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1391                  __func__,
1392                  obj->read_domains, obj->write_domain,
1393                  dev->invalidate_domains, dev->flush_domains);
1394 #endif
1395         return 0;
1396 }
1397
1398 /**
1399  * Set the read/write domain on a range of the object.
1400  *
1401  * Currently only implemented for CPU reads, otherwise drops to normal
1402  * i915_gem_object_set_domain().
1403  */
1404 static int
1405 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1406                                  uint64_t offset,
1407                                  uint64_t size,
1408                                  uint32_t read_domains,
1409                                  uint32_t write_domain)
1410 {
1411         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1412         int ret, i;
1413
1414         if (obj->read_domains & I915_GEM_DOMAIN_CPU)
1415                 return 0;
1416
1417         if (read_domains != I915_GEM_DOMAIN_CPU ||
1418             write_domain != 0)
1419                 return i915_gem_object_set_domain(obj,
1420                                                   read_domains, write_domain);
1421
1422         /* Wait on any GPU rendering to the object to be flushed. */
1423         if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
1424                 ret = i915_gem_object_wait_rendering(obj);
1425                 if (ret)
1426                         return ret;
1427         }
1428
1429         if (obj_priv->page_cpu_valid == NULL) {
1430                 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1431                                                       DRM_MEM_DRIVER);
                     if (obj_priv->page_cpu_valid == NULL)
                             return -ENOMEM;
 1432         }
1433
1434         /* Flush the cache on any pages that are still invalid from the CPU's
1435          * perspective.
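              * The loop covers every page that overlaps the byte range
              * [offset, offset + size), i.e. pages offset / PAGE_SIZE
              * through (offset + size - 1) / PAGE_SIZE inclusive.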
1436          */
1437         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
1438                 if (obj_priv->page_cpu_valid[i])
1439                         continue;
1440
1441                 drm_ttm_cache_flush(obj_priv->page_list + i, 1);
1442
1443                 obj_priv->page_cpu_valid[i] = 1;
1444         }
1445
1446         return 0;
1447 }
1448
1449 /**
1450  * Once all of the objects have been set in the proper domain,
1451  * perform the necessary flush and invalidate operations.
1452  *
1453  * Returns the write domains flushed, for use in flush tracking.
1454  */
1455 static uint32_t
1456 i915_gem_dev_set_domain(struct drm_device *dev)
1457 {
1458         uint32_t flush_domains = dev->flush_domains;
1459
1460         /*
1461          * Now that all the buffers are synced to the proper domains,
1462          * flush and invalidate the collected domains
1463          */
1464         if (dev->invalidate_domains | dev->flush_domains) {
1465 #if WATCH_EXEC
1466                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1467                           __func__,
1468                          dev->invalidate_domains,
1469                          dev->flush_domains);
1470 #endif
1471                 i915_gem_flush(dev,
1472                                dev->invalidate_domains,
1473                                dev->flush_domains);
1474                 dev->invalidate_domains = 0;
1475                 dev->flush_domains = 0;
1476         }
1477
1478         return flush_domains;
1479 }
1480
1481 /**
1482  * Pin an object to the GTT and evaluate the relocations landing in it.
1483  */
1484 static int
1485 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1486                                  struct drm_file *file_priv,
1487                                  struct drm_i915_gem_exec_object *entry)
1488 {
1489         struct drm_device *dev = obj->dev;
1490         struct drm_i915_gem_relocation_entry reloc;
1491         struct drm_i915_gem_relocation_entry __user *relocs;
1492         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1493         int i, ret;
1494         uint32_t last_reloc_offset = -1;
1495         void *reloc_page = NULL;
1496
1497         /* Choose the GTT offset for our buffer and put it there. */
1498         ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1499         if (ret)
1500                 return ret;
1501
1502         entry->offset = obj_priv->gtt_offset;
1503
1504         relocs = (struct drm_i915_gem_relocation_entry __user *)
1505                  (uintptr_t) entry->relocs_ptr;
1506         /* Apply the relocations, using the GTT aperture to avoid cache
1507          * flushing requirements.
1508          */
1509         for (i = 0; i < entry->relocation_count; i++) {
1510                 struct drm_gem_object *target_obj;
1511                 struct drm_i915_gem_object *target_obj_priv;
1512                 uint32_t reloc_val, reloc_offset, *reloc_entry;
1513                 int ret;
1514
1515                 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1516                 if (ret != 0) {
1517                         i915_gem_object_unpin(obj);
1518                         return ret;
1519                 }
1520
1521                 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1522                                                    reloc.target_handle);
1523                 if (target_obj == NULL) {
1524                         i915_gem_object_unpin(obj);
1525                         return -EBADF;
1526                 }
1527                 target_obj_priv = target_obj->driver_private;
1528
1529                 /* The target buffer should have appeared before us in the
1530                  * exec_object list, so it should have a GTT space bound by now.
1531                  */
1532                 if (target_obj_priv->gtt_space == NULL) {
1533                         DRM_ERROR("No GTT space found for object %d\n",
1534                                   reloc.target_handle);
1535                         drm_gem_object_unreference(target_obj);
1536                         i915_gem_object_unpin(obj);
1537                         return -EINVAL;
1538                 }
1539
1540                 if (reloc.offset > obj->size - 4) {
1541                         DRM_ERROR("Relocation beyond object bounds: "
1542                                   "obj %p target %d offset %d size %d.\n",
1543                                   obj, reloc.target_handle,
1544                                   (int) reloc.offset, (int) obj->size);
1545                         drm_gem_object_unreference(target_obj);
1546                         i915_gem_object_unpin(obj);
1547                         return -EINVAL;
1548                 }
1549                 if (reloc.offset & 3) {
1550                         DRM_ERROR("Relocation not 4-byte aligned: "
1551                                   "obj %p target %d offset %d.\n",
1552                                   obj, reloc.target_handle,
1553                                   (int) reloc.offset);
1554                         drm_gem_object_unreference(target_obj);
1555                         i915_gem_object_unpin(obj);
1556                         return -EINVAL;
1557                 }
1558
1559                 if (reloc.write_domain && target_obj->pending_write_domain &&
1560                     reloc.write_domain != target_obj->pending_write_domain) {
1561                         DRM_ERROR("Write domain conflict: "
1562                                   "obj %p target %d offset %d "
1563                                   "new %08x old %08x\n",
1564                                   obj, reloc.target_handle,
1565                                   (int) reloc.offset,
1566                                   reloc.write_domain,
1567                                   target_obj->pending_write_domain);
1568                         drm_gem_object_unreference(target_obj);
1569                         i915_gem_object_unpin(obj);
1570                         return -EINVAL;
1571                 }
1572
1573 #if WATCH_RELOC
1574                 DRM_INFO("%s: obj %p offset %08x target %d "
1575                          "read %08x write %08x gtt %08x "
1576                          "presumed %08x delta %08x\n",
1577                          __func__,
1578                          obj,
1579                          (int) reloc.offset,
1580                          (int) reloc.target_handle,
1581                          (int) reloc.read_domains,
1582                          (int) reloc.write_domain,
1583                          (int) target_obj_priv->gtt_offset,
1584                          (int) reloc.presumed_offset,
1585                          reloc.delta);
1586 #endif
1587
1588                 target_obj->pending_read_domains |= reloc.read_domains;
1589                 target_obj->pending_write_domain |= reloc.write_domain;
1590
1591                 /* If the relocation already has the right value in it, no
1592                  * more work needs to be done.
1593                  */
1594                 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1595                         drm_gem_object_unreference(target_obj);
1596                         continue;
1597                 }
1598
1599                 /* Now that we're going to actually write some data in,
1600                  * make sure that any rendering using this buffer's contents
1601                  * is completed.
1602                  */
1603                 i915_gem_object_wait_rendering(obj);
1604
1605                 /* As we're writing through the gtt, flush
1606                  * any CPU writes before we write the relocations
1607                  */
1608                 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1609                         i915_gem_clflush_object(obj);
1610                         drm_agp_chipset_flush(dev);
1611                         obj->write_domain = 0;
1612                 }
1613
1614                 /* Map the page containing the relocation we're going to
1615                  * perform.
1616                  */
1617                 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1618                 if (reloc_page == NULL ||
1619                     (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
1620                     (reloc_offset & ~(PAGE_SIZE - 1))) {
1621                         if (reloc_page != NULL)
1622                                 iounmap(reloc_page);
1623
1624                         reloc_page = ioremap(dev->agp->base +
1625                                              (reloc_offset & ~(PAGE_SIZE - 1)),
1626                                              PAGE_SIZE);
1627                         last_reloc_offset = reloc_offset;
1628                         if (reloc_page == NULL) {
1629                                 drm_gem_object_unreference(target_obj);
1630                                 i915_gem_object_unpin(obj);
1631                                 return -ENOMEM;
1632                         }
1633                 }
1634
1635                 reloc_entry = (uint32_t *)((char *)reloc_page +
1636                                            (reloc_offset & (PAGE_SIZE - 1)));
1637                 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1638
1639 #if WATCH_BUF
1640                 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1641                           obj, (unsigned int) reloc.offset,
1642                           readl(reloc_entry), reloc_val);
1643 #endif
1644                 writel(reloc_val, reloc_entry);
1645
1646                 /* Write the updated presumed offset for this entry back out
1647                  * to the user.
1648                  */
1649                 reloc.presumed_offset = target_obj_priv->gtt_offset;
1650                 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1651                 if (ret != 0) {
1652                         drm_gem_object_unreference(target_obj);
1653                         i915_gem_object_unpin(obj);
1654                         return ret;
1655                 }
1656
1657                 drm_gem_object_unreference(target_obj);
1658         }
1659
1660         if (reloc_page != NULL)
1661                 iounmap(reloc_page);
1662
1663 #if WATCH_BUF
1664         if (0)
1665                 i915_gem_dump_object(obj, 128, __func__, ~0);
1666 #endif
1667         return 0;
1668 }
1669
1670 /** Dispatch a batchbuffer to the ring
1671  */
1672 static int
1673 i915_dispatch_gem_execbuffer(struct drm_device *dev,
1674                               struct drm_i915_gem_execbuffer *exec,
1675                               uint64_t exec_offset)
1676 {
1677         drm_i915_private_t *dev_priv = dev->dev_private;
1678         struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1679                                              (uintptr_t) exec->cliprects_ptr;
1680         int nbox = exec->num_cliprects;
1681         int i = 0, count;
1682         uint32_t        exec_start, exec_len;
1683         RING_LOCALS;
1684
1685         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1686         exec_len = (uint32_t) exec->batch_len;
1687
1688         if ((exec_start | exec_len) & 0x7) {
1689                 DRM_ERROR("alignment\n");
1690                 return -EINVAL;
1691         }
1692
1693         if (!exec_start)
1694                 return -EINVAL;
1695
1696         count = nbox ? nbox : 1;
1697
1698         for (i = 0; i < count; i++) {
1699                 if (i < nbox) {
1700                         int ret = i915_emit_box(dev, boxes, i,
1701                                                 exec->DR1, exec->DR4);
1702                         if (ret)
1703                                 return ret;
1704                 }
1705
1706                 if (IS_I830(dev) || IS_845G(dev)) {
1707                         BEGIN_LP_RING(4);
1708                         OUT_RING(MI_BATCH_BUFFER);
1709                         OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1710                         OUT_RING(exec_start + exec_len - 4);
1711                         OUT_RING(0);
1712                         ADVANCE_LP_RING();
1713                 } else {
1714                         BEGIN_LP_RING(2);
1715                         if (IS_I965G(dev)) {
1716                                 OUT_RING(MI_BATCH_BUFFER_START |
1717                                          (2 << 6) |
1718                                          MI_BATCH_NON_SECURE_I965);
1719                                 OUT_RING(exec_start);
1720                         } else {
1721                                 OUT_RING(MI_BATCH_BUFFER_START |
1722                                          (2 << 6));
1723                                 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1724                         }
1725                         ADVANCE_LP_RING();
1726                 }
1727         }
1728
1729         /* XXX breadcrumb */
1730         return 0;
1731 }
1732
1733 /* Throttle our rendering by waiting until the ring has completed our requests
1734  * emitted over 20 msec ago.
1735  *
1736  * This should get us reasonable parallelism between CPU and GPU but also
1737  * relatively low latency when blocking on a particular request to finish.
1738  */
1739 static int
1740 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1741 {
1742         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1743         int ret = 0;
1744         uint32_t seqno;
1745
1746         mutex_lock(&dev->struct_mutex);
1747         seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1748         i915_file_priv->mm.last_gem_throttle_seqno =
1749                 i915_file_priv->mm.last_gem_seqno;
1750         if (seqno)
1751                 ret = i915_wait_request(dev, seqno);
1752         mutex_unlock(&dev->struct_mutex);
1753         return ret;
1754 }
1755
1756 int
1757 i915_gem_execbuffer(struct drm_device *dev, void *data,
1758                     struct drm_file *file_priv)
1759 {
1760         drm_i915_private_t *dev_priv = dev->dev_private;
1761         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1762         struct drm_i915_gem_execbuffer *args = data;
1763         struct drm_i915_gem_exec_object *exec_list = NULL;
1764         struct drm_gem_object **object_list = NULL;
1765         struct drm_gem_object *batch_obj;
1766         int ret, i, pinned = 0;
1767         uint64_t exec_offset;
1768         uint32_t seqno, flush_domains;
1769
1770 #if WATCH_EXEC
1771         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1772                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1773 #endif
1774
1775         /* Copy in the exec list from userland */
1776         exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1777                                DRM_MEM_DRIVER);
1778         object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1779                                  DRM_MEM_DRIVER);
1780         if (exec_list == NULL || object_list == NULL) {
1781                 DRM_ERROR("Failed to allocate exec or object list "
1782                           "for %d buffers\n",
1783                           args->buffer_count);
1784                 ret = -ENOMEM;
1785                 goto pre_mutex_err;
1786         }
1787         ret = copy_from_user(exec_list,
1788                              (struct drm_i915_relocation_entry __user *)
1789                              (uintptr_t) args->buffers_ptr,
1790                              sizeof(*exec_list) * args->buffer_count);
1791         if (ret != 0) {
1792                 DRM_ERROR("copy %d exec entries failed %d\n",
1793                           args->buffer_count, ret);
1794                 goto pre_mutex_err;
1795         }
1796
1797         mutex_lock(&dev->struct_mutex);
1798
1799         i915_verify_inactive(dev, __FILE__, __LINE__);
1800
1801         if (dev_priv->mm.wedged) {
1802                 DRM_ERROR("Execbuf while wedged\n");
1803                 mutex_unlock(&dev->struct_mutex);
1804                 return -EIO;
1805         }
1806
1807         if (dev_priv->mm.suspended) {
1808                 DRM_ERROR("Execbuf while VT-switched.\n");
1809                 mutex_unlock(&dev->struct_mutex);
1810                 return -EBUSY;
1811         }
1812
1813         /* Zero the gloabl flush/invalidate flags. These
1814          * will be modified as each object is bound to the
1815          * gtt
1816          */
1817         dev->invalidate_domains = 0;
1818         dev->flush_domains = 0;
1819
1820         /* Look up object handles and perform the relocations */
1821         for (i = 0; i < args->buffer_count; i++) {
1822                 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1823                                                        exec_list[i].handle);
1824                 if (object_list[i] == NULL) {
1825                         DRM_ERROR("Invalid object handle %d at index %d\n",
1826                                    exec_list[i].handle, i);
1827                         ret = -EBADF;
1828                         goto err;
1829                 }
1830
1831                 object_list[i]->pending_read_domains = 0;
1832                 object_list[i]->pending_write_domain = 0;
1833                 ret = i915_gem_object_pin_and_relocate(object_list[i],
1834                                                        file_priv,
1835                                                        &exec_list[i]);
1836                 if (ret) {
1837                         DRM_ERROR("object bind and relocate failed %d\n", ret);
1838                         goto err;
1839                 }
1840                 pinned = i + 1;
1841         }
1842
1843         /* Set the pending read domains for the batch buffer to COMMAND */
1844         batch_obj = object_list[args->buffer_count-1];
1845         batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1846         batch_obj->pending_write_domain = 0;
1847
1848         i915_verify_inactive(dev, __FILE__, __LINE__);
1849
1850         for (i = 0; i < args->buffer_count; i++) {
1851                 struct drm_gem_object *obj = object_list[i];
1852                 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1853
1854                 if (obj_priv->gtt_space == NULL) {
1855                         /* We evicted the buffer in the process of validating
1856                          * our set of buffers in.  We could try to recover by
1857                          * kicking them everything out and trying again from
1858                          * the start.
1859                          */
1860                         ret = -ENOMEM;
1861                         goto err;
1862                 }
1863
1864                 /* make sure all previous memory operations have passed */
1865                 ret = i915_gem_object_set_domain(obj,
1866                                                  obj->pending_read_domains,
1867                                                  obj->pending_write_domain);
1868                 if (ret)
1869                         goto err;
1870         }
1871
1872         i915_verify_inactive(dev, __FILE__, __LINE__);
1873
1874         /* Flush/invalidate caches and chipset buffer */
1875         flush_domains = i915_gem_dev_set_domain(dev);
1876
1877         i915_verify_inactive(dev, __FILE__, __LINE__);
1878
1879 #if WATCH_COHERENCY
1880         for (i = 0; i < args->buffer_count; i++) {
1881                 i915_gem_object_check_coherency(object_list[i],
1882                                                 exec_list[i].handle);
1883         }
1884 #endif
1885
1886         exec_offset = exec_list[args->buffer_count - 1].offset;
1887
1888 #if WATCH_EXEC
1889         i915_gem_dump_object(object_list[args->buffer_count - 1],
1890                               args->batch_len,
1891                               __func__,
1892                               ~0);
1893 #endif
1894
1895         /* Exec the batchbuffer */
1896         ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1897         if (ret) {
1898                 DRM_ERROR("dispatch failed %d\n", ret);
1899                 goto err;
1900         }
1901
1902         /*
1903          * Ensure that the commands in the batch buffer are
1904          * finished before the interrupt fires
1905          */
1906         flush_domains |= i915_retire_commands(dev);
1907
1908         i915_verify_inactive(dev, __FILE__, __LINE__);
1909
1910         /*
1911          * Get a seqno representing the execution of the current buffer,
1912          * which we can wait on.  We would like to mitigate these interrupts,
1913          * likely by only creating seqnos occasionally (so that we have
1914          * *some* interrupts representing completion of buffers that we can
1915          * wait on when trying to clear up gtt space).
1916          */
1917         seqno = i915_add_request(dev, flush_domains);
1918         BUG_ON(seqno == 0);
1919         i915_file_priv->mm.last_gem_seqno = seqno;
1920         for (i = 0; i < args->buffer_count; i++) {
1921                 struct drm_gem_object *obj = object_list[i];
1922                 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1923
1924                 i915_gem_object_move_to_active(obj);
1925                 obj_priv->last_rendering_seqno = seqno;
1926 #if WATCH_LRU
1927                 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1928 #endif
1929         }
1930 #if WATCH_LRU
1931         i915_dump_lru(dev, __func__);
1932 #endif
1933
1934         i915_verify_inactive(dev, __FILE__, __LINE__);
1935
1936         /* Copy the new buffer offsets back to the user's exec list. */
1937         ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1938                            (uintptr_t) args->buffers_ptr,
1939                            exec_list,
1940                            sizeof(*exec_list) * args->buffer_count);
1941         if (ret)
1942                 DRM_ERROR("failed to copy %d exec entries "
1943                           "back to user (%d)\n",
1944                            args->buffer_count, ret);
1945 err:
1946         if (object_list != NULL) {
1947                 for (i = 0; i < pinned; i++)
1948                         i915_gem_object_unpin(object_list[i]);
1949
1950                 for (i = 0; i < args->buffer_count; i++)
1951                         drm_gem_object_unreference(object_list[i]);
1952         }
1953         mutex_unlock(&dev->struct_mutex);
1954
1955 pre_mutex_err:
1956         drm_free(object_list, sizeof(*object_list) * args->buffer_count,
1957                  DRM_MEM_DRIVER);
1958         drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
1959                  DRM_MEM_DRIVER);
1960
1961         return ret;
1962 }
1963
1964 int
1965 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1966 {
1967         struct drm_device *dev = obj->dev;
1968         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1969         int ret;
1970
1971         i915_verify_inactive(dev, __FILE__, __LINE__);
1972         if (obj_priv->gtt_space == NULL) {
1973                 ret = i915_gem_object_bind_to_gtt(obj, alignment);
1974                 if (ret != 0) {
1975                         DRM_ERROR("Failure to bind: %d", ret);
1976                         return ret;
1977                 }
1978         }
1979         obj_priv->pin_count++;
1980
1981         /* If the object is not active and not pending a flush,
1982          * remove it from the inactive list
1983          */
1984         if (obj_priv->pin_count == 1) {
1985                 atomic_inc(&dev->pin_count);
1986                 atomic_add(obj->size, &dev->pin_memory);
1987                 if (!obj_priv->active &&
1988                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
1989                                            I915_GEM_DOMAIN_GTT)) == 0 &&
1990                     !list_empty(&obj_priv->list))
1991                         list_del_init(&obj_priv->list);
1992         }
1993         i915_verify_inactive(dev, __FILE__, __LINE__);
1994
1995         return 0;
1996 }
1997
1998 void
1999 i915_gem_object_unpin(struct drm_gem_object *obj)
2000 {
2001         struct drm_device *dev = obj->dev;
2002         drm_i915_private_t *dev_priv = dev->dev_private;
2003         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2004
2005         i915_verify_inactive(dev, __FILE__, __LINE__);
2006         obj_priv->pin_count--;
2007         BUG_ON(obj_priv->pin_count < 0);
2008         BUG_ON(obj_priv->gtt_space == NULL);
2009
2010         /* If the object is no longer pinned, and is
2011          * neither active nor being flushed, then stick it on
2012          * the inactive list
2013          */
2014         if (obj_priv->pin_count == 0) {
2015                 if (!obj_priv->active &&
2016                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2017                                            I915_GEM_DOMAIN_GTT)) == 0)
2018                         list_move_tail(&obj_priv->list,
2019                                        &dev_priv->mm.inactive_list);
2020                 atomic_dec(&dev->pin_count);
2021                 atomic_sub(obj->size, &dev->pin_memory);
2022         }
2023         i915_verify_inactive(dev, __FILE__, __LINE__);
2024 }
2025
2026 int
2027 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2028                    struct drm_file *file_priv)
2029 {
2030         struct drm_i915_gem_pin *args = data;
2031         struct drm_gem_object *obj;
2032         struct drm_i915_gem_object *obj_priv;
2033         int ret;
2034
2035         mutex_lock(&dev->struct_mutex);
2036
2037         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2038         if (obj == NULL) {
2039                 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2040                           args->handle);
2041                 mutex_unlock(&dev->struct_mutex);
2042                 return -EBADF;
2043         }
2044         obj_priv = obj->driver_private;
2045
2046         ret = i915_gem_object_pin(obj, args->alignment);
2047         if (ret != 0) {
2048                 drm_gem_object_unreference(obj);
2049                 mutex_unlock(&dev->struct_mutex);
2050                 return ret;
2051         }
2052
2053         /* XXX - flush the CPU caches for pinned objects
2054          * as the X server doesn't manage domains yet
2055          */
2056         if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
2057                 i915_gem_clflush_object(obj);
2058                 drm_agp_chipset_flush(dev);
2059                 obj->write_domain = 0;
2060         }
2061         args->offset = obj_priv->gtt_offset;
2062         drm_gem_object_unreference(obj);
2063         mutex_unlock(&dev->struct_mutex);
2064
2065         return 0;
2066 }
2067
2068 int
2069 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2070                      struct drm_file *file_priv)
2071 {
2072         struct drm_i915_gem_pin *args = data;
2073         struct drm_gem_object *obj;
2074
2075         mutex_lock(&dev->struct_mutex);
2076
2077         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2078         if (obj == NULL) {
2079                 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2080                           args->handle);
2081                 mutex_unlock(&dev->struct_mutex);
2082                 return -EBADF;
2083         }
2084
2085         i915_gem_object_unpin(obj);
2086
2087         drm_gem_object_unreference(obj);
2088         mutex_unlock(&dev->struct_mutex);
2089         return 0;
2090 }
2091
2092 int
2093 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2094                     struct drm_file *file_priv)
2095 {
2096         struct drm_i915_gem_busy *args = data;
2097         struct drm_gem_object *obj;
2098         struct drm_i915_gem_object *obj_priv;
2099
2100         mutex_lock(&dev->struct_mutex);
2101         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2102         if (obj == NULL) {
2103                 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2104                           args->handle);
2105                 mutex_unlock(&dev->struct_mutex);
2106                 return -EBADF;
2107         }
2108
2109         obj_priv = obj->driver_private;
2110         args->busy = obj_priv->active;
2111
2112         drm_gem_object_unreference(obj);
2113         mutex_unlock(&dev->struct_mutex);
2114         return 0;
2115 }
2116
2117 int
2118 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2119                         struct drm_file *file_priv)
2120 {
2121     return i915_gem_ring_throttle(dev, file_priv);
2122 }
2123
2124 int i915_gem_init_object(struct drm_gem_object *obj)
2125 {
2126         struct drm_i915_gem_object *obj_priv;
2127
2128         obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2129         if (obj_priv == NULL)
2130                 return -ENOMEM;
2131
2132         /*
2133          * We've just allocated pages from the kernel,
2134          * so they've just been written by the CPU with
2135          * zeros. They'll need to be clflushed before we
2136          * use them with the GPU.
2137          */
2138         obj->write_domain = I915_GEM_DOMAIN_CPU;
2139         obj->read_domains = I915_GEM_DOMAIN_CPU;
2140
2141         obj->driver_private = obj_priv;
2142         obj_priv->obj = obj;
2143         INIT_LIST_HEAD(&obj_priv->list);
2144         return 0;
2145 }
2146
2147 void i915_gem_free_object(struct drm_gem_object *obj)
2148 {
2149         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2150
2151         while (obj_priv->pin_count > 0)
2152                 i915_gem_object_unpin(obj);
2153
2154         i915_gem_object_unbind(obj);
2155
2156         drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2157         drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2158 }
2159
2160 int
2161 i915_gem_set_domain(struct drm_gem_object *obj,
2162                     struct drm_file *file_priv,
2163                     uint32_t read_domains,
2164                     uint32_t write_domain)
2165 {
2166         struct drm_device *dev = obj->dev;
2167         int ret;
2168         uint32_t flush_domains;
2169
2170         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2171
2172         ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2173         if (ret)
2174                 return ret;
2175         flush_domains = i915_gem_dev_set_domain(obj->dev);
2176
2177         if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2178                 (void) i915_add_request(dev, flush_domains);
2179
2180         return 0;
2181 }
2182
2183 /** Unbinds all objects that are on the given buffer list. */
2184 static int
2185 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2186 {
2187         struct drm_gem_object *obj;
2188         struct drm_i915_gem_object *obj_priv;
2189         int ret;
2190
2191         while (!list_empty(head)) {
2192                 obj_priv = list_first_entry(head,
2193                                             struct drm_i915_gem_object,
2194                                             list);
2195                 obj = obj_priv->obj;
2196
2197                 if (obj_priv->pin_count != 0) {
2198                         DRM_ERROR("Pinned object in unbind list\n");
2199                         mutex_unlock(&dev->struct_mutex);
2200                         return -EINVAL;
2201                 }
2202
2203                 ret = i915_gem_object_unbind(obj);
2204                 if (ret != 0) {
2205                         DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2206                                   ret);
2207                         mutex_unlock(&dev->struct_mutex);
2208                         return ret;
2209                 }
2210         }
2211
2212
2213         return 0;
2214 }
2215
2216 static int
2217 i915_gem_idle(struct drm_device *dev)
2218 {
2219         drm_i915_private_t *dev_priv = dev->dev_private;
2220         uint32_t seqno, cur_seqno, last_seqno;
2221         int stuck;
2222
2223         if (dev_priv->mm.suspended)
2224                 return 0;
2225
2226         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
2227          * We need to replace this with a semaphore, or something.
2228          */
2229         dev_priv->mm.suspended = 1;
2230
2231         i915_kernel_lost_context(dev);
2232
2233         /* Flush the GPU along with all non-CPU write domains
2234          */
2235         i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2236                        ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2237         seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
2238                                         I915_GEM_DOMAIN_GTT));
2239
2240         if (seqno == 0) {
2241                 mutex_unlock(&dev->struct_mutex);
2242                 return -ENOMEM;
2243         }
2244
2245         dev_priv->mm.waiting_gem_seqno = seqno;
2246         last_seqno = 0;
2247         stuck = 0;
2248         for (;;) {
2249                 cur_seqno = i915_get_gem_seqno(dev);
2250                 if (i915_seqno_passed(cur_seqno, seqno))
2251                         break;
2252                 if (last_seqno == cur_seqno) {
2253                         if (stuck++ > 100) {
2254                                 DRM_ERROR("hardware wedged\n");
2255                                 dev_priv->mm.wedged = 1;
2256                                 DRM_WAKEUP(&dev_priv->irq_queue);
2257                                 break;
2258                         }
2259                 }
2260                 msleep(10);
2261                 last_seqno = cur_seqno;
2262         }
2263         dev_priv->mm.waiting_gem_seqno = 0;
2264
2265         i915_gem_retire_requests(dev);
2266
2267         /* Active and flushing should now be empty as we've
2268          * waited for a sequence higher than any pending execbuffer
2269          */
2270         BUG_ON(!list_empty(&dev_priv->mm.active_list));
2271         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2272
2273         /* Request should now be empty as we've also waited
2274          * for the last request in the list
2275          */
2276         BUG_ON(!list_empty(&dev_priv->mm.request_list));
2277
2278         /* Move all buffers out of the GTT. */
2279         i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2280
2281         BUG_ON(!list_empty(&dev_priv->mm.active_list));
2282         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2283         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2284         BUG_ON(!list_empty(&dev_priv->mm.request_list));
2285         return 0;
2286 }
2287
2288 static int
2289 i915_gem_init_hws(struct drm_device *dev)
2290 {
2291         drm_i915_private_t *dev_priv = dev->dev_private;
2292         struct drm_gem_object *obj;
2293         struct drm_i915_gem_object *obj_priv;
2294         int ret;
2295
2296         /* If we need a physical address for the status page, it's already
2297          * initialized at driver load time.
2298          */
2299         if (!I915_NEED_GFX_HWS(dev))
2300                 return 0;
2301
2302         obj = drm_gem_object_alloc(dev, 4096);
2303         if (obj == NULL) {
2304                 DRM_ERROR("Failed to allocate status page\n");
2305                 return -ENOMEM;
2306         }
2307         obj_priv = obj->driver_private;
2308
2309         ret = i915_gem_object_pin(obj, 4096);
2310         if (ret != 0) {
2311                 drm_gem_object_unreference(obj);
2312                 return ret;
2313         }
2314
2315         dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2316         dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
2317         dev_priv->hws_map.size = 4096;
2318         dev_priv->hws_map.type = 0;
2319         dev_priv->hws_map.flags = 0;
2320         dev_priv->hws_map.mtrr = 0;
2321
2322         drm_core_ioremap(&dev_priv->hws_map, dev);
2323         if (dev_priv->hws_map.handle == NULL) {
2324                 DRM_ERROR("Failed to map status page.\n");
2325                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2326                 drm_gem_object_unreference(obj);
2327                 return -EINVAL;
2328         }
2329         dev_priv->hws_obj = obj;
2330         dev_priv->hw_status_page = dev_priv->hws_map.handle;
2331         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2332         I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2333         DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
2334
2335         return 0;
2336 }
2337
2338 static int
2339 i915_gem_init_ringbuffer(struct drm_device *dev)
2340 {
2341         drm_i915_private_t *dev_priv = dev->dev_private;
2342         struct drm_gem_object *obj;
2343         struct drm_i915_gem_object *obj_priv;
2344         int ret;
2345
2346         ret = i915_gem_init_hws(dev);
2347         if (ret != 0)
2348                 return ret;
2349
2350         obj = drm_gem_object_alloc(dev, 128 * 1024);
2351         if (obj == NULL) {
2352                 DRM_ERROR("Failed to allocate ringbuffer\n");
2353                 return -ENOMEM;
2354         }
2355         obj_priv = obj->driver_private;
2356
2357         ret = i915_gem_object_pin(obj, 4096);
2358         if (ret != 0) {
2359                 drm_gem_object_unreference(obj);
2360                 return ret;
2361         }
2362
2363         /* Set up the kernel mapping for the ring. */
2364         dev_priv->ring.Size = obj->size;
2365         dev_priv->ring.tail_mask = obj->size - 1;
2366
2367         dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2368         dev_priv->ring.map.size = obj->size;
2369         dev_priv->ring.map.type = 0;
2370         dev_priv->ring.map.flags = 0;
2371         dev_priv->ring.map.mtrr = 0;
2372
2373         drm_core_ioremap(&dev_priv->ring.map, dev);
2374         if (dev_priv->ring.map.handle == NULL) {
2375                 DRM_ERROR("Failed to map ringbuffer.\n");
2376                 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2377                 drm_gem_object_unreference(obj);
2378                 return -EINVAL;
2379         }
2380         dev_priv->ring.ring_obj = obj;
2381         dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2382
2383         /* Stop the ring if it's running. */
2384         I915_WRITE(LP_RING + RING_LEN, 0);
2385         I915_WRITE(LP_RING + RING_HEAD, 0);
2386         I915_WRITE(LP_RING + RING_TAIL, 0);
2387         I915_WRITE(LP_RING + RING_START, 0);
2388
2389         /* Initialize the ring. */
2390         I915_WRITE(LP_RING + RING_START, obj_priv->gtt_offset);
2391         I915_WRITE(LP_RING + RING_LEN,
2392                    ((obj->size - 4096) & RING_NR_PAGES) |
2393                    RING_NO_REPORT |
2394                    RING_VALID);
2395
2396         /* Update our cache of the ring state */
2397         i915_kernel_lost_context(dev);
2398
2399         return 0;
2400 }
2401
2402 static void
2403 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2404 {
2405         drm_i915_private_t *dev_priv = dev->dev_private;
2406
2407         if (dev_priv->ring.ring_obj == NULL)
2408                 return;
2409
2410         drm_core_ioremapfree(&dev_priv->ring.map, dev);
2411
2412         i915_gem_object_unpin(dev_priv->ring.ring_obj);
2413         drm_gem_object_unreference(dev_priv->ring.ring_obj);
2414         dev_priv->ring.ring_obj = NULL;
2415         memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2416
2417         if (dev_priv->hws_obj != NULL) {
2418                 i915_gem_object_unpin(dev_priv->hws_obj);
2419                 drm_gem_object_unreference(dev_priv->hws_obj);
2420                 dev_priv->hws_obj = NULL;
2421                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2422
2423                 /* Write high address into HWS_PGA when disabling. */
2424                 I915_WRITE(HWS_PGA, 0x1ffff000);
2425         }
2426 }
2427
2428 int
2429 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2430                        struct drm_file *file_priv)
2431 {
2432         drm_i915_private_t *dev_priv = dev->dev_private;
2433         int ret;
2434
2435         if (dev_priv->mm.wedged) {
2436                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2437                 dev_priv->mm.wedged = 0;
2438         }
2439
2440         ret = i915_gem_init_ringbuffer(dev);
2441         if (ret != 0)
2442                 return ret;
2443
2444         mutex_lock(&dev->struct_mutex);
2445         BUG_ON(!list_empty(&dev_priv->mm.active_list));
2446         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2447         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2448         BUG_ON(!list_empty(&dev_priv->mm.request_list));
2449         dev_priv->mm.suspended = 0;
2450         mutex_unlock(&dev->struct_mutex);
2451         return 0;
2452 }
2453
2454 int
2455 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2456                        struct drm_file *file_priv)
2457 {
2458         int ret;
2459
2460         mutex_lock(&dev->struct_mutex);
2461         ret = i915_gem_idle(dev);
2462         if (ret == 0)
2463                 i915_gem_cleanup_ringbuffer(dev);
2464         mutex_unlock(&dev->struct_mutex);
2465
2466         return 0;
2467 }
2468
2469 void
2470 i915_gem_lastclose(struct drm_device *dev)
2471 {
2472         int ret;
2473         drm_i915_private_t *dev_priv = dev->dev_private;
2474
2475         mutex_lock(&dev->struct_mutex);
2476
2477         if (dev_priv->ring.ring_obj != NULL) {
2478                 ret = i915_gem_idle(dev);
2479                 if (ret)
2480                         DRM_ERROR("failed to idle hardware: %d\n", ret);
2481
2482                 i915_gem_cleanup_ringbuffer(dev);
2483         }
2484
2485         mutex_unlock(&dev->struct_mutex);
2486 }
2487
2488 void i915_gem_load(struct drm_device *dev)
2489 {
2490         drm_i915_private_t *dev_priv = dev->dev_private;
2491
2492         INIT_LIST_HEAD(&dev_priv->mm.active_list);
2493         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2494         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2495         INIT_LIST_HEAD(&dev_priv->mm.request_list);
2496         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2497                           i915_gem_retire_work_handler);
2498         dev_priv->mm.next_gem_seqno = 1;
2499
2500         i915_gem_detect_bit_6_swizzle(dev);
2501 }