/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define WATCH_BUF	0
#define WATCH_EXEC	0
#define WATCH_LRU	0
#define WATCH_RELOC	0
#define WATCH_COHERENCY	0

static void
i915_gem_object_set_domain(struct drm_gem_object *obj,
			   uint32_t read_domains,
			   uint32_t write_domain);

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	mutex_lock(&dev->struct_mutex);

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
	    (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
			  args->gtt_end - args->gtt_start);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

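/*
 * Illustrative userspace usage (a sketch, not part of this file; it assumes
 * the DRM_IOCTL_I915_GEM_INIT wrapper from this era's i915_drm.h): the DDX
 * hands GEM a page-aligned slice of the aperture, e.g.
 *
 *	struct drm_i915_gem_init init = {
 *		.gtt_start = 16 * 1024 * 1024,
 *		.gtt_end   = 64 * 1024 * 1024,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_INIT, &init);
 *
 * Unaligned bounds, or gtt_start >= gtt_end, make the ioctl above return
 * -EINVAL.
 */
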
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	if (obj_priv->page_list == NULL)
		return;

	for (i = 0; i < page_count; i++)
		if (obj_priv->page_list[i] != NULL)
			page_cache_release(obj_priv->page_list[i]);

	drm_free(obj_priv->page_list,
		 page_count * sizeof(struct page *),
		 DRM_MEM_DRIVER);
	obj_priv->page_list = NULL;
}

static void
i915_gem_object_move_to_active(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
}

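/*
 * Orientation note (summarizing this file's own list handling): an object
 * the GPU is using sits on mm.active_list and holds one extra reference.
 * When the request covering its rendering retires, it moves either to
 * mm.flushing_list (its GPU write domain is still dirty) or straight to
 * mm.inactive_list (clean and evictable), and the extra reference is
 * dropped.  Pinned objects are kept off the LRU lists entirely, as the
 * pin_count check above shows.
 */
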
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(I915_GEM_HWS_INDEX << STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(GFX_OP_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	request->flush_domains = flush_domains;
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	return seqno;
}

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= DRM_GEM_DOMAIN_I915_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (request->flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		/* First clear any buffers that were only waiting for a flush
		 * matching the one just retired.
		 */
		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if (obj->write_domain & request->flush_domains) {
				obj->write_domain = 0;
				i915_gem_object_move_to_inactive(obj);
			}
		}
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this one.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			return;
#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0) {
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.flushing_list);
		} else {
			i915_gem_object_move_to_inactive(obj);
		}
	}
}

/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

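/*
 * Worked examples (illustrative): with 32-bit wraparound arithmetic,
 *
 *	i915_seqno_passed(2, 1)                   -> true
 *	i915_seqno_passed(1, 2)                   -> false
 *	i915_seqno_passed(0x00000002, 0xfffffffe) -> true
 *
 * The last case is the point of the signed subtraction: seqno 2 was
 * emitted after 0xfffffffe once the counter wrapped, and
 * (int32_t)(0x00000002 - 0xfffffffe) == 4 >= 0 classifies it correctly,
 * provided outstanding requests never span more than 2^31 seqnos.
 */
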
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno)) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		i915_user_irq_on(dev_priv);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno));
		i915_user_irq_off(dev_priv);
	}

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}

static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	if (flush_domains & DRM_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains|flush_domains) & ~DRM_GEM_DOMAIN_CPU) {
		/*
		 * read/write caches:
		 *
		 * DRM_GEM_DOMAIN_I915_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * DRM_GEM_DOMAIN_I915_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * DRM_GEM_DOMAIN_I915_COMMAND may not exist?
		 *
		 * DRM_GEM_DOMAIN_I915_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * DRM_GEM_DOMAIN_I915_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with DRM_GEM_DOMAIN_I915_COMMAND
		 * and DRM_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * DRM_GEM_DOMAIN_I915_RENDER and DRM_GEM_DOMAIN_I915_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    DRM_GEM_DOMAIN_I915_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & DRM_GEM_DOMAIN_I915_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & DRM_GEM_DOMAIN_I915_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}

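/*
 * Worked example (illustrative): on pre-965 hardware,
 * i915_gem_flush(dev, DRM_GEM_DOMAIN_I915_SAMPLER,
 *		  DRM_GEM_DOMAIN_I915_RENDER) builds cmd = CMD_MI_FLUSH with
 * MI_NO_WRITE_FLUSH cleared (the render cache is being flushed) and
 * MI_READ_FLUSH set (the sampler cache is being invalidated), then queues
 * that single MI_FLUSH to the ring.
 */
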
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* If there are writes queued to the buffer, flush and
	 * create a new seqno to wait for.
	 */
	if (obj->write_domain & ~(DRM_GEM_DOMAIN_CPU)) {
		uint32_t write_domain = obj->write_domain;
#if WATCH_BUF
		DRM_INFO("%s: flushing object %p from write domain %08x\n",
			 __func__, obj, write_domain);
#endif
		i915_gem_flush(dev, 0, write_domain);
		obj->write_domain = 0;

		i915_gem_object_move_to_active(obj);
		obj_priv->last_rendering_seqno = i915_add_request(dev,
								  write_domain);
		BUG_ON(obj_priv->last_rendering_seqno == 0);
#if WATCH_LRU
		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
#endif
	}
	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
static void
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return;

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	i915_gem_object_set_domain(obj, DRM_GEM_DOMAIN_CPU,
				   DRM_GEM_DOMAIN_CPU);

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	i915_gem_object_free_page_list(obj);

	drm_memrange_put_block(obj_priv->gtt_space);
	obj_priv->gtt_space = NULL;

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list)) {
		list_del_init(&obj_priv->list);
		if (obj_priv->active) {
			DRM_ERROR("Failed to wait on buffer when unbinding, "
				  "continued anyway.\n");
			obj_priv->active = 0;
			drm_gem_object_unreference(obj);
		}
	}
}

#if WATCH_BUF | WATCH_EXEC
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
		   uint32_t bias, uint32_t mark)
{
	uint32_t *mem = kmap_atomic(page, KM_USER0);
	int i;

	for (i = start; i < end; i += 4)
		DRM_INFO("%08x: %08x%s\n",
			 (int) (bias + i), mem[i / 4],
			 (bias + i == mark) ? " ********" : "");
	kunmap_atomic(mem, KM_USER0);
	/* give syslog time to catch up */
	msleep(1);
}

static void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
		     const char *where, uint32_t mark)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page;

	DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
		int page_len, chunk, chunk_len;

		page_len = len - page * PAGE_SIZE;
		if (page_len > PAGE_SIZE)
			page_len = PAGE_SIZE;

		for (chunk = 0; chunk < page_len; chunk += 128) {
			chunk_len = page_len - chunk;
			if (chunk_len > 128)
				chunk_len = 128;
			i915_gem_dump_page(obj_priv->page_list[page],
					   chunk, chunk + chunk_len,
					   obj_priv->gtt_offset +
					   page * PAGE_SIZE,
					   mark);
		}
	}
}
#endif

#if WATCH_LRU
static void
i915_dump_lru(struct drm_device *dev, const char *where)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;

	DRM_INFO("active list %s {\n", where);
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
			    list) {
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	DRM_INFO("}\n");
	DRM_INFO("flushing list %s {\n", where);
	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
			    list) {
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	DRM_INFO("}\n");
	DRM_INFO("inactive %s {\n", where);
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	DRM_INFO("}\n");
}
#endif

static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret != 0)
				return ret;

			continue;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, obj->write_domain);

			continue;
		}

		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}

#if WATCH_LRU
	DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif

	BUG_ON(obj_priv->active);

	/* Wait on the rendering and unbind the buffer. */
	i915_gem_object_unbind(obj);

	return 0;
}

static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;

	if (obj_priv->page_list)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->page_list != NULL);
	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
					 DRM_MEM_DRIVER);
	if (obj_priv->page_list == NULL)
		return -ENOMEM;

	for (i = 0; i < page_count; i++) {
		obj_priv->page_list[i] =
		    find_or_create_page(obj->filp->f_mapping, i, GFP_HIGHUSER);

		if (obj_priv->page_list[i] == NULL) {
			i915_gem_object_free_page_list(obj);
			return -ENOMEM;
		}
		unlock_page(obj_priv->page_list[i]);
	}

	return 0;
}

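/*
 * Note on pinning (a property of the calls used above): each page returned
 * by find_or_create_page() comes back with an elevated reference count, so
 * the backing pages cannot be reclaimed until
 * i915_gem_object_free_page_list() drops the references via
 * page_cache_release().
 */
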
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_memrange_node *free_space;
	int page_count, ret;

	if (alignment == 0)
		alignment = PAGE_SIZE;
	if (alignment & (PAGE_SIZE - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
					      obj->size,
					      alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space =
			drm_memrange_get_block(free_space, obj->size,
					       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		if (list_empty(&dev_priv->mm.inactive_list) &&
		    list_empty(&dev_priv->mm.active_list)) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0)
			return ret;
		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %d at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	ret = i915_gem_object_get_page_list(obj);
	if (ret) {
		drm_memrange_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->page_list,
					       page_count,
					       obj_priv->gtt_offset);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_free_page_list(obj);
		drm_memrange_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & ~DRM_GEM_DOMAIN_CPU);
	BUG_ON(obj->write_domain & ~DRM_GEM_DOMAIN_CPU);

	return 0;
}

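/*
 * Illustrative numbers for the alignment handling above: binding a 12KiB
 * object with alignment 0 rounds the alignment up to PAGE_SIZE and asks
 * drm_memrange_search_free() for a 12KiB block, while requesting
 * alignment = 0x800 (not a page multiple with 4KiB pages) fails the mask
 * check and returns -EINVAL before any search happens.
 */
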
static void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->page_list == NULL)
		return;

	drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
}

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate a
 * pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_domain(struct drm_gem_object *obj,
			   uint32_t read_domains,
			   uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x write %08x\n",
		 __func__, obj, read_domains, write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (write_domain == 0)
		read_domains |= obj->read_domains;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain && obj->write_domain != read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |= read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & DRM_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		/*
		 * If we're invalidating the CPU cache and flushing a GPU cache,
		 * then pause for rendering so that the GPU caches will be
		 * flushed before the cpu cache is invalidated
		 */
		if ((invalidate_domains & DRM_GEM_DOMAIN_CPU) &&
		    (flush_domains & ~DRM_GEM_DOMAIN_CPU))
			i915_gem_object_wait_rendering(obj);
		i915_gem_clflush_object(obj);
	}

	if ((write_domain | flush_domains) != 0)
		obj->write_domain = write_domain;
	obj->read_domains = read_domains;
	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
}

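/*
 * Worked example (illustrative, tracing Case 2 step 4 above): an object
 * currently (RENDER, RENDER) asked to become (CPU, CPU) computes
 *
 *	flush_domains      |= RENDER	(old write domain, new reads differ)
 *	invalidate_domains |= CPU	(CPU is a newly added read domain)
 *
 * which takes the CPU branch above: wait_rendering() drains the GPU write,
 * then the clflush invalidates the stale CPU cache lines before the CPU
 * reads the object.
 */
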
/**
 * Once all of the objects have been set in the proper domain,
 * perform the necessary flush and invalidate operations.
 *
 * Returns the write domains flushed, for use in flush tracking.
 */
static uint32_t
i915_gem_dev_set_domain(struct drm_device *dev)
{
	uint32_t flush_domains = dev->flush_domains;

	/*
	 * Now that all the buffers are synced to the proper domains,
	 * flush and invalidate the collected domains
	 */
	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		dev->invalidate_domains = 0;
		dev->flush_domains = 0;
	}

	return flush_domains;
}

#if WATCH_COHERENCY
static void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page;
	uint32_t *gtt_mapping;
	uint32_t *backing_map = NULL;
	int bad_count = 0;

	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
		 __func__, obj, obj_priv->gtt_offset, handle,
		 obj->size / 1024);

	gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
			      obj->size);
	if (gtt_mapping == NULL) {
		DRM_ERROR("failed to map GTT space\n");
		return;
	}

	for (page = 0; page < obj->size / PAGE_SIZE; page++) {
		int i;

		backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);

		if (backing_map == NULL) {
			DRM_ERROR("failed to map backing page\n");
			goto out;
		}

		for (i = 0; i < PAGE_SIZE / 4; i++) {
			uint32_t cpuval = backing_map[i];
			uint32_t gttval = readl(gtt_mapping +
						page * 1024 + i);

			if (cpuval != gttval) {
				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
					 "0x%08x vs 0x%08x\n",
					 (int)(obj_priv->gtt_offset +
					       page * PAGE_SIZE + i * 4),
					 cpuval, gttval);
				if (bad_count++ >= 8) {
					DRM_INFO("...\n");
					goto out;
				}
			}
		}
		kunmap_atomic(backing_map, KM_USER0);
		backing_map = NULL;
	}

 out:
	if (backing_map != NULL)
		kunmap_atomic(backing_map, KM_USER0);
	iounmap(gtt_mapping);

	/* give syslog time to catch up */
	msleep(1);

	/* Directly flush the object, since we just loaded values with the CPU
	 * from the backing pages and we don't want to disturb the cache
	 * management that we're trying to observe.
	 */
	i915_gem_clflush_object(obj);
}
#endif

static int
i915_gem_reloc_and_validate_object(struct drm_gem_object *obj,
				   struct drm_file *file_priv,
				   struct drm_i915_gem_exec_object *entry)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_relocation_entry __user *relocs;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	uint32_t last_reloc_offset = -1;
	void *reloc_page = NULL;

	/* Choose the GTT offset for our buffer and put it there. */
	if (obj_priv->gtt_space == NULL) {
		i915_gem_object_bind_to_gtt(obj, (unsigned) entry->alignment);
		if (obj_priv->gtt_space == NULL)
			return -ENOMEM;
	}

	entry->offset = obj_priv->gtt_offset;

	relocs = (struct drm_i915_gem_relocation_entry __user *)
		 (uintptr_t) entry->relocs_ptr;
	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset, *reloc_entry;

		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
		if (ret != 0)
			return ret;

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc.target_handle);
		if (target_obj == NULL)
			return -EINVAL;
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * validate list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			drm_gem_object_unreference(target_obj);
			return -EINVAL;
		}

		if (reloc.offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
			return -EINVAL;
		}

		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);
#endif

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		/* Now that we're going to actually write some data in,
		 * make sure that any rendering using this buffer's contents
		 * is completed.
		 */
		i915_gem_object_wait_rendering(obj);

		/* As we're writing through the gtt, flush
		 * any CPU writes before we write the relocations
		 */
		if (obj->write_domain & DRM_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			drm_agp_chipset_flush(dev);
			obj->write_domain = 0;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc.offset;
		if (reloc_page == NULL ||
		    (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
		    (reloc_offset & ~(PAGE_SIZE - 1))) {
			if (reloc_page != NULL)
				iounmap(reloc_page);

			reloc_page = ioremap(dev->agp->base +
					     (reloc_offset & ~(PAGE_SIZE - 1)),
					     PAGE_SIZE);
			last_reloc_offset = reloc_offset;
			if (reloc_page == NULL) {
				drm_gem_object_unreference(target_obj);
				return -ENOMEM;
			}
		}

		reloc_entry = (uint32_t *)((char *)reloc_page +
					   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc.delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc.offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);

		/* Write the updated presumed offset for this entry back out
		 * to the user.
		 */
		reloc.presumed_offset = target_obj_priv->gtt_offset;
		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			return ret;
		}

		drm_gem_object_unreference(target_obj);
	}

	if (reloc_page != NULL)
		iounmap(reloc_page);

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}

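/*
 * Worked example (illustrative): a relocation entry with offset 0x40,
 * delta 0x8 and a target object bound at GTT offset 0x100000 makes the
 * loop above write 0x100008 into this object at gtt_offset + 0x40 through
 * the aperture mapping.  Had userspace already guessed right
 * (presumed_offset == 0x100000), the write is skipped entirely.
 */
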
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
					     (uintptr_t) exec->cliprects_ptr;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	if (!exec_start)
		return -EINVAL;

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);

		/* Break out if we're close enough. */
		if ((long) (jiffies - request->emitted_jiffies) <=
		    (20 * HZ) / 1000) {
			mutex_unlock(&dev->struct_mutex);
			return 0;
		}

		/* Wait on the last request if not. */
		ret = i915_wait_request(dev, request->seqno);
		if (ret != 0) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

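/*
 * Illustrative arithmetic: (20 * HZ) / 1000 is 20 msec expressed in
 * jiffies, e.g. 20 jiffies at HZ == 1000 or 5 jiffies at HZ == 250, so the
 * loop above only blocks once the oldest outstanding request is more than
 * 20 msec old.
 */
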
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *validate_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	int ret, i;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
	i915_kernel_lost_context(dev);

	ret = i915_gem_ring_throttle(dev);
	if (ret)
		return ret;

	/* Copy in the validate list from userland */
	validate_list = drm_calloc(sizeof(*validate_list), args->buffer_count,
				   DRM_MEM_DRIVER);
	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
				 DRM_MEM_DRIVER);
	if (validate_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate validate or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(validate_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*validate_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d validate entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	mutex_lock(&dev->struct_mutex);
	/* Look up object handles and perform the relocations */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       validate_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  validate_list[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		ret = i915_gem_reloc_and_validate_object(object_list[i],
							 file_priv,
							 &validate_list[i]);
		if (ret) {
			DRM_ERROR("reloc and validate failed %d\n", ret);
			goto err;
		}
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	batch_obj->pending_read_domains = DRM_GEM_DOMAIN_I915_COMMAND;
	batch_obj->pending_write_domain = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		if (obj_priv->gtt_space == NULL) {
			/* We evicted the buffer in the process of validating
			 * our set of buffers in.  We could try to recover by
			 * kicking them everything out and trying again from
			 * the start.
			 */
			ret = -ENOMEM;
			goto err;
		}

		/* make sure all previous memory operations have passed */
		i915_gem_object_set_domain(obj,
					   obj->pending_read_domains,
					   obj->pending_write_domain);
		obj->pending_read_domains = 0;
		obj->pending_write_domain = 0;
	}

	/* Flush/invalidate caches and chipset buffer */
	flush_domains = i915_gem_dev_set_domain(dev);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						validate_list[i].handle);
	}
#endif

	exec_offset = validate_list[args->buffer_count - 1].offset;

#if WATCH_EXEC
	i915_gem_dump_object(object_list[args->buffer_count - 1],
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains |= i915_retire_commands(dev);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, flush_domains);
	BUG_ON(seqno == 0);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		i915_gem_object_move_to_active(obj);
		obj_priv->last_rendering_seqno = seqno;
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	/* Copy the new buffer offsets back to the user's validate list. */
	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
			   (uintptr_t) args->buffers_ptr,
			   validate_list,
			   sizeof(*validate_list) * args->buffer_count);
	if (ret)
		DRM_ERROR("failed to copy %d validate entries "
			  "back to user (%d)\n",
			  args->buffer_count, ret);
err:
	if (object_list != NULL) {
		for (i = 0; i < args->buffer_count; i++)
			drm_gem_object_unreference(object_list[i]);
	}
	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
	drm_free(validate_list, sizeof(*validate_list) * args->buffer_count,
		 DRM_MEM_DRIVER);

	return ret;
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv = obj->driver_private;
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj,
						  (unsigned) args->alignment);
		if (ret != 0) {
			DRM_ERROR("Failure to bind in "
				  "i915_gem_pin_ioctl(): %d\n",
				  ret);
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	obj_priv->pin_count++;
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv = obj->driver_private;
	obj_priv->pin_count--;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv = obj->driver_private;
	args->busy = obj_priv->active;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	INIT_LIST_HEAD(&obj_priv->list);
	return 0;
}

void i915_gem_free_object(struct drm_gem_object *obj)
{
	i915_kernel_lost_context(obj->dev);
	i915_gem_object_unbind(obj);

	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}

int
i915_gem_set_domain(struct drm_gem_object *obj,
		    struct drm_file *file_priv,
		    uint32_t read_domains,
		    uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	drm_client_lock_take(dev, file_priv);
	i915_kernel_lost_context(dev);
	i915_gem_object_set_domain(obj, read_domains, write_domain);
	i915_gem_dev_set_domain(obj->dev);
	drm_client_lock_release(dev);

	return 0;
}

void
i915_gem_flush_pwrite(struct drm_gem_object *obj,
		      uint64_t offset, uint64_t size)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/*
	 * For writes much less than the size of the object and
	 * which are already pinned in memory, do the flush right now
	 */
	if ((size < obj->size >> 1) && obj_priv->page_list != NULL) {
		unsigned long first_page = offset / PAGE_SIZE;
		unsigned long beyond_page =
			roundup(offset + size, PAGE_SIZE) / PAGE_SIZE;

		drm_ttm_cache_flush(obj_priv->page_list + first_page,
				    beyond_page - first_page);
		drm_agp_chipset_flush(dev);
		obj->write_domain = 0;
	}
}

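/*
 * Illustrative arithmetic for the partial flush above (4KiB pages): a
 * pwrite with offset = 0x1800 and size = 0x1000 gives first_page = 1 and
 * beyond_page = roundup(0x2800, 0x1000) / 0x1000 = 3, so exactly the two
 * pages touched by the write (pages 1 and 2) get clflushed.
 */
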
void
i915_gem_lastclose(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);

	/* Assume that the chip has been idled at this point. Just pull them
	 * off the execution list and unref them.  Since this is the last
	 * close, this is also the last ref and they'll go away.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);

		list_del_init(&obj_priv->list);
		obj_priv->active = 0;
		obj_priv->obj->write_domain = 0;
		drm_gem_object_unreference(obj_priv->obj);
	}

	mutex_unlock(&dev->struct_mutex);
}