1 /**************************************************************************
2  * 
3  * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4  * All Rights Reserved.
5  * 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  * 
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  * 
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  * 
26  **************************************************************************/
27
28 /* Originally a fake version of the buffer manager so that we could
29  * prototype the changes in a driver fairly quickly; it has since been
30  * fleshed out into a fully functional interim solution.
31  *
32  * Basically wraps the old style memory management in the new
33  * programming interface, but is more expressive and avoids many of
34  * the bugs in the old texture manager.
35  */
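
/* A minimal usage sketch (illustrative only; not part of this file).
 * A driver creates the fake manager over a chunk of GTT-mapped memory and
 * then uses the generic drm_intel_bo_* entry points from intel_bufmgr.h.
 * The aperture offset/size/virtual and sarea pointer below are made-up
 * placeholders.
 *
 *     drm_intel_bufmgr *bufmgr;
 *     drm_intel_bo *bo;
 *
 *     bufmgr = drm_intel_bufmgr_fake_init(fd, aperture_offset,
 *                                         aperture_virtual, aperture_size,
 *                                         sarea_last_dispatch);
 *     bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *     drm_intel_bo_map(bo, 1);
 *     memset(bo->virtual, 0, 4096);
 *     drm_intel_bo_unmap(bo);
 *     drm_intel_bo_unreference(bo);
 *     drm_intel_bufmgr_destroy(bufmgr);
 */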
36
37 #ifdef HAVE_CONFIG_H
38 #include "config.h"
39 #endif
40
41 #include <stdlib.h>
42 #include <string.h>
43 #include <assert.h>
44 #include <errno.h>
45 #include <xf86drm.h>
46 #include <pthread.h>
47 #include "intel_bufmgr.h"
48 #include "intel_bufmgr_priv.h"
49 #include "drm.h"
50 #include "i915_drm.h"
51 #include "mm.h"
52 #include "libdrm_lists.h"
53
54 /* Support gcc's __FUNCTION__ for people using other compilers */
55 #if !defined(__GNUC__) && !defined(__FUNCTION__)
56 # define __FUNCTION__ __func__ /* C99 */
57 #endif
58
59 #define DBG(...) do {                                   \
60         if (bufmgr_fake->bufmgr.debug)                  \
61                 drmMsg(__VA_ARGS__);                    \
62 } while (0)
63
64 /* Internal flags:
65  */
66 #define BM_NO_BACKING_STORE                     0x00000001
67 #define BM_NO_FENCE_SUBDATA                     0x00000002
68 #define BM_PINNED                               0x00000004
69
70 /* Wrapper around mm.c's mem_block, which understands that you must
71  * wait for fences to expire before memory can be freed.  This is
72  * specific to our use of memcpy for uploads - an upload that was
73  * processed through the command queue wouldn't need to care about
74  * fences.
75  */
76 #define MAX_RELOCS 4096
77
78 struct fake_buffer_reloc {
79         /** Buffer object that the relocation points at. */
80         drm_intel_bo *target_buf;
81         /** Offset of the relocation entry within reloc_buf. */
82         uint32_t offset;
83         /**
84          * Cached value of the offset when we last performed this relocation.
85          */
86         uint32_t last_target_offset;
87         /** Value added to target_buf's offset to get the relocation entry. */
88         uint32_t delta;
89         /** Cache domains the target buffer is read into. */
90         uint32_t read_domains;
91         /** Cache domain the target buffer will have dirty cachelines in. */
92         uint32_t write_domain;
93 };
94
95 struct block {
96         struct block *next, *prev;
97         struct mem_block *mem;  /* BM_MEM_AGP */
98
99         /**
100          * Marks that the block is currently in the aperture and has yet to be
101          * fenced.
102          */
103         unsigned on_hardware:1;
104         /**
105          * Marks that the block is currently fenced (being used by rendering)
106          * and can't be freed until @fence is passed.
107          */
108         unsigned fenced:1;
109
110         /** Fence cookie for the block. */
111         unsigned fence;         /* Split to read_fence, write_fence */
112
113         drm_intel_bo *bo;
114         void *virtual;
115 };
116
117 typedef struct _bufmgr_fake {
118         drm_intel_bufmgr bufmgr;
119
120         pthread_mutex_t lock;
121
122         unsigned long low_offset;
123         unsigned long size;
124         void *virtual;
125
126         struct mem_block *heap;
127
128         unsigned buf_nr;        /* for generating ids */
129
130         /**
131          * List of blocks which are currently in the GART but haven't been
132          * fenced yet.
133          */
134         struct block on_hardware;
135         /**
136          * List of blocks which are in the GART and have an active fence on
137          * them.
138          */
139         struct block fenced;
140         /**
141          * List of blocks which have an expired fence and are ready to be
142          * evicted.
143          */
144         struct block lru;
145
146         unsigned int last_fence;
147
148         unsigned fail:1;
149         unsigned need_fence:1;
150         int thrashing;
151
152         /**
153          * Driver callback to emit a fence, returning the cookie.
154          *
155          * This allows the driver to hook in a replacement for the DRM usage in
156          * bufmgr_fake.
157          *
158          * Currently, this also requires that a write flush be emitted before
159          * emitting the fence, but this should change.
160          */
161         unsigned int (*fence_emit) (void *private);
162         /** Driver callback to wait for a fence cookie to have passed. */
163         void (*fence_wait) (unsigned int fence, void *private);
164         void *fence_priv;
165
166         /**
167          * Driver callback to execute a buffer.
168          *
169          * This allows the driver to hook in a replacement for the DRM usage in
170          * bufmgr_fake.
171          */
172         int (*exec) (drm_intel_bo *bo, unsigned int used, void *priv);
173         void *exec_priv;
174
175         /** Driver-supplied argument to driver callbacks */
176         void *driver_priv;
177         /**
178          * Pointer to kernel-updated sarea data for the last completed user irq
179          */
180         volatile int *last_dispatch;
181
182         int fd;
183
184         int debug;
185
186         int performed_rendering;
187 } drm_intel_bufmgr_fake;
188
189 typedef struct _drm_intel_bo_fake {
190         drm_intel_bo bo;
191
192         unsigned id;            /* debug only */
193         const char *name;
194
195         unsigned dirty:1;
196         /**
197          * has the card written to this buffer - we may need to copy it back
198          */
199         unsigned card_dirty:1;
200         unsigned int refcount;
201         /* Flags may consist of any of the DRM_BO flags, plus
202          * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the
203          * first two driver private flags.
204          */
205         uint64_t flags;
206         /** Cache domains the target buffer is read into. */
207         uint32_t read_domains;
208         /** Cache domain the target buffer will have dirty cachelines in. */
209         uint32_t write_domain;
210
211         unsigned int alignment;
212         int is_static, validated;
213         unsigned int map_count;
214
215         /** relocation list */
216         struct fake_buffer_reloc *relocs;
217         int nr_relocs;
218         /**
219          * Total size of the target_bos of this buffer.
220          *
221          * Used for estimation in check_aperture.
222          */
223         unsigned int child_size;
224
225         struct block *block;
226         void *backing_store;
227         void (*invalidate_cb) (drm_intel_bo *bo, void *ptr);
228         void *invalidate_ptr;
229 } drm_intel_bo_fake;
230
231 static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
232                         unsigned int fence_cookie);
233
234 #define MAXFENCE 0x7fffffff
235
236 static int
237 FENCE_LTE(unsigned a, unsigned b)
238 {
239         if (a == b)
240                 return 1;
241
242         if (a < b && b - a < (1 << 24))
243                 return 1;
244
245         if (a > b && MAXFENCE - a + b < (1 << 24))
246                 return 1;
247
248         return 0;
249 }
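
/* Worked example of the wrap-around tolerance above: with a = 0x7ffffffd
 * (just below MAXFENCE) and b = 0x2 (just after the wrap), a > b and
 * MAXFENCE - a + b = 4 < (1 << 24), so FENCE_LTE(a, b) returns 1 and the
 * old cookie is treated as already passed.  FENCE_LTE(0x2, 0x7) likewise
 * returns 1 via the plain a < b branch.
 */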
250
251 void
252 drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
253                                          unsigned int (*emit) (void *priv),
254                                          void (*wait) (unsigned int fence,
255                                                        void *priv),
256                                          void *priv)
257 {
258         drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
259
260         bufmgr_fake->fence_emit = emit;
261         bufmgr_fake->fence_wait = wait;
262         bufmgr_fake->fence_priv = priv;
263 }
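
/* Hedged sketch of a driver hooking its own fence mechanism in place of
 * the default DRM_I915_IRQ_* path; my_emit_fence, my_wait_fence and
 * my_ctx are hypothetical driver-side names, not libdrm API.
 *
 *     static unsigned int my_emit_fence(void *priv)
 *     {
 *             return my_flush_and_emit_cookie(priv);   // driver-specific
 *     }
 *
 *     static void my_wait_fence(unsigned int fence, void *priv)
 *     {
 *             my_wait_for_cookie(priv, fence);         // driver-specific
 *     }
 *
 *     drm_intel_bufmgr_fake_set_fence_callback(bufmgr, my_emit_fence,
 *                                              my_wait_fence, my_ctx);
 */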
264
265 static unsigned int
266 _fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake)
267 {
268         struct drm_i915_irq_emit ie;
269         int ret, seq = 1;
270
271         if (bufmgr_fake->fence_emit != NULL) {
272                 seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
273                 return seq;
274         }
275
276         ie.irq_seq = &seq;
277         ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
278                                   &ie, sizeof(ie));
279         if (ret) {
280                 drmMsg("%s: drm_i915_irq_emit: %d\n", __FUNCTION__, ret);
281                 abort();
282         }
283
284         DBG("emit 0x%08x\n", seq);
285         return seq;
286 }
287
288 static void
289 _fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
290 {
291         struct drm_i915_irq_wait iw;
292         int hw_seq, busy_count = 0;
293         int ret;
294         int kernel_lied;
295
296         if (bufmgr_fake->fence_wait != NULL) {
297                 bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
298                 clear_fenced(bufmgr_fake, seq);
299                 return;
300         }
301
302         iw.irq_seq = seq;
303
304         DBG("wait 0x%08x\n", iw.irq_seq);
305
306         /* The kernel IRQ_WAIT implementation is all sorts of broken.
307          * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit
308          *    unsigned range.
309          * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit
310          *    signed range.
311          * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
312          *    signed range.
313          * 4) It returns -EBUSY in 3 seconds even if the hardware is still
314          *    successfully chewing through buffers.
315          *
316          * Assume that in userland we treat sequence numbers as ints, which
317          * makes some of the comparisons convenient, since the sequence
318          * numbers are all positive signed integers.
319          *
320          * From this we get several cases we need to handle.  Here's a timeline.
321          * 0x2   0x7                                    0x7ffffff8   0x7ffffffd
322          *   |    |                                             |    |
323          * ------------------------------------------------------------
324          *
325          * A) Normal wait for hw to catch up
326          * hw_seq seq
327          *   |    |
328          * ------------------------------------------------------------
329          * seq - hw_seq = 5.  If we call IRQ_WAIT, it will wait for hw to
330          * catch up.
331          *
332          * B) Normal wait for a sequence number that's already passed.
333          * seq    hw_seq
334          *   |    |
335          * ------------------------------------------------------------
336          * seq - hw_seq = -5.  If we call IRQ_WAIT, it returns 0 quickly.
337          *
338          * C) Hardware has already wrapped around ahead of us
339          * hw_seq                                                    seq
340          *   |                                                       |
341          * ------------------------------------------------------------
342          * seq - hw_seq = 0x80000000 - 5.  If we called IRQ_WAIT, it would wait
343          * for hw_seq >= seq, which may never occur.  Thus, we want to catch
344          * this in userland and return 0.
345          *
346          * D) We've wrapped around ahead of the hardware.
347          * seq                                                      hw_seq
348          *   |                                                       |
349          * ------------------------------------------------------------
350          * seq - hw_seq = -(0x80000000 - 5).  If we called IRQ_WAIT, it would
351          * return 0 quickly because hw_seq >= seq, even though the hardware
352          * isn't caught up. Thus, we need to catch this early return in
353          * userland and bother the kernel until the hardware really does
354          * catch up.
355          *
356          * E) Hardware might wrap after we test in userland.
357          *                                                  hw_seq  seq
358          *                                                      |    |
359          * ------------------------------------------------------------
360          * seq - hw_seq = 5.  If we call IRQ_WAIT, it will likely see seq >=
361          * hw_seq and wait.  However, suppose hw_seq wraps before we make it
362          * into the kernel.  The kernel sees hw_seq >= seq and waits for 3
363          * seconds then returns -EBUSY.  This is case C).  We should catch
364          * this and then return successfully.
365          *
366          * F) Hardware might take a long time on a buffer.
367          * hw_seq seq
368          *   |    |
369          * -------------------------------------------------------------------
370          * seq - hw_seq = 5.  If we call IRQ_WAIT, if sequence 2 through 5
371          * take too long, it will return -EBUSY.  Batchbuffers in the
372          * gltestperf demo were seen to take up to 7 seconds.  We should
373          * catch early -EBUSY return and keep trying.
374          */
375
376         do {
377                 /* Keep a copy of last_dispatch so that if the wait -EBUSYs
378                  * because the hardware didn't catch up in 3 seconds, we can
379                  * see if it at least made progress and retry.
380                  */
381                 hw_seq = *bufmgr_fake->last_dispatch;
382
383                 /* Catch case C */
384                 if (seq - hw_seq > 0x40000000)
385                         return;
386
387                 ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT,
388                                       &iw, sizeof(iw));
389                 /* Catch case D */
390                 kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch <
391                                              -0x40000000);
392
393                 /* Catch case E */
394                 if (ret == -EBUSY
395                     && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
396                         ret = 0;
397
398                 /* Catch case F: Allow up to 15 seconds chewing on one buffer. */
399                 if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch))
400                         busy_count = 0;
401                 else
402                         busy_count++;
403         } while (kernel_lied || ret == -EAGAIN || ret == -EINTR ||
404                  (ret == -EBUSY && busy_count < 5));
405
406         if (ret != 0) {
407                 drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__,
408                        __LINE__, strerror(-ret));
409                 abort();
410         }
411         clear_fenced(bufmgr_fake, seq);
412 }
413
414 static int
415 _fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
416 {
417         /* Slight problem with wrap-around:
418          */
419         return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
420 }
421
422 /**
423  * Allocate a memory manager block for the buffer.
424  */
425 static int
426 alloc_block(drm_intel_bo *bo)
427 {
428         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
429         drm_intel_bufmgr_fake *bufmgr_fake =
430             (drm_intel_bufmgr_fake *) bo->bufmgr;
431         struct block *block = (struct block *)calloc(sizeof *block, 1);
432         unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
433         unsigned int sz;
434
435         if (!block)
436                 return 0;       /* out of memory */
437
438         sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
439
440         block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
441         if (!block->mem) {
442                 free(block);
443                 return 0;
444         }
445
446         DRMINITLISTHEAD(block);
447
448         /* Insert at head or at tail??? */
449         DRMLISTADDTAIL(block, &bufmgr_fake->lru);
450
451         block->virtual = (uint8_t *) bufmgr_fake->virtual +
452             block->mem->ofs - bufmgr_fake->low_offset;
453         block->bo = bo;
454
455         bo_fake->block = block;
456
457         return 1;
458 }
459
460 /* Release the card storage associated with buf:
461  */
462 static void
463 free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
464            int skip_dirty_copy)
465 {
466         drm_intel_bo_fake *bo_fake;
467
468         if (!block)
469                 return;
470
471         DBG("free block %p %08x %d %d\n", block, block->mem->ofs,
472             block->on_hardware, block->fenced);
473         bo_fake = (drm_intel_bo_fake *) block->bo;
474
475         if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
476                 skip_dirty_copy = 1;
477
478         if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
479                 memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
480                 bo_fake->card_dirty = 0;
481                 bo_fake->dirty = 1;
482         }
483
484         if (block->on_hardware) {
485                 block->bo = NULL;
486         } else if (block->fenced) {
487                 block->bo = NULL;
488         } else {
489                 DBG("    - free immediately\n");
490                 DRMLISTDEL(block);
491
492                 mmFreeMem(block->mem);
493                 free(block);
494         }
495 }
496
497 static void
498 alloc_backing_store(drm_intel_bo *bo)
499 {
500         drm_intel_bufmgr_fake *bufmgr_fake =
501             (drm_intel_bufmgr_fake *) bo->bufmgr;
502         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
503         assert(!bo_fake->backing_store);
504         assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
505
506         bo_fake->backing_store = malloc(bo->size);
507
508         DBG("alloc_backing - buf %d %p %d\n", bo_fake->id,
509             bo_fake->backing_store, bo->size);
510         assert(bo_fake->backing_store);
511 }
512
513 static void
514 free_backing_store(drm_intel_bo *bo)
515 {
516         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
517
518         if (bo_fake->backing_store) {
519                 assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
520                 free(bo_fake->backing_store);
521                 bo_fake->backing_store = NULL;
522         }
523 }
524
525 static void
526 set_dirty(drm_intel_bo *bo)
527 {
528         drm_intel_bufmgr_fake *bufmgr_fake =
529             (drm_intel_bufmgr_fake *) bo->bufmgr;
530         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
531
532         if (bo_fake->flags & BM_NO_BACKING_STORE
533             && bo_fake->invalidate_cb != NULL)
534                 bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
535
536         assert(!(bo_fake->flags & BM_PINNED));
537
538         DBG("set_dirty - buf %d\n", bo_fake->id);
539         bo_fake->dirty = 1;
540 }
541
542 static int
543 evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
544 {
545         struct block *block, *tmp;
546
547         DBG("%s\n", __FUNCTION__);
548
549         DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
550                 drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
551
552                 if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
553                         continue;
554
555                 if (block->fence && max_fence && !FENCE_LTE(block->fence,
556                                                             max_fence))
557                         return 0;
558
559                 set_dirty(&bo_fake->bo);
560                 bo_fake->block = NULL;
561
562                 free_block(bufmgr_fake, block, 0);
563                 return 1;
564         }
565
566         return 0;
567 }
568
569 static int
570 evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
571 {
572         struct block *block, *tmp;
573
574         DBG("%s\n", __FUNCTION__);
575
576         DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
577                 drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
578
579                 if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
580                         continue;
581
582                 set_dirty(&bo_fake->bo);
583                 bo_fake->block = NULL;
584
585                 free_block(bufmgr_fake, block, 0);
586                 return 1;
587         }
588
589         return 0;
590 }
591
592 /**
593  * Removes all objects from the fenced list older than the given fence.
594  */
595 static int
596 clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int fence_cookie)
597 {
598         struct block *block, *tmp;
599         int ret = 0;
600
601         bufmgr_fake->last_fence = fence_cookie;
602         DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
603                 assert(block->fenced);
604
605                 if (_fence_test(bufmgr_fake, block->fence)) {
606
607                         block->fenced = 0;
608
609                         if (!block->bo) {
610                                 DBG("delayed free: offset %x sz %x\n",
611                                     block->mem->ofs, block->mem->size);
612                                 DRMLISTDEL(block);
613                                 mmFreeMem(block->mem);
614                                 free(block);
615                         } else {
616                                 DBG("return to lru: offset %x sz %x\n",
617                                     block->mem->ofs, block->mem->size);
618                                 DRMLISTDEL(block);
619                                 DRMLISTADDTAIL(block, &bufmgr_fake->lru);
620                         }
621
622                         ret = 1;
623                 } else {
624                         /* Blocks are ordered by fence, so if one fails, all
625                          * from here will fail also:
626                          */
627                         DBG("fence not passed: offset %x sz %x %d %d \n",
628                             block->mem->ofs, block->mem->size, block->fence,
629                             bufmgr_fake->last_fence);
630                         break;
631                 }
632         }
633
634         DBG("%s: %d\n", __FUNCTION__, ret);
635         return ret;
636 }
637
638 static void
639 fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
640 {
641         struct block *block, *tmp;
642
643         DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
644                 DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n",
645                     block, block->mem->size, block->mem->ofs, block->bo, fence);
646                 block->fence = fence;
647
648                 block->on_hardware = 0;
649                 block->fenced = 1;
650
651                 /* Move to tail of pending list here
652                  */
653                 DRMLISTDEL(block);
654                 DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
655         }
656
657         assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
658 }
659
660 static int
661 evict_and_alloc_block(drm_intel_bo *bo)
662 {
663         drm_intel_bufmgr_fake *bufmgr_fake =
664             (drm_intel_bufmgr_fake *) bo->bufmgr;
665         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
666
667         assert(bo_fake->block == NULL);
668
669         /* Search for already free memory:
670          */
671         if (alloc_block(bo))
672                 return 1;
673
674         /* If we're not thrashing, allow lru eviction to dig deeper into
675          * recently used textures.  We'll probably be thrashing soon:
676          */
677         if (!bufmgr_fake->thrashing) {
678                 while (evict_lru(bufmgr_fake, 0))
679                         if (alloc_block(bo))
680                                 return 1;
681         }
682
683         /* Keep thrashing counter alive?
684          */
685         if (bufmgr_fake->thrashing)
686                 bufmgr_fake->thrashing = 20;
687
688         /* Wait on any already pending fences - here we are waiting for any
689          * freed memory that has been submitted to hardware and fenced to
690          * become available:
691          */
692         while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
693                 uint32_t fence = bufmgr_fake->fenced.next->fence;
694                 _fence_wait_internal(bufmgr_fake, fence);
695
696                 if (alloc_block(bo))
697                         return 1;
698         }
699
700         if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
701                 while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
702                         uint32_t fence = bufmgr_fake->fenced.next->fence;
703                         _fence_wait_internal(bufmgr_fake, fence);
704                 }
705
706                 if (!bufmgr_fake->thrashing) {
707                         DBG("thrashing\n");
708                 }
709                 bufmgr_fake->thrashing = 20;
710
711                 if (alloc_block(bo))
712                         return 1;
713         }
714
715         while (evict_mru(bufmgr_fake))
716                 if (alloc_block(bo))
717                         return 1;
718
719         DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);
720
721         return 0;
722 }
723
724 /***********************************************************************
725  * Public functions
726  */
727
728 /**
729  * Wait for hardware idle by emitting a fence and waiting for it.
730  */
731 static void
732 drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
733 {
734         unsigned int cookie;
735
736         cookie = _fence_emit_internal(bufmgr_fake);
737         _fence_wait_internal(bufmgr_fake, cookie);
738 }
739
740 /**
741  * Wait for rendering to a buffer to complete.
742  *
743  * It is assumed that the batchbuffer which performed the rendering included
744  * the necessary flushing.
745  */
746 static void
747 drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
748 {
749         drm_intel_bufmgr_fake *bufmgr_fake =
750             (drm_intel_bufmgr_fake *) bo->bufmgr;
751         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
752
753         if (bo_fake->block == NULL || !bo_fake->block->fenced)
754                 return;
755
756         _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
757 }
758
759 static void
760 drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
761 {
762         drm_intel_bufmgr_fake *bufmgr_fake =
763             (drm_intel_bufmgr_fake *) bo->bufmgr;
764
765         pthread_mutex_lock(&bufmgr_fake->lock);
766         drm_intel_fake_bo_wait_rendering_locked(bo);
767         pthread_mutex_unlock(&bufmgr_fake->lock);
768 }
769
770 /* Specifically ignore texture memory sharing.
771  *  -- just evict everything
772  *  -- and wait for idle
773  */
774 void
775 drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
776 {
777         drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
778         struct block *block, *tmp;
779
780         pthread_mutex_lock(&bufmgr_fake->lock);
781
782         bufmgr_fake->need_fence = 1;
783         bufmgr_fake->fail = 0;
784
785         /* Wait for hardware idle.  We don't know where acceleration has been
786          * happening, so we'll need to wait anyway before letting anything get
787          * put on the card again.
788          */
789         drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
790
791         /* Check that we hadn't released the lock without having fenced the last
792          * set of buffers.
793          */
794         assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
795         assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
796
797         DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
798                 assert(_fence_test(bufmgr_fake, block->fence));
799                 set_dirty(block->bo);
800         }
801
802         pthread_mutex_unlock(&bufmgr_fake->lock);
803 }
804
805 static drm_intel_bo *
806 drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr,
807                         const char *name,
808                         unsigned long size,
809                         unsigned int alignment)
810 {
811         drm_intel_bufmgr_fake *bufmgr_fake;
812         drm_intel_bo_fake *bo_fake;
813
814         bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
815
816         assert(size != 0);
817
818         bo_fake = calloc(1, sizeof(*bo_fake));
819         if (!bo_fake)
820                 return NULL;
821
822         bo_fake->bo.size = size;
823         bo_fake->bo.offset = -1;
824         bo_fake->bo.virtual = NULL;
825         bo_fake->bo.bufmgr = bufmgr;
826         bo_fake->refcount = 1;
827
828         /* Alignment must be a power of two */
829         assert((alignment & (alignment - 1)) == 0);
830         if (alignment == 0)
831                 alignment = 1;
832         bo_fake->alignment = alignment;
833         bo_fake->id = ++bufmgr_fake->buf_nr;
834         bo_fake->name = name;
835         bo_fake->flags = 0;
836         bo_fake->is_static = 0;
837
838         DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
839             bo_fake->bo.size / 1024);
840
841         return &bo_fake->bo;
842 }
843
844 static drm_intel_bo *
845 drm_intel_fake_bo_alloc_tiled(drm_intel_bufmgr * bufmgr,
846                               const char *name,
847                               int x, int y, int cpp,
848                               uint32_t *tiling_mode,
849                               unsigned long *pitch,
850                               unsigned long flags)
851 {
852         unsigned long stride, aligned_y;
853
854         /* No runtime tiling support for fake. */
855         *tiling_mode = I915_TILING_NONE;
856
857         /* Align it for being a render target.  Shouldn't need anything else. */
858         stride = x * cpp;
859         stride = ROUND_UP_TO(stride, 64);
860
861         /* 965 subspan loading alignment */
862         aligned_y = ALIGN(y, 2);
863
864         *pitch = stride;
865
866         return drm_intel_fake_bo_alloc(bufmgr, name, stride * aligned_y,
867                                        4096);
868 }
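
/* Worked example of the sizing above (illustrative numbers only): a
 * 100x31 RGBA surface has cpp = 4, so stride = 400, rounded up to the
 * next multiple of 64 -> 448; y is padded to 32 for 965 subspan loading,
 * giving a 448 * 32 = 14336 byte allocation aligned to 4096.
 */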
869
870 drm_intel_bo *
871 drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
872                                const char *name,
873                                unsigned long offset,
874                                unsigned long size, void *virtual)
875 {
876         drm_intel_bufmgr_fake *bufmgr_fake;
877         drm_intel_bo_fake *bo_fake;
878
879         bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
880
881         assert(size != 0);
882
883         bo_fake = calloc(1, sizeof(*bo_fake));
884         if (!bo_fake)
885                 return NULL;
886
887         bo_fake->bo.size = size;
888         bo_fake->bo.offset = offset;
889         bo_fake->bo.virtual = virtual;
890         bo_fake->bo.bufmgr = bufmgr;
891         bo_fake->refcount = 1;
892         bo_fake->id = ++bufmgr_fake->buf_nr;
893         bo_fake->name = name;
894         bo_fake->flags = BM_PINNED;
895         bo_fake->is_static = 1;
896
897         DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id,
898             bo_fake->name, bo_fake->bo.size / 1024);
899
900         return &bo_fake->bo;
901 }
902
903 static void
904 drm_intel_fake_bo_reference(drm_intel_bo *bo)
905 {
906         drm_intel_bufmgr_fake *bufmgr_fake =
907             (drm_intel_bufmgr_fake *) bo->bufmgr;
908         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
909
910         pthread_mutex_lock(&bufmgr_fake->lock);
911         bo_fake->refcount++;
912         pthread_mutex_unlock(&bufmgr_fake->lock);
913 }
914
915 static void
916 drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
917 {
918         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
919
920         bo_fake->refcount++;
921 }
922
923 static void
924 drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
925 {
926         drm_intel_bufmgr_fake *bufmgr_fake =
927             (drm_intel_bufmgr_fake *) bo->bufmgr;
928         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
929         int i;
930
931         if (--bo_fake->refcount == 0) {
932                 assert(bo_fake->map_count == 0);
933                 /* No remaining references, so free it */
934                 if (bo_fake->block)
935                         free_block(bufmgr_fake, bo_fake->block, 1);
936                 free_backing_store(bo);
937
938                 for (i = 0; i < bo_fake->nr_relocs; i++)
939                         drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].
940                                                              target_buf);
941
942                 DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id,
943                     bo_fake->name);
944
945                 free(bo_fake->relocs);
946                 free(bo);
947         }
948 }
949
950 static void
951 drm_intel_fake_bo_unreference(drm_intel_bo *bo)
952 {
953         drm_intel_bufmgr_fake *bufmgr_fake =
954             (drm_intel_bufmgr_fake *) bo->bufmgr;
955
956         pthread_mutex_lock(&bufmgr_fake->lock);
957         drm_intel_fake_bo_unreference_locked(bo);
958         pthread_mutex_unlock(&bufmgr_fake->lock);
959 }
960
961 /**
962  * Set the buffer as not requiring backing store, and instead get the callback
963  * invoked whenever it would be set dirty.
964  */
965 void
966 drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
967                                         void (*invalidate_cb) (drm_intel_bo *bo,
968                                                                void *ptr),
969                                         void *ptr)
970 {
971         drm_intel_bufmgr_fake *bufmgr_fake =
972             (drm_intel_bufmgr_fake *) bo->bufmgr;
973         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
974
975         pthread_mutex_lock(&bufmgr_fake->lock);
976
977         if (bo_fake->backing_store)
978                 free_backing_store(bo);
979
980         bo_fake->flags |= BM_NO_BACKING_STORE;
981
982         DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
983         bo_fake->dirty = 1;
984         bo_fake->invalidate_cb = invalidate_cb;
985         bo_fake->invalidate_ptr = ptr;
986
987         /* Note that it is invalid right from the start.  Also note
988          * invalidate_cb is called with the bufmgr locked, so cannot
989          * itself make bufmgr calls.
990          */
991         if (invalidate_cb != NULL)
992                 invalidate_cb(bo, ptr);
993
994         pthread_mutex_unlock(&bufmgr_fake->lock);
995 }
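
/* Hedged sketch of how a driver might use this for a buffer whose
 * contents it can regenerate rather than have copied out to backing
 * store; my_invalidate and struct my_state are hypothetical names.
 *
 *     static void my_invalidate(drm_intel_bo *bo, void *ptr)
 *     {
 *             struct my_state *s = ptr;
 *             s->must_reemit = 1;   // contents lost, rebuild on next use
 *     }
 *
 *     drm_intel_bo_fake_disable_backing_store(bo, my_invalidate, state);
 *
 * Note that the callback runs with the bufmgr lock held, so it must not
 * call back into the buffer manager (see the comment above).
 */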
996
997 /**
998  * Map a buffer into bo->virtual, allocating either card memory space (If
999  * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
1000  */
1001 static int
1002  drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
1003 {
1004         drm_intel_bufmgr_fake *bufmgr_fake =
1005             (drm_intel_bufmgr_fake *) bo->bufmgr;
1006         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
1007
1008         /* Static buffers are always mapped. */
1009         if (bo_fake->is_static) {
1010                 if (bo_fake->card_dirty) {
1011                         drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
1012                         bo_fake->card_dirty = 0;
1013                 }
1014                 return 0;
1015         }
1016
1017         /* Allow recursive mapping.  Mesa may recursively map buffers with
1018          * nested display loops, and it is used internally in bufmgr_fake
1019          * for relocation.
1020          */
1021         if (bo_fake->map_count++ != 0)
1022                 return 0;
1023
1024         {
1025                 DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id,
1026                     bo_fake->name, bo_fake->bo.size / 1024);
1027
1028                 if (bo->virtual != NULL) {
1029                         drmMsg("%s: already mapped\n", __FUNCTION__);
1030                         abort();
1031                 } else if (bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)) {
1032
1033                         if (!bo_fake->block && !evict_and_alloc_block(bo)) {
1034                                 DBG("%s: alloc failed\n", __FUNCTION__);
1035                                 bufmgr_fake->fail = 1;
1036                                 return 1;
1037                         } else {
1038                                 assert(bo_fake->block);
1039                                 bo_fake->dirty = 0;
1040
1041                                 if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
1042                                     bo_fake->block->fenced) {
1043                                         drm_intel_fake_bo_wait_rendering_locked
1044                                             (bo);
1045                                 }
1046
1047                                 bo->virtual = bo_fake->block->virtual;
1048                         }
1049                 } else {
1050                         if (write_enable)
1051                                 set_dirty(bo);
1052
1053                         if (bo_fake->backing_store == 0)
1054                                 alloc_backing_store(bo);
1055
1056                         if ((bo_fake->card_dirty == 1) && bo_fake->block) {
1057                                 if (bo_fake->block->fenced)
1058                                         drm_intel_fake_bo_wait_rendering_locked
1059                                             (bo);
1060
1061                                 memcpy(bo_fake->backing_store,
1062                                        bo_fake->block->virtual,
1063                                        bo_fake->block->bo->size);
1064                                 bo_fake->card_dirty = 0;
1065                         }
1066
1067                         bo->virtual = bo_fake->backing_store;
1068                 }
1069         }
1070
1071         return 0;
1072 }
1073
1074 static int
1075  drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
1076 {
1077         drm_intel_bufmgr_fake *bufmgr_fake =
1078             (drm_intel_bufmgr_fake *) bo->bufmgr;
1079         int ret;
1080
1081         pthread_mutex_lock(&bufmgr_fake->lock);
1082         ret = drm_intel_fake_bo_map_locked(bo, write_enable);
1083         pthread_mutex_unlock(&bufmgr_fake->lock);
1084
1085         return ret;
1086 }
1087
1088 static int
1089  drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
1090 {
1091         drm_intel_bufmgr_fake *bufmgr_fake =
1092             (drm_intel_bufmgr_fake *) bo->bufmgr;
1093         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
1094
1095         /* Static buffers are always mapped. */
1096         if (bo_fake->is_static)
1097                 return 0;
1098
1099         assert(bo_fake->map_count != 0);
1100         if (--bo_fake->map_count != 0)
1101                 return 0;
1102
1103         DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
1104             bo_fake->bo.size / 1024);
1105
1106         bo->virtual = NULL;
1107
1108         return 0;
1109 }
1110
1111 static int drm_intel_fake_bo_unmap(drm_intel_bo *bo)
1112 {
1113         drm_intel_bufmgr_fake *bufmgr_fake =
1114             (drm_intel_bufmgr_fake *) bo->bufmgr;
1115         int ret;
1116
1117         pthread_mutex_lock(&bufmgr_fake->lock);
1118         ret = drm_intel_fake_bo_unmap_locked(bo);
1119         pthread_mutex_unlock(&bufmgr_fake->lock);
1120
1121         return ret;
1122 }
1123
1124 static int
1125 drm_intel_fake_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1126                           unsigned long size, const void *data)
1127 {
1128         int ret;
1129
1130         if (size == 0 || data == NULL)
1131                 return 0;
1132
1133         ret = drm_intel_bo_map(bo, 1);
1134         if (ret)
1135                 return ret;
1136         memcpy((unsigned char *)bo->virtual + offset, data, size);
1137         drm_intel_bo_unmap(bo);
1138         return 0;
1139 }
1140
1141 static void
1142  drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
1143 {
1144         struct block *block, *tmp;
1145
1146         bufmgr_fake->performed_rendering = 0;
1147         /* okay, for every BO that is on the HW, kick it off.
1148            seriously not afraid of the POLICE right now */
1149         DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
1150                 drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
1151
1152                 block->on_hardware = 0;
1153                 free_block(bufmgr_fake, block, 0);
1154                 bo_fake->block = NULL;
1155                 bo_fake->validated = 0;
1156                 if (!(bo_fake->flags & BM_NO_BACKING_STORE))
1157                         bo_fake->dirty = 1;
1158         }
1159
1160 }
1161
1162 static int
1163  drm_intel_fake_bo_validate(drm_intel_bo *bo)
1164 {
1165         drm_intel_bufmgr_fake *bufmgr_fake;
1166         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
1167
1168         bufmgr_fake = (drm_intel_bufmgr_fake *) bo->bufmgr;
1169
1170         DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id,
1171             bo_fake->name, bo_fake->bo.size / 1024);
1172
1173         /* Sanity check: Buffers should be unmapped before being validated.
1174          * This is not so much of a problem for bufmgr_fake, but TTM refuses,
1175          * and the problem is harder to debug there.
1176          */
1177         assert(bo_fake->map_count == 0);
1178
1179         if (bo_fake->is_static) {
1180                 /* Add it to the needs-fence list */
1181                 bufmgr_fake->need_fence = 1;
1182                 return 0;
1183         }
1184
1185         /* Allocate the card memory */
1186         if (!bo_fake->block && !evict_and_alloc_block(bo)) {
1187                 bufmgr_fake->fail = 1;
1188                 DBG("Failed to validate buf %d:%s\n", bo_fake->id,
1189                     bo_fake->name);
1190                 return -1;
1191         }
1192
1193         assert(bo_fake->block);
1194         assert(bo_fake->block->bo == &bo_fake->bo);
1195
1196         bo->offset = bo_fake->block->mem->ofs;
1197
1198         /* Upload the buffer contents if necessary */
1199         if (bo_fake->dirty) {
1200                 DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id,
1201                     bo_fake->name, bo->size, bo_fake->block->mem->ofs);
1202
1203                 assert(!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)));
1204
1205                 /* Actually, should be able to just wait for a fence on the
1206          * memory, which we would be tracking when we free it.  Waiting
1207                  * for idle is a sufficiently large hammer for now.
1208                  */
1209                 drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
1210
1211                 /* We may never have mapped this BO, so it might not have
1212                  * any backing store; that should be rare, but zero the
1213                  * card memory in any case. */
1214                 if (bo_fake->backing_store)
1215                         memcpy(bo_fake->block->virtual, bo_fake->backing_store,
1216                                bo->size);
1217                 else
1218                         memset(bo_fake->block->virtual, 0, bo->size);
1219
1220                 bo_fake->dirty = 0;
1221         }
1222
1223         bo_fake->block->fenced = 0;
1224         bo_fake->block->on_hardware = 1;
1225         DRMLISTDEL(bo_fake->block);
1226         DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);
1227
1228         bo_fake->validated = 1;
1229         bufmgr_fake->need_fence = 1;
1230
1231         return 0;
1232 }
1233
1234 static void
1235 drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr)
1236 {
1237         drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
1238         unsigned int cookie;
1239
1240         cookie = _fence_emit_internal(bufmgr_fake);
1241         fence_blocks(bufmgr_fake, cookie);
1242
1243         DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
1244 }
1245
1246 static void
1247 drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr)
1248 {
1249         drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
1250
1251         pthread_mutex_destroy(&bufmgr_fake->lock);
1252         mmDestroy(bufmgr_fake->heap);
1253         free(bufmgr);
1254 }
1255
1256 static int
1257 drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1258                           drm_intel_bo *target_bo, uint32_t target_offset,
1259                           uint32_t read_domains, uint32_t write_domain)
1260 {
1261         drm_intel_bufmgr_fake *bufmgr_fake =
1262             (drm_intel_bufmgr_fake *) bo->bufmgr;
1263         struct fake_buffer_reloc *r;
1264         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
1265         drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *) target_bo;
1266         int i;
1267
1268         pthread_mutex_lock(&bufmgr_fake->lock);
1269
1270         assert(bo);
1271         assert(target_bo);
1272
1273         if (bo_fake->relocs == NULL) {
1274                 bo_fake->relocs =
1275                     malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
1276         }
1277
1278         r = &bo_fake->relocs[bo_fake->nr_relocs++];
1279
1280         assert(bo_fake->nr_relocs <= MAX_RELOCS);
1281
1282         drm_intel_fake_bo_reference_locked(target_bo);
1283
1284         if (!target_fake->is_static) {
1285                 bo_fake->child_size +=
1286                     ALIGN(target_bo->size, target_fake->alignment);
1287                 bo_fake->child_size += target_fake->child_size;
1288         }
1289         r->target_buf = target_bo;
1290         r->offset = offset;
1291         r->last_target_offset = target_bo->offset;
1292         r->delta = target_offset;
1293         r->read_domains = read_domains;
1294         r->write_domain = write_domain;
1295
1296         if (bufmgr_fake->debug) {
1297                 /* Check that a conflicting relocation hasn't already been
1298                  * emitted.
1299                  */
1300                 for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
1301                         struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];
1302
1303                         assert(r->offset != r2->offset);
1304                 }
1305         }
1306
1307         pthread_mutex_unlock(&bufmgr_fake->lock);
1308
1309         return 0;
1310 }
1311
1312 /**
1313  * Incorporates the validation flags associated with each relocation into
1314  * the combined validation flags for the buffer on this batchbuffer submission.
1315  */
1316 static void
1317 drm_intel_fake_calculate_domains(drm_intel_bo *bo)
1318 {
1319         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
1320         int i;
1321
1322         for (i = 0; i < bo_fake->nr_relocs; i++) {
1323                 struct fake_buffer_reloc *r = &bo_fake->relocs[i];
1324                 drm_intel_bo_fake *target_fake =
1325                     (drm_intel_bo_fake *) r->target_buf;
1326
1327                 /* Do the same for the tree of buffers we depend on */
1328                 drm_intel_fake_calculate_domains(r->target_buf);
1329
1330                 target_fake->read_domains |= r->read_domains;
1331                 target_fake->write_domain |= r->write_domain;
1332         }
1333 }
1334
1335 static int
1336 drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
1337 {
1338         drm_intel_bufmgr_fake *bufmgr_fake =
1339             (drm_intel_bufmgr_fake *) bo->bufmgr;
1340         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
1341         int i, ret;
1342
1343         assert(bo_fake->map_count == 0);
1344
1345         for (i = 0; i < bo_fake->nr_relocs; i++) {
1346                 struct fake_buffer_reloc *r = &bo_fake->relocs[i];
1347                 drm_intel_bo_fake *target_fake =
1348                     (drm_intel_bo_fake *) r->target_buf;
1349                 uint32_t reloc_data;
1350
1351                 /* Validate the target buffer if that hasn't been done. */
1352                 if (!target_fake->validated) {
1353                         ret =
1354                             drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
1355                         if (ret != 0) {
1356                                 if (bo->virtual != NULL)
1357                                         drm_intel_fake_bo_unmap_locked(bo);
1358                                 return ret;
1359                         }
1360                 }
1361
1362                 /* Calculate the value of the relocation entry. */
1363                 if (r->target_buf->offset != r->last_target_offset) {
1364                         reloc_data = r->target_buf->offset + r->delta;
1365
1366                         if (bo->virtual == NULL)
1367                                 drm_intel_fake_bo_map_locked(bo, 1);
1368
1369                         *(uint32_t *) ((uint8_t *) bo->virtual + r->offset) =
1370                             reloc_data;
1371
1372                         r->last_target_offset = r->target_buf->offset;
1373                 }
1374         }
1375
1376         if (bo->virtual != NULL)
1377                 drm_intel_fake_bo_unmap_locked(bo);
1378
1379         if (bo_fake->write_domain != 0) {
1380                 if (!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED))) {
1381                         if (bo_fake->backing_store == 0)
1382                                 alloc_backing_store(bo);
1383                 }
1384                 bo_fake->card_dirty = 1;
1385                 bufmgr_fake->performed_rendering = 1;
1386         }
1387
1388         return drm_intel_fake_bo_validate(bo);
1389 }
1390
1391 static void
1392 drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
1393 {
1394         drm_intel_bufmgr_fake *bufmgr_fake =
1395             (drm_intel_bufmgr_fake *) bo->bufmgr;
1396         drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
1397         int i;
1398
1399         for (i = 0; i < bo_fake->nr_relocs; i++) {
1400                 struct fake_buffer_reloc *r = &bo_fake->relocs[i];
1401                 drm_intel_bo_fake *target_fake =
1402                     (drm_intel_bo_fake *) r->target_buf;
1403
1404                 if (target_fake->validated)
1405                         drm_intel_bo_fake_post_submit(r->target_buf);
1406
1407                 DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
1408                     bo_fake->name, (uint32_t) bo->offset, r->offset,
1409                     target_fake->name, (uint32_t) r->target_buf->offset,
1410                     r->delta);
1411         }
1412
1413         assert(bo_fake->map_count == 0);
1414         bo_fake->validated = 0;
1415         bo_fake->read_domains = 0;
1416         bo_fake->write_domain = 0;
1417 }
1418
1419 void
1420 drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
1421                                              int (*exec) (drm_intel_bo *bo,
1422                                                           unsigned int used,
1423                                                           void *priv),
1424                                              void *priv)
1425 {
1426         drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
1427
1428         bufmgr_fake->exec = exec;
1429         bufmgr_fake->exec_priv = priv;
1430 }
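
/* Hedged sketch of a driver submitting batches itself instead of the
 * DRM_I915_BATCHBUFFER ioctl used by default; my_exec and my_ctx are
 * hypothetical driver-side names.
 *
 *     static int my_exec(drm_intel_bo *bo, unsigned int used, void *priv)
 *     {
 *             return my_submit_batch(priv, bo->offset, used);
 *     }
 *
 *     drm_intel_bufmgr_fake_set_exec_callback(bufmgr, my_exec, my_ctx);
 */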
1431
1432 static int
1433 drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
1434                        drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
1435 {
1436         drm_intel_bufmgr_fake *bufmgr_fake =
1437             (drm_intel_bufmgr_fake *) bo->bufmgr;
1438         drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *) bo;
1439         struct drm_i915_batchbuffer batch;
1440         int ret;
1441         int retry_count = 0;
1442
1443         pthread_mutex_lock(&bufmgr_fake->lock);
1444
1445         bufmgr_fake->performed_rendering = 0;
1446
1447         drm_intel_fake_calculate_domains(bo);
1448
1449         batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
1450
1451         /* we've run out of RAM so blow the whole lot away and retry */
1452 restart:
1453         ret = drm_intel_fake_reloc_and_validate_buffer(bo);
1454         if (bufmgr_fake->fail == 1) {
1455                 if (retry_count == 0) {
1456                         retry_count++;
1457                         drm_intel_fake_kick_all_locked(bufmgr_fake);
1458                         bufmgr_fake->fail = 0;
1459                         goto restart;
1460                 } else          /* dump out the memory here */
1461                         mmDumpMemInfo(bufmgr_fake->heap);
1462         }
1463
1464         assert(ret == 0);
1465
1466         if (bufmgr_fake->exec != NULL) {
1467                 int ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
1468                 if (ret != 0) {
1469                         pthread_mutex_unlock(&bufmgr_fake->lock);
1470                         return ret;
1471                 }
1472         } else {
1473                 batch.start = bo->offset;
1474                 batch.used = used;
1475                 batch.cliprects = cliprects;
1476                 batch.num_cliprects = num_cliprects;
1477                 batch.DR1 = 0;
1478                 batch.DR4 = DR4;
1479
1480                 if (drmCommandWrite
1481                     (bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
1482                      sizeof(batch))) {
1483                         drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
1484                         pthread_mutex_unlock(&bufmgr_fake->lock);
1485                         return -errno;
1486                 }
1487         }
1488
1489         drm_intel_fake_fence_validated(bo->bufmgr);
1490
1491         drm_intel_bo_fake_post_submit(bo);
1492
1493         pthread_mutex_unlock(&bufmgr_fake->lock);
1494
1495         return 0;
1496 }
1497
1498 /**
1499  * Return an error if the list of BOs will exceed the aperture size.
1500  *
1501  * This is a rough guess and likely to fail, as during the validate sequence we
1502  * may place a buffer in an inopportune spot early on and then fail to fit
1503  * a set smaller than the aperture.
1504  */
1505 static int
1506 drm_intel_fake_check_aperture_space(drm_intel_bo ** bo_array, int count)
1507 {
1508         drm_intel_bufmgr_fake *bufmgr_fake =
1509             (drm_intel_bufmgr_fake *) bo_array[0]->bufmgr;
1510         unsigned int sz = 0;
1511         int i;
1512
1513         for (i = 0; i < count; i++) {
1514                 drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo_array[i];
1515
1516                 if (bo_fake == NULL)
1517                         continue;
1518
1519                 if (!bo_fake->is_static)
1520                         sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
1521                 sz += bo_fake->child_size;
1522         }
1523
1524         if (sz > bufmgr_fake->size) {
1525                 DBG("check_space: overflowed bufmgr size, %dkb vs %dkb\n",
1526                     sz / 1024, bufmgr_fake->size / 1024);
1527                 return -1;
1528         }
1529
1530         DBG("drm_check_space: sz %dkb vs bufmgr %dkb\n", sz / 1024,
1531             bufmgr_fake->size / 1024);
1532         return 0;
1533 }
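
/* Hedged sketch of the usual caller pattern (names here are illustrative):
 * batchbuffer code checks the estimate before emitting more state and
 * flushes when the set would no longer fit.
 *
 *     drm_intel_bo *aper_array[] = { batch_bo, target_bo };
 *
 *     if (drm_intel_bufmgr_check_aperture_space(aper_array, 2) != 0) {
 *             flush_batch();   // would overflow: submit what we have first
 *     }
 */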
1534
1535 /**
1536  * Evicts all buffers, waiting for fences to pass and copying contents out
1537  * as necessary.
1538  *
1539  * Used by the X Server on LeaveVT, when the card memory is no longer our
1540  * own.
1541  */
1542 void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
1543 {
1544         drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
1545         struct block *block, *tmp;
1546
1547         pthread_mutex_lock(&bufmgr_fake->lock);
1548
1549         bufmgr_fake->need_fence = 1;
1550         bufmgr_fake->fail = 0;
1551
1552         /* Wait for hardware idle.  We don't know where acceleration has been
1553          * happening, so we'll need to wait anyway before letting anything get
1554          * put on the card again.
1555          */
1556         drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
1557
1558         /* Check that we hadn't released the lock without having fenced the last
1559          * set of buffers.
1560          */
1561         assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
1562         assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
1563
1564         DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
1565                 drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
1566                 /* Releases the memory, and memcpys dirty contents out if
1567                  * necessary.
1568                  */
1569                 free_block(bufmgr_fake, block, 0);
1570                 bo_fake->block = NULL;
1571         }
1572
1573         pthread_mutex_unlock(&bufmgr_fake->lock);
1574 }
1575
1576 void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
1577                                              volatile unsigned int
1578                                              *last_dispatch)
1579 {
1580         drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
1581
1582         bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
1583 }
1584
1585 drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
1586                                              unsigned long low_offset,
1587                                              void *low_virtual,
1588                                              unsigned long size,
1589                                              volatile unsigned int
1590                                              *last_dispatch)
1591 {
1592         drm_intel_bufmgr_fake *bufmgr_fake;
1593
1594         bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
1595         if (bufmgr_fake == NULL)
                 return NULL;

1596         if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
1597                 free(bufmgr_fake);
1598                 return NULL;
1599         }
1600
1601         /* Initialize allocator */
1602         DRMINITLISTHEAD(&bufmgr_fake->fenced);
1603         DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
1604         DRMINITLISTHEAD(&bufmgr_fake->lru);
1605
1606         bufmgr_fake->low_offset = low_offset;
1607         bufmgr_fake->virtual = low_virtual;
1608         bufmgr_fake->size = size;
1609         bufmgr_fake->heap = mmInit(low_offset, size);
1610
1611         /* Hook in methods */
1612         bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
1613         bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
1614         bufmgr_fake->bufmgr.bo_alloc_tiled = drm_intel_fake_bo_alloc_tiled;
1615         bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
1616         bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
1617         bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
1618         bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
1619         bufmgr_fake->bufmgr.bo_subdata = drm_intel_fake_bo_subdata;
1620         bufmgr_fake->bufmgr.bo_wait_rendering =
1621             drm_intel_fake_bo_wait_rendering;
1622         bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
1623         bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
1624         bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
1625         bufmgr_fake->bufmgr.check_aperture_space =
1626             drm_intel_fake_check_aperture_space;
1627         bufmgr_fake->bufmgr.debug = 0;
1628
1629         bufmgr_fake->fd = fd;
1630         bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
1631
1632         return &bufmgr_fake->bufmgr;
1633 }