haswell: fix render kernels.
[android-x86/hardware-intel-common-vaapi.git] / src / i965_render.c
/*
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

/*
 * Most of the rendering code is ported from xf86-video-intel/src/i965_video.c
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"
#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_structs.h"

#include "i965_render.h"

#define SF_KERNEL_NUM_GRF       16
#define SF_MAX_THREADS          1

static const uint32_t sf_kernel_static[][4] =
{
#include "shaders/render/exa_sf.g4b"
};

#define PS_KERNEL_NUM_GRF       32
#define PS_MAX_THREADS          32

#define I965_GRF_BLOCKS(nreg)   ((nreg + 15) / 16 - 1)
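/* I965_GRF_BLOCKS() converts a GRF register count into the encoding the
 * fixed-function thread state expects: the number of 16-register blocks,
 * minus one (e.g. 16 GRFs -> 0, 32 GRFs -> 1). */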

static const uint32_t ps_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_planar.g4b"
#include "shaders/render/exa_wm_yuv_rgb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};
static const uint32_t ps_subpic_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_argb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

/* On IRONLAKE */
static const uint32_t sf_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_sf.g4b.gen5"
};

static const uint32_t ps_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_planar.g4b.gen5"
#include "shaders/render/exa_wm_yuv_rgb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};
static const uint32_t ps_subpic_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_argb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

/* programs for Sandybridge */
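/* The SF stage runs no kernel from Gen6 onwards, so the array below is
 * intentionally empty; it only keeps the entries of render_kernels_gen6
 * consistent with the older kernel tables. */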
static const uint32_t sf_kernel_static_gen6[][4] =
{
};

static const uint32_t ps_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_planar.g6b"
#include "shaders/render/exa_wm_yuv_rgb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

static const uint32_t ps_subpic_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_argb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

/* programs for Ivybridge */
static const uint32_t sf_kernel_static_gen7[][4] =
{
};

static const uint32_t ps_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

static const uint32_t ps_subpic_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_argb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

/* Programs for Haswell */
static const uint32_t ps_kernel_static_gen7_haswell[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b.haswell"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

#define SURFACE_STATE_PADDED_SIZE_I965  ALIGN(sizeof(struct i965_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN7  ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE       MAX(SURFACE_STATE_PADDED_SIZE_I965, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * index)
#define BINDING_TABLE_OFFSET            SURFACE_STATE_OFFSET(MAX_RENDER_SURFACES)
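
/* The wm.surface_state_binding_table_bo buffer is laid out as
 * MAX_RENDER_SURFACES surface states, each padded to a size that fits every
 * generation, followed by the binding table whose entries hold the offsets
 * of those surface states. */

/* Type-pun a float into its raw bit pattern so it can be emitted into a
 * batch buffer dword (see i965_render_constant_color() below). */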
static uint32_t float_to_uint(float f)
{
    union {
        uint32_t i;
        float f;
    } x;

    x.f = f;
    return x.i;
}

enum
{
    SF_KERNEL = 0,
    PS_KERNEL,
    PS_SUBPIC_KERNEL
};

static struct i965_kernel render_kernels_gen4[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static,
        sizeof(sf_kernel_static),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static,
        sizeof(ps_kernel_static),
        NULL
    },

    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static,
        sizeof(ps_subpic_kernel_static),
        NULL
    }
};

static struct i965_kernel render_kernels_gen5[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen5,
        sizeof(sf_kernel_static_gen5),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen5,
        sizeof(ps_kernel_static_gen5),
        NULL
    },

    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen5,
        sizeof(ps_subpic_kernel_static_gen5),
        NULL
    }
};

static struct i965_kernel render_kernels_gen6[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen6,
        sizeof(sf_kernel_static_gen6),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen6,
        sizeof(ps_kernel_static_gen6),
        NULL
    },

    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen6,
        sizeof(ps_subpic_kernel_static_gen6),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7,
        sizeof(ps_kernel_static_gen7),
        NULL
    },

    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7_haswell[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7_haswell,
        sizeof(ps_kernel_static_gen7_haswell),
        NULL
    },

    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

#define URB_VS_ENTRIES        8
#define URB_VS_ENTRY_SIZE     1

#define URB_GS_ENTRIES        0
#define URB_GS_ENTRY_SIZE     0

#define URB_CLIP_ENTRIES      0
#define URB_CLIP_ENTRY_SIZE   0

#define URB_SF_ENTRIES        1
#define URB_SF_ENTRY_SIZE     2

#define URB_CS_ENTRIES        1
#define URB_CS_ENTRY_SIZE     1
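
/* Static URB partition consumed by i965_render_urb_layout() and
 * i965_render_cs_urb_layout() below: GS and CLIP get no entries because
 * those stages are disabled, while the CS entry holds the CURBE constants
 * uploaded in i965_render_upload_constants(). */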

static void
i965_render_vs_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_vs_unit_state *vs_state;

    dri_bo_map(render_state->vs.state, 1);
    assert(render_state->vs.state->virtual);
    vs_state = render_state->vs.state->virtual;
    memset(vs_state, 0, sizeof(*vs_state));

    if (IS_IRONLAKE(i965->intel.device_id))
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
    else
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES;

    vs_state->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
    vs_state->vs6.vs_enable = 0;
    vs_state->vs6.vert_cache_disable = 1;

    dri_bo_unmap(render_state->vs.state);
}

static void
i965_render_sf_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sf_unit_state *sf_state;

    dri_bo_map(render_state->sf.state, 1);
    assert(render_state->sf.state->virtual);
    sf_state = render_state->sf.state->virtual;
    memset(sf_state, 0, sizeof(*sf_state));

    sf_state->thread0.grf_reg_count = I965_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
    sf_state->thread0.kernel_start_pointer = render_state->render_kernels[SF_KERNEL].bo->offset >> 6;

    sf_state->sf1.single_program_flow = 1; /* XXX */
    sf_state->sf1.binding_table_entry_count = 0;
    sf_state->sf1.thread_priority = 0;
    sf_state->sf1.floating_point_mode = 0; /* Mesa does this */
    sf_state->sf1.illegal_op_exception_enable = 1;
    sf_state->sf1.mask_stack_exception_enable = 1;
    sf_state->sf1.sw_exception_enable = 1;

    /* scratch space is not used in our kernel */
    sf_state->thread2.per_thread_scratch_space = 0;
    sf_state->thread2.scratch_space_base_pointer = 0;

    sf_state->thread3.const_urb_entry_read_length = 0; /* no const URBs */
    sf_state->thread3.const_urb_entry_read_offset = 0; /* no const URBs */
    sf_state->thread3.urb_entry_read_length = 1; /* 1 URB per vertex */
    sf_state->thread3.urb_entry_read_offset = 0;
    sf_state->thread3.dispatch_grf_start_reg = 3;

    sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
    sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
    sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
    sf_state->thread4.stats_enable = 1;

    sf_state->sf5.viewport_transform = 0; /* skip viewport */

    sf_state->sf6.cull_mode = I965_CULLMODE_NONE;
    sf_state->sf6.scissor = 0;

    sf_state->sf7.trifan_pv = 2;

    sf_state->sf6.dest_org_vbias = 0x8;
    sf_state->sf6.dest_org_hbias = 0x8;

    dri_bo_emit_reloc(render_state->sf.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      sf_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_sf_unit_state, thread0),
                      render_state->render_kernels[SF_KERNEL].bo);

    dri_bo_unmap(render_state->sf.state);
}

static void
i965_render_sampler(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sampler_state *sampler_state;
    int i;

    assert(render_state->wm.sampler_count > 0);
    assert(render_state->wm.sampler_count <= MAX_SAMPLERS);

    dri_bo_map(render_state->wm.sampler, 1);
    assert(render_state->wm.sampler->virtual);
    sampler_state = render_state->wm.sampler->virtual;
    for (i = 0; i < render_state->wm.sampler_count; i++) {
        memset(sampler_state, 0, sizeof(*sampler_state));
        sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss1.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state++;
    }

    dri_bo_unmap(render_state->wm.sampler);
}

static void
i965_subpic_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_SUBPIC_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 3; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 0;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_SUBPIC_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0;        /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 2; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 1;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_cc_viewport(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_viewport *cc_viewport;

    dri_bo_map(render_state->cc.viewport, 1);
    assert(render_state->cc.viewport->virtual);
    cc_viewport = render_state->cc.viewport->virtual;
    memset(cc_viewport, 0, sizeof(*cc_viewport));

    cc_viewport->min_depth = -1.e35;
    cc_viewport->max_depth = 1.e35;

    dri_bo_unmap(render_state->cc.viewport);
}

static void
i965_subpic_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 0;   /* disable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 1;     /* enable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc3.alpha_test_format = 0; /* ALPHATEST_UNORM8: store alpha value as UNORM8 */
    cc_state->cc3.alpha_test_func = 5;  /* COMPAREFUNCTION_LESS: pass if less than the reference */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_DST_ALPHA;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_DST_ALPHA;

    cc_state->cc6.clamp_post_alpha_blend = 0;
    cc_state->cc6.clamp_pre_alpha_blend = 0;

    /* final color = src_color * src_blend_factor +/- dst_color * dst_blend_factor */
    cc_state->cc6.blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc6.src_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
    cc_state->cc6.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;

    /* alpha test reference */
    cc_state->cc7.alpha_ref.f = 0.0;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 1;   /* enable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 0;     /* disable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_ONE;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_ONE;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss3.tiled_surface = 0;
        ss->ss3.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
i965_render_set_surface_state(
    struct i965_surface_state *ss,
    dri_bo                    *bo,
    unsigned long              offset,
    unsigned int               width,
    unsigned int               height,
    unsigned int               pitch,
    unsigned int               format,
    unsigned int               flags
)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    switch (flags & (I965_PP_FLAG_TOP_FIELD|I965_PP_FLAG_BOTTOM_FIELD)) {
    case I965_PP_FLAG_BOTTOM_FIELD:
        ss->ss0.vert_line_stride_ofs = 1;
        /* fall-through */
    case I965_PP_FLAG_TOP_FIELD:
        ss->ss0.vert_line_stride = 1;
        height /= 2;
        break;
    }

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss0.color_blend = 1;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    i965_render_set_surface_tiling(ss, tiling);
}

static void
gen7_render_set_surface_tiling(struct gen7_surface_state *ss, uint32_t tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

/* Set "Shader Channel Select" */
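/* Haswell moved channel swizzling into the surface state: a zero-filled
 * gen7_surface_state selects SCS_ZERO for every channel, so red/green/blue/
 * alpha have to be selected explicitly or the sampler returns all zeros. */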
static void
gen7_render_set_surface_scs(struct gen7_surface_state *ss)
{
    ss->ss7.shader_chanel_select_r = HSW_SCS_RED;
    ss->ss7.shader_chanel_select_g = HSW_SCS_GREEN;
    ss->ss7.shader_chanel_select_b = HSW_SCS_BLUE;
    ss->ss7.shader_chanel_select_a = HSW_SCS_ALPHA;
}

static void
gen7_render_set_surface_state(
    struct gen7_surface_state *ss,
    dri_bo                    *bo,
    unsigned long              offset,
    int                        width,
    int                        height,
    int                        pitch,
    int                        format,
    unsigned int               flags
)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    switch (flags & (I965_PP_FLAG_TOP_FIELD|I965_PP_FLAG_BOTTOM_FIELD)) {
    case I965_PP_FLAG_BOTTOM_FIELD:
        ss->ss0.vert_line_stride_ofs = 1;
        /* fall-through */
    case I965_PP_FLAG_TOP_FIELD:
        ss->ss0.vert_line_stride = 1;
        height /= 2;
        break;
    }

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    gen7_render_set_surface_tiling(ss, tiling);
}

static void
i965_render_src_surface_state(
    VADriverContextP ctx,
    int              index,
    dri_bo          *region,
    unsigned long    offset,
    int              w,
    int              h,
    int              pitch,
    int              format,
    unsigned int     flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;

    assert(index < MAX_RENDER_SURFACES);

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format, flags);
        if (IS_HASWELL(i965->intel.device_id))
            gen7_render_set_surface_scs(ss);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          region);
    } else {
        i965_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format, flags);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          region);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
    render_state->wm.sampler_count++;
}

static void
i965_render_src_surfaces_state(
    VADriverContextP ctx,
    VASurfaceID      surface,
    unsigned int     flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface;
    int region_pitch;
    int rw, rh;
    dri_bo *region;

    obj_surface = SURFACE(surface);
    assert(obj_surface);

    region_pitch = obj_surface->width;
    rw = obj_surface->orig_width;
    rh = obj_surface->orig_height;
    region = obj_surface->bo;

    i965_render_src_surface_state(ctx, 1, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);     /* Y */
    i965_render_src_surface_state(ctx, 2, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);

    if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2')) {
        i965_render_src_surface_state(ctx, 3, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8G8_UNORM, flags); /* UV */
        i965_render_src_surface_state(ctx, 4, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8G8_UNORM, flags);
    } else {
        i965_render_src_surface_state(ctx, 3, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags); /* U */
        i965_render_src_surface_state(ctx, 4, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags);
        i965_render_src_surface_state(ctx, 5, region,
                                      region_pitch * obj_surface->y_cr_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags); /* V */
        i965_render_src_surface_state(ctx, 6, region,
                                      region_pitch * obj_surface->y_cr_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags);
    }
}

static void
i965_subpic_render_src_surfaces_state(VADriverContextP ctx,
                                      VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface = SURFACE(surface);
    int w, h;
    dri_bo *region;
    dri_bo *subpic_region;
    struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic);
    struct object_image *obj_image = IMAGE(obj_subpic->image);

    assert(obj_surface);
    assert(obj_surface->bo);

    w = obj_surface->width;
    h = obj_surface->height;
    region = obj_surface->bo;
    subpic_region = obj_image->bo;

    /* subpicture surface */
    i965_render_src_surface_state(ctx, 1, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
    i965_render_src_surface_state(ctx, 2, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
}

static void
i965_render_dest_surface_state(VADriverContextP ctx, int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;
    int format;

    assert(index < MAX_RENDER_SURFACES);

    if (dest_region->cpp == 2) {
        format = I965_SURFACEFORMAT_B5G6R5_UNORM;
    } else {
        format = I965_SURFACEFORMAT_B8G8R8A8_UNORM;
    }

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format, 0);
        if (IS_HASWELL(i965->intel.device_id))
            gen7_render_set_surface_scs(ss);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          dest_region->bo);
    } else {
        i965_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format, 0);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          dest_region->bo);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}

static void
i965_fill_vertex_buffer(
    VADriverContextP ctx,
    float tex_coords[4], /* [(u1,v1);(u2,v2)] */
    float vid_coords[4]  /* [(x1,y1);(x2,y2)] */
)
{
    struct i965_driver_data * const i965 = i965_driver_data(ctx);
    float vb[12];

    enum { X1, Y1, X2, Y2 };

    static const unsigned int g_rotation_indices[][6] = {
        [VA_ROTATION_NONE] = { X2, Y2, X1, Y2, X1, Y1 },
        [VA_ROTATION_90]   = { X2, Y1, X2, Y2, X1, Y2 },
        [VA_ROTATION_180]  = { X1, Y1, X2, Y1, X2, Y2 },
        [VA_ROTATION_270]  = { X1, Y2, X1, Y1, X2, Y1 },
    };

    const unsigned int * const rotation_indices =
        g_rotation_indices[i965->rotation_attrib->value];

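    /* Three vertices of a RECTLIST, each laid out as (u, v, x, y); the
     * hardware infers the fourth corner of the rectangle from the
     * bottom-right, bottom-left and top-left vertices emitted here. */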
    vb[0]  = tex_coords[rotation_indices[0]]; /* bottom-right corner */
    vb[1]  = tex_coords[rotation_indices[1]];
    vb[2]  = vid_coords[X2];
    vb[3]  = vid_coords[Y2];

    vb[4]  = tex_coords[rotation_indices[2]]; /* bottom-left corner */
    vb[5]  = tex_coords[rotation_indices[3]];
    vb[6]  = vid_coords[X1];
    vb[7]  = vid_coords[Y2];

    vb[8]  = tex_coords[rotation_indices[4]]; /* top-left corner */
    vb[9]  = tex_coords[rotation_indices[5]];
    vb[10] = vid_coords[X1];
    vb[11] = vid_coords[Y1];

    dri_bo_subdata(i965->render_state.vb.vertex_buffer, 0, sizeof(vb), vb);
}

static void
i965_subpic_render_upload_vertex(VADriverContextP ctx,
                                 VASurfaceID surface,
                                 const VARectangle *output_rect)
{
    struct i965_driver_data  *i965         = i965_driver_data(ctx);
    struct object_surface    *obj_surface  = SURFACE(surface);
    struct object_subpic     *obj_subpic   = SUBPIC(obj_surface->subpic);
    float tex_coords[4], vid_coords[4];
    VARectangle dst_rect;

    if (obj_subpic->flags & VA_SUBPICTURE_DESTINATION_IS_SCREEN_COORD)
        dst_rect = obj_subpic->dst_rect;
    else {
        const float sx  = (float)output_rect->width  / obj_surface->orig_width;
        const float sy  = (float)output_rect->height / obj_surface->orig_height;
        dst_rect.x      = output_rect->x + sx * obj_subpic->dst_rect.x;
        dst_rect.y      = output_rect->y + sy * obj_subpic->dst_rect.y;
        dst_rect.width  = sx * obj_subpic->dst_rect.width;
        dst_rect.height = sy * obj_subpic->dst_rect.height;
    }

    tex_coords[0] = (float)obj_subpic->src_rect.x / obj_subpic->width;
    tex_coords[1] = (float)obj_subpic->src_rect.y / obj_subpic->height;
    tex_coords[2] = (float)(obj_subpic->src_rect.x + obj_subpic->src_rect.width) / obj_subpic->width;
    tex_coords[3] = (float)(obj_subpic->src_rect.y + obj_subpic->src_rect.height) / obj_subpic->height;

    vid_coords[0] = dst_rect.x;
    vid_coords[1] = dst_rect.y;
    vid_coords[2] = (float)(dst_rect.x + dst_rect.width);
    vid_coords[3] = (float)(dst_rect.y + dst_rect.height);

    i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
}

static void
i965_render_upload_vertex(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    struct object_surface *obj_surface;
    float tex_coords[4], vid_coords[4];
    int width, height;

    obj_surface = SURFACE(surface);
    assert(obj_surface);

    width  = obj_surface->orig_width;
    height = obj_surface->orig_height;

    tex_coords[0] = (float)src_rect->x / width;
    tex_coords[1] = (float)src_rect->y / height;
    tex_coords[2] = (float)(src_rect->x + src_rect->width) / width;
    tex_coords[3] = (float)(src_rect->y + src_rect->height) / height;

    vid_coords[0] = dest_region->x + dst_rect->x;
    vid_coords[1] = dest_region->y + dst_rect->y;
    vid_coords[2] = vid_coords[0] + dst_rect->width;
    vid_coords[3] = vid_coords[1] + dst_rect->height;

    i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
}

static void
i965_render_upload_constants(VADriverContextP ctx,
                             VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    unsigned short *constant_buffer;
    struct object_surface *obj_surface = SURFACE(surface);

    dri_bo_map(render_state->curbe.bo, 1);
    assert(render_state->curbe.bo->virtual);
    constant_buffer = render_state->curbe.bo->virtual;
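
    /* The first CURBE constant tells the PS kernel how to fetch chroma:
     * 0 = separate U/V planes, 1 = interleaved NV12 UV plane, 2 = no chroma
     * fetch (grayscale 4:0:0 surfaces). */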

    if (obj_surface->subsampling == SUBSAMPLE_YUV400) {
        assert(obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '1') ||
               obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '3'));
        *constant_buffer = 2;
    } else {
        if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2'))
            *constant_buffer = 1;
        else
            *constant_buffer = 0;
    }

    dri_bo_unmap(render_state->curbe.bo);
}

static void
i965_surface_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_render_src_surfaces_state(ctx, surface, flags);
    i965_render_sampler(ctx);
    i965_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_render_cc_unit(ctx);
    i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
    i965_render_upload_constants(ctx, surface);
}

static void
i965_subpic_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_subpic_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_subpic_render_cc_unit(ctx);
    i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
}

static void
i965_render_pipeline_select(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_sip(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_STATE_SIP | 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_base_address(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 8);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 6);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_binding_table_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 6);
    OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS | 4);
    OUT_BATCH(batch, 0); /* vs */
    OUT_BATCH(batch, 0); /* gs */
    OUT_BATCH(batch, 0); /* clip */
    OUT_BATCH(batch, 0); /* sf */
    OUT_BATCH(batch, BINDING_TABLE_OFFSET);
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_color(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 5);
    OUT_BATCH(batch, CMD_CONSTANT_COLOR | 3);
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(0.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    ADVANCE_BATCH(batch);
}

static void
i965_render_pipelined_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, CMD_PIPELINED_POINTERS | 5);
    OUT_RELOC(batch, render_state->vs.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_BATCH(batch, 0);  /* disable GS */
    OUT_BATCH(batch, 0);  /* disable CLIP */
    OUT_RELOC(batch, render_state->sf.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->wm.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    ADVANCE_BATCH(batch);
}

static void
i965_render_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    int urb_vs_start, urb_vs_size;
    int urb_gs_start, urb_gs_size;
    int urb_clip_start, urb_clip_size;
    int urb_sf_start, urb_sf_size;
    int urb_cs_start, urb_cs_size;

    urb_vs_start = 0;
    urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
    urb_gs_start = urb_vs_start + urb_vs_size;
    urb_gs_size = URB_GS_ENTRIES * URB_GS_ENTRY_SIZE;
    urb_clip_start = urb_gs_start + urb_gs_size;
    urb_clip_size = URB_CLIP_ENTRIES * URB_CLIP_ENTRY_SIZE;
    urb_sf_start = urb_clip_start + urb_clip_size;
    urb_sf_size = URB_SF_ENTRIES * URB_SF_ENTRY_SIZE;
    urb_cs_start = urb_sf_start + urb_sf_size;
    urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch,
              CMD_URB_FENCE |
              UF0_CS_REALLOC |
              UF0_SF_REALLOC |
              UF0_CLIP_REALLOC |
              UF0_GS_REALLOC |
              UF0_VS_REALLOC |
              1);
    OUT_BATCH(batch,
              ((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
              ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
              ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
    OUT_BATCH(batch,
              ((urb_cs_start + urb_cs_size) << UF2_CS_FENCE_SHIFT) |
              ((urb_sf_start + urb_sf_size) << UF2_SF_FENCE_SHIFT));
    ADVANCE_BATCH(batch);
}

static void
i965_render_cs_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
    OUT_BATCH(batch,
              ((URB_CS_ENTRY_SIZE - 1) << 4) |      /* URB Entry Allocation Size */
              (URB_CS_ENTRIES << 0));               /* Number of URB Entries */
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_buffer(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
    OUT_RELOC(batch, render_state->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              URB_CS_ENTRY_SIZE - 1);
    ADVANCE_BATCH(batch);
}

static void
i965_render_drawing_rectangle(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;

    BEGIN_BATCH(batch, 4);
    OUT_BATCH(batch, CMD_DRAWING_RECTANGLE | 2);
    OUT_BATCH(batch, 0x00000000);
    OUT_BATCH(batch, (dest_region->width - 1) | (dest_region->height - 1) << 16);
    OUT_BATCH(batch, 0x00000000);
    ADVANCE_BATCH(batch);
}

static void
i965_render_vertex_elements(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (0 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (4 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_upload_image_palette(
    VADriverContextP ctx,
    VAImageID        image_id,
    unsigned int     alpha
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int i;

    struct object_image *obj_image = IMAGE(image_id);
    assert(obj_image);

    if (obj_image->image.num_palette_entries == 0)
        return;

    BEGIN_BATCH(batch, 1 + obj_image->image.num_palette_entries);
    OUT_BATCH(batch, CMD_SAMPLER_PALETTE_LOAD | (obj_image->image.num_palette_entries - 1));
    /* fill palette: bits 0..23 carry the color, bits 24..31 the alpha */
    for (i = 0; i < obj_image->image.num_palette_entries; i++)
        OUT_BATCH(batch, (alpha << 24) | obj_image->palette[i]);
    ADVANCE_BATCH(batch);
}

static void
i965_render_startup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 11);
    OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
    OUT_BATCH(batch,
              (0 << VB0_BUFFER_INDEX_SHIFT) |
              VB0_VERTEXDATA |
              ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
    OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);

    if (IS_IRONLAKE(i965->intel.device_id))
        OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
    else
        OUT_BATCH(batch, 3);

    OUT_BATCH(batch, 0);

    OUT_BATCH(batch,
              CMD_3DPRIMITIVE |
              _3DPRIMITIVE_VERTEX_SEQUENTIAL |
              (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
              (0 << 9) |
              4);
    OUT_BATCH(batch, 3); /* vertex count per instance */
    OUT_BATCH(batch, 0); /* start vertex offset */
    OUT_BATCH(batch, 1); /* single instance */
    OUT_BATCH(batch, 0); /* start instance location */
    OUT_BATCH(batch, 0); /* index buffer offset, ignored */
    ADVANCE_BATCH(batch);
}

static void
i965_clear_dest_region(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    unsigned int blt_cmd, br13;
    int pitch;

    blt_cmd = XY_COLOR_BLT_CMD;
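    /* 0xf0 in BR13 bits 16..23 is the PATCOPY raster operation: fill with
     * the solid color supplied in the last dword of the blit (0x0 below). */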
1452     br13 = 0xf0 << 16;
1453     pitch = dest_region->pitch;
1454
1455     if (dest_region->cpp == 4) {
1456         br13 |= BR13_8888;
1457         blt_cmd |= (XY_COLOR_BLT_WRITE_RGB | XY_COLOR_BLT_WRITE_ALPHA);
1458     } else {
1459         assert(dest_region->cpp == 2);
1460         br13 |= BR13_565;
1461     }
1462
1463     if (dest_region->tiling != I915_TILING_NONE) {
1464         blt_cmd |= XY_COLOR_BLT_DST_TILED;
1465         pitch /= 4;
1466     }
1467
1468     br13 |= pitch;
1469
1470     if (IS_GEN6(i965->intel.device_id) ||
1471         IS_GEN7(i965->intel.device_id)) {
1472         intel_batchbuffer_start_atomic_blt(batch, 24);
1473         BEGIN_BLT_BATCH(batch, 6);
1474     } else {
1475         intel_batchbuffer_start_atomic(batch, 24);
1476         BEGIN_BATCH(batch, 6);
1477     }
1478
1479     OUT_BATCH(batch, blt_cmd);
1480     OUT_BATCH(batch, br13);
1481     OUT_BATCH(batch, (dest_region->y << 16) | (dest_region->x));
1482     OUT_BATCH(batch, ((dest_region->y + dest_region->height) << 16) |
1483               (dest_region->x + dest_region->width));
1484     OUT_RELOC(batch, dest_region->bo, 
1485               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
1486               0);
1487     OUT_BATCH(batch, 0x0); /* solid fill color: black */
1488     ADVANCE_BATCH(batch);
1489     intel_batchbuffer_end_atomic(batch);
1490 }
1491
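/*
 * Emit the complete pre-GEN6 3D pipeline for rendering a surface: pipeline
 * select, SIP, state base addresses, unit state pointers, URB layout,
 * constants, drawing rectangle, vertex elements and finally the draw
 * itself, all inside one atomic batch section.
 */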
1492 static void
1493 i965_surface_render_pipeline_setup(VADriverContextP ctx)
1494 {
1495     struct i965_driver_data *i965 = i965_driver_data(ctx);
1496     struct intel_batchbuffer *batch = i965->batch;
1497
1498     i965_clear_dest_region(ctx);
1499     intel_batchbuffer_start_atomic(batch, 0x1000);
1500     intel_batchbuffer_emit_mi_flush(batch);
1501     i965_render_pipeline_select(ctx);
1502     i965_render_state_sip(ctx);
1503     i965_render_state_base_address(ctx);
1504     i965_render_binding_table_pointers(ctx);
1505     i965_render_constant_color(ctx);
1506     i965_render_pipelined_pointers(ctx);
1507     i965_render_urb_layout(ctx);
1508     i965_render_cs_urb_layout(ctx);
1509     i965_render_constant_buffer(ctx);
1510     i965_render_drawing_rectangle(ctx);
1511     i965_render_vertex_elements(ctx);
1512     i965_render_startup(ctx);
1513     intel_batchbuffer_end_atomic(batch);
1514 }
1515
1516 static void
1517 i965_subpic_render_pipeline_setup(VADriverContextP ctx)
1518 {
1519     struct i965_driver_data *i965 = i965_driver_data(ctx);
1520     struct intel_batchbuffer *batch = i965->batch;
1521
1522     intel_batchbuffer_start_atomic(batch, 0x1000);
1523     intel_batchbuffer_emit_mi_flush(batch);
1524     i965_render_pipeline_select(ctx);
1525     i965_render_state_sip(ctx);
1526     i965_render_state_base_address(ctx);
1527     i965_render_binding_table_pointers(ctx);
1528     i965_render_constant_color(ctx);
1529     i965_render_pipelined_pointers(ctx);
1530     i965_render_urb_layout(ctx);
1531     i965_render_cs_urb_layout(ctx);
1532     i965_render_drawing_rectangle(ctx);
1533     i965_render_vertex_elements(ctx);
1534     i965_render_startup(ctx);
1535     intel_batchbuffer_end_atomic(batch);
1536 }
1537
1538
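/*
 * (Re)allocate the buffer objects holding fixed-function unit state for the
 * pre-GEN6 path: vertex buffer, VS/SF/WM/CC unit state, sampler state, the
 * combined surface state + binding table, and the CC viewport.  GS and CLIP
 * are unused and left without state.
 */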
1539 static void 
1540 i965_render_initialize(VADriverContextP ctx)
1541 {
1542     struct i965_driver_data *i965 = i965_driver_data(ctx);
1543     struct i965_render_state *render_state = &i965->render_state;
1544     dri_bo *bo;
1545
1546     /* VERTEX BUFFER */
1547     dri_bo_unreference(render_state->vb.vertex_buffer);
1548     bo = dri_bo_alloc(i965->intel.bufmgr,
1549                       "vertex buffer",
1550                       4096,
1551                       4096);
1552     assert(bo);
1553     render_state->vb.vertex_buffer = bo;
1554
1555     /* VS */
1556     dri_bo_unreference(render_state->vs.state);
1557     bo = dri_bo_alloc(i965->intel.bufmgr,
1558                       "vs state",
1559                       sizeof(struct i965_vs_unit_state),
1560                       64);
1561     assert(bo);
1562     render_state->vs.state = bo;
1563
1564     /* GS */
1565     /* CLIP */
1566     /* SF */
1567     dri_bo_unreference(render_state->sf.state);
1568     bo = dri_bo_alloc(i965->intel.bufmgr,
1569                       "sf state",
1570                       sizeof(struct i965_sf_unit_state),
1571                       64);
1572     assert(bo);
1573     render_state->sf.state = bo;
1574
1575     /* WM */
1576     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1577     bo = dri_bo_alloc(i965->intel.bufmgr,
1578                       "surface state & binding table",
1579                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1580                       4096);
1581     assert(bo);
1582     render_state->wm.surface_state_binding_table_bo = bo;
1583
1584     dri_bo_unreference(render_state->wm.sampler);
1585     bo = dri_bo_alloc(i965->intel.bufmgr,
1586                       "sampler state",
1587                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1588                       64);
1589     assert(bo);
1590     render_state->wm.sampler = bo;
1591     render_state->wm.sampler_count = 0;
1592
1593     dri_bo_unreference(render_state->wm.state);
1594     bo = dri_bo_alloc(i965->intel.bufmgr,
1595                       "wm state",
1596                       sizeof(struct i965_wm_unit_state),
1597                       64);
1598     assert(bo);
1599     render_state->wm.state = bo;
1600
1601     /* COLOR CALCULATOR */
1602     dri_bo_unreference(render_state->cc.state);
1603     bo = dri_bo_alloc(i965->intel.bufmgr,
1604                       "color calc state",
1605                       sizeof(struct i965_cc_unit_state),
1606                       64);
1607     assert(bo);
1608     render_state->cc.state = bo;
1609
1610     dri_bo_unreference(render_state->cc.viewport);
1611     bo = dri_bo_alloc(i965->intel.bufmgr,
1612                       "cc viewport",
1613                       sizeof(struct i965_cc_viewport),
1614                       64);
1615     assert(bo);
1616     render_state->cc.viewport = bo;
1617 }
1618
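/*
 * Pre-GEN6 PutSurface: rebuild the unit state, set up per-frame surface,
 * sampler and vertex data, emit the pipeline and flush the batch.
 */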
1619 static void
1620 i965_render_put_surface(
1621     VADriverContextP   ctx,
1622     VASurfaceID        surface,
1623     const VARectangle *src_rect,
1624     const VARectangle *dst_rect,
1625     unsigned int       flags
1626 )
1627 {
1628     struct i965_driver_data *i965 = i965_driver_data(ctx);
1629     struct intel_batchbuffer *batch = i965->batch;
1630
1631     i965_render_initialize(ctx);
1632     i965_surface_render_state_setup(ctx, surface, src_rect, dst_rect, flags);
1633     i965_surface_render_pipeline_setup(ctx);
1634     intel_batchbuffer_flush(batch);
1635 }
1636
1637 static void
1638 i965_render_put_subpicture(
1639     VADriverContextP   ctx,
1640     VASurfaceID        surface,
1641     const VARectangle *src_rect,
1642     const VARectangle *dst_rect
1643 )
1644 {
1645     struct i965_driver_data *i965 = i965_driver_data(ctx);
1646     struct intel_batchbuffer *batch = i965->batch;
1647     struct object_surface *obj_surface = SURFACE(surface);
1648     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic);
1649
1650     assert(obj_subpic);
1651
1652     i965_render_initialize(ctx);
1653     i965_subpic_render_state_setup(ctx, surface, src_rect, dst_rect);
1654     i965_subpic_render_pipeline_setup(ctx);
1655     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
1656     intel_batchbuffer_flush(batch);
1657 }
1658
1659 /*
1660  * for GEN6
1661  */
1662 static void 
1663 gen6_render_initialize(VADriverContextP ctx)
1664 {
1665     struct i965_driver_data *i965 = i965_driver_data(ctx);
1666     struct i965_render_state *render_state = &i965->render_state;
1667     dri_bo *bo;
1668
1669     /* VERTEX BUFFER */
1670     dri_bo_unreference(render_state->vb.vertex_buffer);
1671     bo = dri_bo_alloc(i965->intel.bufmgr,
1672                       "vertex buffer",
1673                       4096,
1674                       4096);
1675     assert(bo);
1676     render_state->vb.vertex_buffer = bo;
1677
1678     /* WM */
1679     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1680     bo = dri_bo_alloc(i965->intel.bufmgr,
1681                       "surface state & binding table",
1682                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1683                       4096);
1684     assert(bo);
1685     render_state->wm.surface_state_binding_table_bo = bo;
1686
1687     dri_bo_unreference(render_state->wm.sampler);
1688     bo = dri_bo_alloc(i965->intel.bufmgr,
1689                       "sampler state",
1690                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1691                       4096);
1692     assert(bo);
1693     render_state->wm.sampler = bo;
1694     render_state->wm.sampler_count = 0;
1695
1696     /* COLOR CALCULATOR */
1697     dri_bo_unreference(render_state->cc.state);
1698     bo = dri_bo_alloc(i965->intel.bufmgr,
1699                       "color calc state",
1700                       sizeof(struct gen6_color_calc_state),
1701                       4096);
1702     assert(bo);
1703     render_state->cc.state = bo;
1704
1705     /* CC VIEWPORT */
1706     dri_bo_unreference(render_state->cc.viewport);
1707     bo = dri_bo_alloc(i965->intel.bufmgr,
1708                       "cc viewport",
1709                       sizeof(struct i965_cc_viewport),
1710                       4096);
1711     assert(bo);
1712     render_state->cc.viewport = bo;
1713
1714     /* BLEND STATE */
1715     dri_bo_unreference(render_state->cc.blend);
1716     bo = dri_bo_alloc(i965->intel.bufmgr,
1717                       "blend state",
1718                       sizeof(struct gen6_blend_state),
1719                       4096);
1720     assert(bo);
1721     render_state->cc.blend = bo;
1722
1723     /* DEPTH & STENCIL STATE */
1724     dri_bo_unreference(render_state->cc.depth_stencil);
1725     bo = dri_bo_alloc(i965->intel.bufmgr,
1726                       "depth & stencil state",
1727                       sizeof(struct gen6_depth_stencil_state),
1728                       4096);
1729     assert(bo);
1730     render_state->cc.depth_stencil = bo;
1731 }
1732
1733 static void
1734 gen6_render_color_calc_state(VADriverContextP ctx)
1735 {
1736     struct i965_driver_data *i965 = i965_driver_data(ctx);
1737     struct i965_render_state *render_state = &i965->render_state;
1738     struct gen6_color_calc_state *color_calc_state;
1739     
1740     dri_bo_map(render_state->cc.state, 1);
1741     assert(render_state->cc.state->virtual);
1742     color_calc_state = render_state->cc.state->virtual;
1743     memset(color_calc_state, 0, sizeof(*color_calc_state));
1744     color_calc_state->constant_r = 1.0;
1745     color_calc_state->constant_g = 0.0;
1746     color_calc_state->constant_b = 1.0;
1747     color_calc_state->constant_a = 1.0;
1748     dri_bo_unmap(render_state->cc.state);
1749 }
1750
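/*
 * Blending is not wanted for the primary surface pass, so the blend state
 * only enables the COPY (0xC) logic op: pixel shader output overwrites the
 * destination unconditionally.
 */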
1751 static void
1752 gen6_render_blend_state(VADriverContextP ctx)
1753 {
1754     struct i965_driver_data *i965 = i965_driver_data(ctx);
1755     struct i965_render_state *render_state = &i965->render_state;
1756     struct gen6_blend_state *blend_state;
1757     
1758     dri_bo_map(render_state->cc.blend, 1);
1759     assert(render_state->cc.blend->virtual);
1760     blend_state = render_state->cc.blend->virtual;
1761     memset(blend_state, 0, sizeof(*blend_state));
1762     blend_state->blend1.logic_op_enable = 1;
1763     blend_state->blend1.logic_op_func = 0xc;
1764     dri_bo_unmap(render_state->cc.blend);
1765 }
1766
1767 static void
1768 gen6_render_depth_stencil_state(VADriverContextP ctx)
1769 {
1770     struct i965_driver_data *i965 = i965_driver_data(ctx);
1771     struct i965_render_state *render_state = &i965->render_state;
1772     struct gen6_depth_stencil_state *depth_stencil_state;
1773     
1774     dri_bo_map(render_state->cc.depth_stencil, 1);
1775     assert(render_state->cc.depth_stencil->virtual);
1776     depth_stencil_state = render_state->cc.depth_stencil->virtual;
1777     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
1778     dri_bo_unmap(render_state->cc.depth_stencil);
1779 }
1780
1781 static void
1782 gen6_render_setup_states(
1783     VADriverContextP   ctx,
1784     VASurfaceID        surface,
1785     const VARectangle *src_rect,
1786     const VARectangle *dst_rect,
1787     unsigned int       flags
1788 )
1789 {
1790     i965_render_dest_surface_state(ctx, 0);
1791     i965_render_src_surfaces_state(ctx, surface, flags);
1792     i965_render_sampler(ctx);
1793     i965_render_cc_viewport(ctx);
1794     gen6_render_color_calc_state(ctx);
1795     gen6_render_blend_state(ctx);
1796     gen6_render_depth_stencil_state(ctx);
1797     i965_render_upload_constants(ctx, surface);
1798     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
1799 }
1800
1801 static void
1802 gen6_emit_invarient_states(VADriverContextP ctx)
1803 {
1804     struct i965_driver_data *i965 = i965_driver_data(ctx);
1805     struct intel_batchbuffer *batch = i965->batch;
1806
1807     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
1808
1809     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (3 - 2));
1810     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
1811               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
1812     OUT_BATCH(batch, 0);
1813
1814     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
1815     OUT_BATCH(batch, 1);
1816
1817     /* Set system instruction pointer */
1818     OUT_BATCH(batch, CMD_STATE_SIP | 0);
1819     OUT_BATCH(batch, 0);
1820 }
1821
1822 static void
1823 gen6_emit_state_base_address(VADriverContextP ctx)
1824 {
1825     struct i965_driver_data *i965 = i965_driver_data(ctx);
1826     struct intel_batchbuffer *batch = i965->batch;
1827     struct i965_render_state *render_state = &i965->render_state;
1828
1829     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
1830     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
1831     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1832     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
1833     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
1834     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
1835     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
1836     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
1837     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
1838     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
1839 }
1840
1841 static void
1842 gen6_emit_viewport_state_pointers(VADriverContextP ctx)
1843 {
1844     struct i965_driver_data *i965 = i965_driver_data(ctx);
1845     struct intel_batchbuffer *batch = i965->batch;
1846     struct i965_render_state *render_state = &i965->render_state;
1847
1848     OUT_BATCH(batch, GEN6_3DSTATE_VIEWPORT_STATE_POINTERS |
1849               GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC |
1850               (4 - 2));
1851     OUT_BATCH(batch, 0);
1852     OUT_BATCH(batch, 0);
1853     OUT_RELOC(batch, render_state->cc.viewport, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1854 }
1855
1856 static void
1857 gen6_emit_urb(VADriverContextP ctx)
1858 {
1859     struct i965_driver_data *i965 = i965_driver_data(ctx);
1860     struct intel_batchbuffer *batch = i965->batch;
1861
1862     OUT_BATCH(batch, GEN6_3DSTATE_URB | (3 - 2));
1863     OUT_BATCH(batch, ((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT) |
1864               (24 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
1865     OUT_BATCH(batch, (0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT) |
1866               (0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT)); /* no GS thread */
1867 }
1868
1869 static void
1870 gen6_emit_cc_state_pointers(VADriverContextP ctx)
1871 {
1872     struct i965_driver_data *i965 = i965_driver_data(ctx);
1873     struct intel_batchbuffer *batch = i965->batch;
1874     struct i965_render_state *render_state = &i965->render_state;
1875
1876     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
1877     OUT_RELOC(batch, render_state->cc.blend, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1878     OUT_RELOC(batch, render_state->cc.depth_stencil, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1879     OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1880 }
1881
1882 static void
1883 gen6_emit_sampler_state_pointers(VADriverContextP ctx)
1884 {
1885     struct i965_driver_data *i965 = i965_driver_data(ctx);
1886     struct intel_batchbuffer *batch = i965->batch;
1887     struct i965_render_state *render_state = &i965->render_state;
1888
1889     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLER_STATE_POINTERS |
1890               GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS |
1891               (4 - 2));
1892     OUT_BATCH(batch, 0); /* VS */
1893     OUT_BATCH(batch, 0); /* GS */
1894     OUT_RELOC(batch, render_state->wm.sampler, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1895 }
1896
1897 static void
1898 gen6_emit_binding_table(VADriverContextP ctx)
1899 {
1900     struct i965_driver_data *i965 = i965_driver_data(ctx);
1901     struct intel_batchbuffer *batch = i965->batch;
1902
1903     /* Binding table pointers */
1904     OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS |
1905               GEN6_BINDING_TABLE_MODIFY_PS |
1906               (4 - 2));
1907     OUT_BATCH(batch, 0);                /* vs */
1908     OUT_BATCH(batch, 0);                /* gs */
1909     /* Only the PS uses the binding table */
1910     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
1911 }
1912
1913 static void
1914 gen6_emit_depth_buffer_state(VADriverContextP ctx)
1915 {
1916     struct i965_driver_data *i965 = i965_driver_data(ctx);
1917     struct intel_batchbuffer *batch = i965->batch;
1918
1919     OUT_BATCH(batch, CMD_DEPTH_BUFFER | (7 - 2));
1920     OUT_BATCH(batch, (I965_SURFACE_NULL << CMD_DEPTH_BUFFER_TYPE_SHIFT) |
1921               (I965_DEPTHFORMAT_D32_FLOAT << CMD_DEPTH_BUFFER_FORMAT_SHIFT));
1922     OUT_BATCH(batch, 0);
1923     OUT_BATCH(batch, 0);
1924     OUT_BATCH(batch, 0);
1925     OUT_BATCH(batch, 0);
1926     OUT_BATCH(batch, 0);
1927
1928     OUT_BATCH(batch, CMD_CLEAR_PARAMS | (2 - 2));
1929     OUT_BATCH(batch, 0);
1930 }
1931
1932 static void
1933 gen6_emit_drawing_rectangle(VADriverContextP ctx)
1934 {
1935     i965_render_drawing_rectangle(ctx);
1936 }
1937
1938 static void 
1939 gen6_emit_vs_state(VADriverContextP ctx)
1940 {
1941     struct i965_driver_data *i965 = i965_driver_data(ctx);
1942     struct intel_batchbuffer *batch = i965->batch;
1943
1944     /* disable VS constant buffer */
1945     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (5 - 2));
1946     OUT_BATCH(batch, 0);
1947     OUT_BATCH(batch, 0);
1948     OUT_BATCH(batch, 0);
1949     OUT_BATCH(batch, 0);
1950         
1951     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
1952     OUT_BATCH(batch, 0); /* without VS kernel */
1953     OUT_BATCH(batch, 0);
1954     OUT_BATCH(batch, 0);
1955     OUT_BATCH(batch, 0);
1956     OUT_BATCH(batch, 0); /* pass-through */
1957 }
1958
1959 static void 
1960 gen6_emit_gs_state(VADriverContextP ctx)
1961 {
1962     struct i965_driver_data *i965 = i965_driver_data(ctx);
1963     struct intel_batchbuffer *batch = i965->batch;
1964
1965     /* disable GS constant buffer */
1966     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (5 - 2));
1967     OUT_BATCH(batch, 0);
1968     OUT_BATCH(batch, 0);
1969     OUT_BATCH(batch, 0);
1970     OUT_BATCH(batch, 0);
1971         
1972     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
1973     OUT_BATCH(batch, 0); /* without GS kernel */
1974     OUT_BATCH(batch, 0);
1975     OUT_BATCH(batch, 0);
1976     OUT_BATCH(batch, 0);
1977     OUT_BATCH(batch, 0);
1978     OUT_BATCH(batch, 0); /* pass-through */
1979 }
1980
1981 static void 
1982 gen6_emit_clip_state(VADriverContextP ctx)
1983 {
1984     struct i965_driver_data *i965 = i965_driver_data(ctx);
1985     struct intel_batchbuffer *batch = i965->batch;
1986
1987     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
1988     OUT_BATCH(batch, 0);
1989     OUT_BATCH(batch, 0); /* pass-through */
1990     OUT_BATCH(batch, 0);
1991 }
1992
1993 static void 
1994 gen6_emit_sf_state(VADriverContextP ctx)
1995 {
1996     struct i965_driver_data *i965 = i965_driver_data(ctx);
1997     struct intel_batchbuffer *batch = i965->batch;
1998
1999     OUT_BATCH(batch, GEN6_3DSTATE_SF | (20 - 2));
2000     OUT_BATCH(batch, (1 << GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT) |
2001               (1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT) |
2002               (0 << GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT));
2003     OUT_BATCH(batch, 0);
2004     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
2005     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT); /* DW4 */
2006     OUT_BATCH(batch, 0);
2007     OUT_BATCH(batch, 0);
2008     OUT_BATCH(batch, 0);
2009     OUT_BATCH(batch, 0);
2010     OUT_BATCH(batch, 0); /* DW9 */
2011     OUT_BATCH(batch, 0);
2012     OUT_BATCH(batch, 0);
2013     OUT_BATCH(batch, 0);
2014     OUT_BATCH(batch, 0);
2015     OUT_BATCH(batch, 0); /* DW14 */
2016     OUT_BATCH(batch, 0);
2017     OUT_BATCH(batch, 0);
2018     OUT_BATCH(batch, 0);
2019     OUT_BATCH(batch, 0);
2020     OUT_BATCH(batch, 0); /* DW19 */
2021 }
2022
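/*
 * Program the GEN6 pixel shader: push constants come from the CURBE buffer,
 * the kernel (PS_KERNEL or PS_SUBPIC_KERNEL) is chosen by the caller,
 * SIMD16 dispatch with up to 40 threads, payload starting at GRF r6.
 */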
2023 static void 
2024 gen6_emit_wm_state(VADriverContextP ctx, int kernel)
2025 {
2026     struct i965_driver_data *i965 = i965_driver_data(ctx);
2027     struct intel_batchbuffer *batch = i965->batch;
2028     struct i965_render_state *render_state = &i965->render_state;
2029
2030     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS |
2031               GEN6_3DSTATE_CONSTANT_BUFFER_0_ENABLE |
2032               (5 - 2));
2033     OUT_RELOC(batch, 
2034               render_state->curbe.bo,
2035               I915_GEM_DOMAIN_INSTRUCTION, 0,
2036               0);
2037     OUT_BATCH(batch, 0);
2038     OUT_BATCH(batch, 0);
2039     OUT_BATCH(batch, 0);
2040
2041     OUT_BATCH(batch, GEN6_3DSTATE_WM | (9 - 2));
2042     OUT_RELOC(batch, render_state->render_kernels[kernel].bo,
2043               I915_GEM_DOMAIN_INSTRUCTION, 0,
2044               0);
2045     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHITF) |
2046               (5 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT));
2047     OUT_BATCH(batch, 0);
2048     OUT_BATCH(batch, (6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT)); /* DW4 */
2049     OUT_BATCH(batch, ((40 - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
2050               GEN6_3DSTATE_WM_DISPATCH_ENABLE |
2051               GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
2052     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT) |
2053               GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
2054     OUT_BATCH(batch, 0);
2055     OUT_BATCH(batch, 0);
2056 }
2057
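/*
 * Two vertex elements, both sourced from vertex buffer 0: (x, y) at offset
 * 0 and (s, t) at offset 8, each expanded to a vec4 with 1.0 in the z/w
 * components.
 */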
2058 static void
2059 gen6_emit_vertex_element_state(VADriverContextP ctx)
2060 {
2061     struct i965_driver_data *i965 = i965_driver_data(ctx);
2062     struct intel_batchbuffer *batch = i965->batch;
2063
2064     /* Set up our vertex elements, sourced from the single vertex buffer. */
2065     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
2066     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
2067     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2068               GEN6_VE0_VALID |
2069               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2070               (0 << VE0_OFFSET_SHIFT));
2071     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
2072               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2073               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2074               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2075     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
2076     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2077               GEN6_VE0_VALID |
2078               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2079               (8 << VE0_OFFSET_SHIFT));
2080     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
2081               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2082               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2083               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2084 }
2085
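/*
 * The vertex-buffer packet programs an explicit end address (second
 * relocation, 3 vertices * 16 bytes); the draw is the same three-vertex
 * RECTLIST as on older generations.
 */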
2086 static void
2087 gen6_emit_vertices(VADriverContextP ctx)
2088 {
2089     struct i965_driver_data *i965 = i965_driver_data(ctx);
2090     struct intel_batchbuffer *batch = i965->batch;
2091     struct i965_render_state *render_state = &i965->render_state;
2092
2093     BEGIN_BATCH(batch, 11);
2094     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
2095     OUT_BATCH(batch, 
2096               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2097               GEN6_VB0_VERTEXDATA |
2098               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2099     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2100     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2101     OUT_BATCH(batch, 0);
2102
2103     OUT_BATCH(batch, 
2104               CMD_3DPRIMITIVE |
2105               _3DPRIMITIVE_VERTEX_SEQUENTIAL |
2106               (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
2107               (0 << 9) |
2108               4);
2109     OUT_BATCH(batch, 3); /* vertex count per instance */
2110     OUT_BATCH(batch, 0); /* start vertex offset */
2111     OUT_BATCH(batch, 1); /* single instance */
2112     OUT_BATCH(batch, 0); /* start instance location */
2113     OUT_BATCH(batch, 0); /* index buffer offset, ignored */
2114     ADVANCE_BATCH(batch);
2115 }
2116
2117 static void
2118 gen6_render_emit_states(VADriverContextP ctx, int kernel)
2119 {
2120     struct i965_driver_data *i965 = i965_driver_data(ctx);
2121     struct intel_batchbuffer *batch = i965->batch;
2122
2123     intel_batchbuffer_start_atomic(batch, 0x1000);
2124     intel_batchbuffer_emit_mi_flush(batch);
2125     gen6_emit_invarient_states(ctx);
2126     gen6_emit_state_base_address(ctx);
2127     gen6_emit_viewport_state_pointers(ctx);
2128     gen6_emit_urb(ctx);
2129     gen6_emit_cc_state_pointers(ctx);
2130     gen6_emit_sampler_state_pointers(ctx);
2131     gen6_emit_vs_state(ctx);
2132     gen6_emit_gs_state(ctx);
2133     gen6_emit_clip_state(ctx);
2134     gen6_emit_sf_state(ctx);
2135     gen6_emit_wm_state(ctx, kernel);
2136     gen6_emit_binding_table(ctx);
2137     gen6_emit_depth_buffer_state(ctx);
2138     gen6_emit_drawing_rectangle(ctx);
2139     gen6_emit_vertex_element_state(ctx);
2140     gen6_emit_vertices(ctx);
2141     intel_batchbuffer_end_atomic(batch);
2142 }
2143
2144 static void
2145 gen6_render_put_surface(
2146     VADriverContextP   ctx,
2147     VASurfaceID        surface,
2148     const VARectangle *src_rect,
2149     const VARectangle *dst_rect,
2150     unsigned int       flags
2151 )
2152 {
2153     struct i965_driver_data *i965 = i965_driver_data(ctx);
2154     struct intel_batchbuffer *batch = i965->batch;
2155
2156     gen6_render_initialize(ctx);
2157     gen6_render_setup_states(ctx, surface, src_rect, dst_rect, flags);
2158     i965_clear_dest_region(ctx);
2159     gen6_render_emit_states(ctx, PS_KERNEL);
2160     intel_batchbuffer_flush(batch);
2161 }
2162
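/*
 * Subpictures need real alpha blending: classic source-over
 * (SRC_ALPHA, INV_SRC_ALPHA) with pre- and post-blend clamping to [0, 1].
 */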
2163 static void
2164 gen6_subpicture_render_blend_state(VADriverContextP ctx)
2165 {
2166     struct i965_driver_data *i965 = i965_driver_data(ctx);
2167     struct i965_render_state *render_state = &i965->render_state;
2168     struct gen6_blend_state *blend_state;
2169
2170     dri_bo_unmap(render_state->cc.state);    
2171     dri_bo_map(render_state->cc.blend, 1);
2172     assert(render_state->cc.blend->virtual);
2173     blend_state = render_state->cc.blend->virtual;
2174     memset(blend_state, 0, sizeof(*blend_state));
2175     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2176     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2177     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2178     blend_state->blend0.blend_enable = 1;
2179     blend_state->blend1.post_blend_clamp_enable = 1;
2180     blend_state->blend1.pre_blend_clamp_enable = 1;
2181     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2182     dri_bo_unmap(render_state->cc.blend);
2183 }
2184
2185 static void
2186 gen6_subpicture_render_setup_states(
2187     VADriverContextP   ctx,
2188     VASurfaceID        surface,
2189     const VARectangle *src_rect,
2190     const VARectangle *dst_rect
2191 )
2192 {
2193     i965_render_dest_surface_state(ctx, 0);
2194     i965_subpic_render_src_surfaces_state(ctx, surface);
2195     i965_render_sampler(ctx);
2196     i965_render_cc_viewport(ctx);
2197     gen6_render_color_calc_state(ctx);
2198     gen6_subpicture_render_blend_state(ctx);
2199     gen6_render_depth_stencil_state(ctx);
2200     i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
2201 }
2202
2203 static void
2204 gen6_render_put_subpicture(
2205     VADriverContextP   ctx,
2206     VASurfaceID        surface,
2207     const VARectangle *src_rect,
2208     const VARectangle *dst_rect
2209 )
2210 {
2211     struct i965_driver_data *i965 = i965_driver_data(ctx);
2212     struct intel_batchbuffer *batch = i965->batch;
2213     struct object_surface *obj_surface = SURFACE(surface);
2214     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic);
2215
2216     assert(obj_subpic);
2217     gen6_render_initialize(ctx);
2218     gen6_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
2219     gen6_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2220     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2221     intel_batchbuffer_flush(batch);
2222 }
2223
2224 /*
2225  * for GEN7
2226  */
2227 static void 
2228 gen7_render_initialize(VADriverContextP ctx)
2229 {
2230     struct i965_driver_data *i965 = i965_driver_data(ctx);
2231     struct i965_render_state *render_state = &i965->render_state;
2232     dri_bo *bo;
2233
2234     /* VERTEX BUFFER */
2235     dri_bo_unreference(render_state->vb.vertex_buffer);
2236     bo = dri_bo_alloc(i965->intel.bufmgr,
2237                       "vertex buffer",
2238                       4096,
2239                       4096);
2240     assert(bo);
2241     render_state->vb.vertex_buffer = bo;
2242
2243     /* WM */
2244     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
2245     bo = dri_bo_alloc(i965->intel.bufmgr,
2246                       "surface state & binding table",
2247                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
2248                       4096);
2249     assert(bo);
2250     render_state->wm.surface_state_binding_table_bo = bo;
2251
2252     dri_bo_unreference(render_state->wm.sampler);
2253     bo = dri_bo_alloc(i965->intel.bufmgr,
2254                       "sampler state",
2255                       MAX_SAMPLERS * sizeof(struct gen7_sampler_state),
2256                       4096);
2257     assert(bo);
2258     render_state->wm.sampler = bo;
2259     render_state->wm.sampler_count = 0;
2260
2261     /* COLOR CALCULATOR */
2262     dri_bo_unreference(render_state->cc.state);
2263     bo = dri_bo_alloc(i965->intel.bufmgr,
2264                       "color calc state",
2265                       sizeof(struct gen6_color_calc_state),
2266                       4096);
2267     assert(bo);
2268     render_state->cc.state = bo;
2269
2270     /* CC VIEWPORT */
2271     dri_bo_unreference(render_state->cc.viewport);
2272     bo = dri_bo_alloc(i965->intel.bufmgr,
2273                       "cc viewport",
2274                       sizeof(struct i965_cc_viewport),
2275                       4096);
2276     assert(bo);
2277     render_state->cc.viewport = bo;
2278
2279     /* BLEND STATE */
2280     dri_bo_unreference(render_state->cc.blend);
2281     bo = dri_bo_alloc(i965->intel.bufmgr,
2282                       "blend state",
2283                       sizeof(struct gen6_blend_state),
2284                       4096);
2285     assert(bo);
2286     render_state->cc.blend = bo;
2287
2288     /* DEPTH & STENCIL STATE */
2289     dri_bo_unreference(render_state->cc.depth_stencil);
2290     bo = dri_bo_alloc(i965->intel.bufmgr,
2291                       "depth & stencil state",
2292                       sizeof(struct gen6_depth_stencil_state),
2293                       4096);
2294     assert(bo);
2295     render_state->cc.depth_stencil = bo;
2296 }
2297
2298 static void
2299 gen7_render_color_calc_state(VADriverContextP ctx)
2300 {
2301     struct i965_driver_data *i965 = i965_driver_data(ctx);
2302     struct i965_render_state *render_state = &i965->render_state;
2303     struct gen6_color_calc_state *color_calc_state;
2304     
2305     dri_bo_map(render_state->cc.state, 1);
2306     assert(render_state->cc.state->virtual);
2307     color_calc_state = render_state->cc.state->virtual;
2308     memset(color_calc_state, 0, sizeof(*color_calc_state));
2309     color_calc_state->constant_r = 1.0;
2310     color_calc_state->constant_g = 0.0;
2311     color_calc_state->constant_b = 1.0;
2312     color_calc_state->constant_a = 1.0;
2313     dri_bo_unmap(render_state->cc.state);
2314 }
2315
2316 static void
2317 gen7_render_blend_state(VADriverContextP ctx)
2318 {
2319     struct i965_driver_data *i965 = i965_driver_data(ctx);
2320     struct i965_render_state *render_state = &i965->render_state;
2321     struct gen6_blend_state *blend_state;
2322     
2323     dri_bo_map(render_state->cc.blend, 1);
2324     assert(render_state->cc.blend->virtual);
2325     blend_state = render_state->cc.blend->virtual;
2326     memset(blend_state, 0, sizeof(*blend_state));
2327     blend_state->blend1.logic_op_enable = 1;
2328     blend_state->blend1.logic_op_func = 0xc;
2329     blend_state->blend1.pre_blend_clamp_enable = 1;
2330     dri_bo_unmap(render_state->cc.blend);
2331 }
2332
2333 static void
2334 gen7_render_depth_stencil_state(VADriverContextP ctx)
2335 {
2336     struct i965_driver_data *i965 = i965_driver_data(ctx);
2337     struct i965_render_state *render_state = &i965->render_state;
2338     struct gen6_depth_stencil_state *depth_stencil_state;
2339     
2340     dri_bo_map(render_state->cc.depth_stencil, 1);
2341     assert(render_state->cc.depth_stencil->virtual);
2342     depth_stencil_state = render_state->cc.depth_stencil->virtual;
2343     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
2344     dri_bo_unmap(render_state->cc.depth_stencil);
2345 }
2346
2347 static void 
2348 gen7_render_sampler(VADriverContextP ctx)
2349 {
2350     struct i965_driver_data *i965 = i965_driver_data(ctx);
2351     struct i965_render_state *render_state = &i965->render_state;
2352     struct gen7_sampler_state *sampler_state;
2353     int i;
2354     
2355     assert(render_state->wm.sampler_count > 0);
2356     assert(render_state->wm.sampler_count <= MAX_SAMPLERS);
2357
2358     dri_bo_map(render_state->wm.sampler, 1);
2359     assert(render_state->wm.sampler->virtual);
2360     sampler_state = render_state->wm.sampler->virtual;
2361     for (i = 0; i < render_state->wm.sampler_count; i++) {
2362         memset(sampler_state, 0, sizeof(*sampler_state));
2363         sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
2364         sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
2365         sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2366         sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2367         sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2368         sampler_state++;
2369     }
2370
2371     dri_bo_unmap(render_state->wm.sampler);
2372 }
2373
2374 static void
2375 gen7_render_setup_states(
2376     VADriverContextP   ctx,
2377     VASurfaceID        surface,
2378     const VARectangle *src_rect,
2379     const VARectangle *dst_rect,
2380     unsigned int       flags
2381 )
2382 {
2383     i965_render_dest_surface_state(ctx, 0);
2384     i965_render_src_surfaces_state(ctx, surface, flags);
2385     gen7_render_sampler(ctx);
2386     i965_render_cc_viewport(ctx);
2387     gen7_render_color_calc_state(ctx);
2388     gen7_render_blend_state(ctx);
2389     gen7_render_depth_stencil_state(ctx);
2390     i965_render_upload_constants(ctx, surface);
2391     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
2392 }
2393
2394 static void
2395 gen7_emit_invarient_states(VADriverContextP ctx)
2396 {
2397     struct i965_driver_data *i965 = i965_driver_data(ctx);
2398     struct intel_batchbuffer *batch = i965->batch;
2399
2400     BEGIN_BATCH(batch, 1);
2401     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
2402     ADVANCE_BATCH(batch);
2403
2404     BEGIN_BATCH(batch, 4);
2405     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (4 - 2));
2406     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
2407               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
2408     OUT_BATCH(batch, 0);
2409     OUT_BATCH(batch, 0);
2410     ADVANCE_BATCH(batch);
2411
2412     BEGIN_BATCH(batch, 2);
2413     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
2414     OUT_BATCH(batch, 1);
2415     ADVANCE_BATCH(batch);
2416
2417     /* Set system instruction pointer */
2418     BEGIN_BATCH(batch, 2);
2419     OUT_BATCH(batch, CMD_STATE_SIP | 0);
2420     OUT_BATCH(batch, 0);
2421     ADVANCE_BATCH(batch);
2422 }
2423
2424 static void
2425 gen7_emit_state_base_address(VADriverContextP ctx)
2426 {
2427     struct i965_driver_data *i965 = i965_driver_data(ctx);
2428     struct intel_batchbuffer *batch = i965->batch;
2429     struct i965_render_state *render_state = &i965->render_state;
2430
2431     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
2432     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
2433     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
2434     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
2435     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
2436     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
2437     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
2438     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
2439     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
2440     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
2441 }
2442
2443 static void
2444 gen7_emit_viewport_state_pointers(VADriverContextP ctx)
2445 {
2446     struct i965_driver_data *i965 = i965_driver_data(ctx);
2447     struct intel_batchbuffer *batch = i965->batch;
2448     struct i965_render_state *render_state = &i965->render_state;
2449
2450     BEGIN_BATCH(batch, 2);
2451     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
2452     OUT_RELOC(batch,
2453               render_state->cc.viewport,
2454               I915_GEM_DOMAIN_INSTRUCTION, 0,
2455               0);
2456     ADVANCE_BATCH(batch);
2457
2458     BEGIN_BATCH(batch, 2);
2459     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL | (2 - 2));
2460     OUT_BATCH(batch, 0);
2461     ADVANCE_BATCH(batch);
2462 }
2463
2464 /*
2465  * URB layout on GEN7 
2466  * ----------------------------------------
2467  * | PS Push Constants (8KB) | VS entries |
2468  * ----------------------------------------
2469  */
2470 static void
2471 gen7_emit_urb(VADriverContextP ctx)
2472 {
2473     struct i965_driver_data *i965 = i965_driver_data(ctx);
2474     struct intel_batchbuffer *batch = i965->batch;
2475     unsigned int num_urb_entries = 32;
2476
2477     if (IS_HASWELL(i965->intel.device_id))
2478         num_urb_entries = 64;
2479
2480     BEGIN_BATCH(batch, 2);
2481     OUT_BATCH(batch, GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS | (2 - 2));
2482     OUT_BATCH(batch, 8); /* 8KB, in 1KB units */
2483     ADVANCE_BATCH(batch);
2484
2485     BEGIN_BATCH(batch, 2);
2486     OUT_BATCH(batch, GEN7_3DSTATE_URB_VS | (2 - 2));
2487     OUT_BATCH(batch, 
2488               (num_urb_entries << GEN7_URB_ENTRY_NUMBER_SHIFT) |
2489               (2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT |
2490               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2491     ADVANCE_BATCH(batch);
2492
2493     BEGIN_BATCH(batch, 2);
2494     OUT_BATCH(batch, GEN7_3DSTATE_URB_GS | (2 - 2));
2495     OUT_BATCH(batch,
2496               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2497               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2498     ADVANCE_BATCH(batch);
2499
2500     BEGIN_BATCH(batch, 2);
2501     OUT_BATCH(batch, GEN7_3DSTATE_URB_HS | (2 - 2));
2502     OUT_BATCH(batch,
2503               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2504               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2505     ADVANCE_BATCH(batch);
2506
2507     BEGIN_BATCH(batch, 2);
2508     OUT_BATCH(batch, GEN7_3DSTATE_URB_DS | (2 - 2));
2509     OUT_BATCH(batch,
2510               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2511               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2512     ADVANCE_BATCH(batch);
2513 }
2514
2515 static void
2516 gen7_emit_cc_state_pointers(VADriverContextP ctx)
2517 {
2518     struct i965_driver_data *i965 = i965_driver_data(ctx);
2519     struct intel_batchbuffer *batch = i965->batch;
2520     struct i965_render_state *render_state = &i965->render_state;
2521
2522     BEGIN_BATCH(batch, 2);
2523     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (2 - 2));
2524     OUT_RELOC(batch,
2525               render_state->cc.state,
2526               I915_GEM_DOMAIN_INSTRUCTION, 0,
2527               1);
2528     ADVANCE_BATCH(batch);
2529
2530     BEGIN_BATCH(batch, 2);
2531     OUT_BATCH(batch, GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
2532     OUT_RELOC(batch,
2533               render_state->cc.blend,
2534               I915_GEM_DOMAIN_INSTRUCTION, 0,
2535               1);
2536     ADVANCE_BATCH(batch);
2537
2538     BEGIN_BATCH(batch, 2);
2539     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS | (2 - 2));
2540     OUT_RELOC(batch,
2541               render_state->cc.depth_stencil,
2542               I915_GEM_DOMAIN_INSTRUCTION, 0, 
2543               1);
2544     ADVANCE_BATCH(batch);
2545 }
2546
2547 static void
2548 gen7_emit_sampler_state_pointers(VADriverContextP ctx)
2549 {
2550     struct i965_driver_data *i965 = i965_driver_data(ctx);
2551     struct intel_batchbuffer *batch = i965->batch;
2552     struct i965_render_state *render_state = &i965->render_state;
2553
2554     BEGIN_BATCH(batch, 2);
2555     OUT_BATCH(batch, GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
2556     OUT_RELOC(batch,
2557               render_state->wm.sampler,
2558               I915_GEM_DOMAIN_INSTRUCTION, 0,
2559               0);
2560     ADVANCE_BATCH(batch);
2561 }
2562
2563 static void
2564 gen7_emit_binding_table(VADriverContextP ctx)
2565 {
2566     struct i965_driver_data *i965 = i965_driver_data(ctx);
2567     struct intel_batchbuffer *batch = i965->batch;
2568
2569     BEGIN_BATCH(batch, 2);
2570     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS | (2 - 2));
2571     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
2572     ADVANCE_BATCH(batch);
2573 }
2574
2575 static void
2576 gen7_emit_depth_buffer_state(VADriverContextP ctx)
2577 {
2578     struct i965_driver_data *i965 = i965_driver_data(ctx);
2579     struct intel_batchbuffer *batch = i965->batch;
2580
2581     BEGIN_BATCH(batch, 7);
2582     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_BUFFER | (7 - 2));
2583     OUT_BATCH(batch,
2584               (I965_DEPTHFORMAT_D32_FLOAT << 18) |
2585               (I965_SURFACE_NULL << 29));
2586     OUT_BATCH(batch, 0);
2587     OUT_BATCH(batch, 0);
2588     OUT_BATCH(batch, 0);
2589     OUT_BATCH(batch, 0);
2590     OUT_BATCH(batch, 0);
2591     ADVANCE_BATCH(batch);
2592
2593     BEGIN_BATCH(batch, 3);
2594     OUT_BATCH(batch, GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
2595     OUT_BATCH(batch, 0);
2596     OUT_BATCH(batch, 0);
2597     ADVANCE_BATCH(batch);
2598 }
2599
2600 static void
2601 gen7_emit_drawing_rectangle(VADriverContextP ctx)
2602 {
2603     i965_render_drawing_rectangle(ctx);
2604 }
2605
2606 static void 
2607 gen7_emit_vs_state(VADriverContextP ctx)
2608 {
2609     struct i965_driver_data *i965 = i965_driver_data(ctx);
2610     struct intel_batchbuffer *batch = i965->batch;
2611
2612     /* disable VS constant buffer */
2613     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (7 - 2));
2614     OUT_BATCH(batch, 0);
2615     OUT_BATCH(batch, 0);
2616     OUT_BATCH(batch, 0);
2617     OUT_BATCH(batch, 0);
2618     OUT_BATCH(batch, 0);
2619     OUT_BATCH(batch, 0);
2620         
2621     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
2622     OUT_BATCH(batch, 0); /* without VS kernel */
2623     OUT_BATCH(batch, 0);
2624     OUT_BATCH(batch, 0);
2625     OUT_BATCH(batch, 0);
2626     OUT_BATCH(batch, 0); /* pass-through */
2627 }
2628
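/*
 * Null out every programmable stage between VS and SF on GEN7: GS, HS, TE,
 * DS and stream-out all get zeroed state so vertices pass straight through.
 */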
2629 static void 
2630 gen7_emit_bypass_state(VADriverContextP ctx)
2631 {
2632     struct i965_driver_data *i965 = i965_driver_data(ctx);
2633     struct intel_batchbuffer *batch = i965->batch;
2634
2635     /* bypass GS */
2636     BEGIN_BATCH(batch, 7);
2637     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (7 - 2));
2638     OUT_BATCH(batch, 0);
2639     OUT_BATCH(batch, 0);
2640     OUT_BATCH(batch, 0);
2641     OUT_BATCH(batch, 0);
2642     OUT_BATCH(batch, 0);
2643     OUT_BATCH(batch, 0);
2644     ADVANCE_BATCH(batch);
2645
2646     BEGIN_BATCH(batch, 7);      
2647     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
2648     OUT_BATCH(batch, 0); /* without GS kernel */
2649     OUT_BATCH(batch, 0);
2650     OUT_BATCH(batch, 0);
2651     OUT_BATCH(batch, 0);
2652     OUT_BATCH(batch, 0);
2653     OUT_BATCH(batch, 0); /* pass-through */
2654     ADVANCE_BATCH(batch);
2655
2656     BEGIN_BATCH(batch, 2);
2657     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_GS | (2 - 2));
2658     OUT_BATCH(batch, 0);
2659     ADVANCE_BATCH(batch);
2660
2661     /* disable HS */
2662     BEGIN_BATCH(batch, 7);
2663     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_HS | (7 - 2));
2664     OUT_BATCH(batch, 0);
2665     OUT_BATCH(batch, 0);
2666     OUT_BATCH(batch, 0);
2667     OUT_BATCH(batch, 0);
2668     OUT_BATCH(batch, 0);
2669     OUT_BATCH(batch, 0);
2670     ADVANCE_BATCH(batch);
2671
2672     BEGIN_BATCH(batch, 7);
2673     OUT_BATCH(batch, GEN7_3DSTATE_HS | (7 - 2));
2674     OUT_BATCH(batch, 0);
2675     OUT_BATCH(batch, 0);
2676     OUT_BATCH(batch, 0);
2677     OUT_BATCH(batch, 0);
2678     OUT_BATCH(batch, 0);
2679     OUT_BATCH(batch, 0);
2680     ADVANCE_BATCH(batch);
2681
2682     BEGIN_BATCH(batch, 2);
2683     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_HS | (2 - 2));
2684     OUT_BATCH(batch, 0);
2685     ADVANCE_BATCH(batch);
2686
2687     /* Disable TE */
2688     BEGIN_BATCH(batch, 4);
2689     OUT_BATCH(batch, GEN7_3DSTATE_TE | (4 - 2));
2690     OUT_BATCH(batch, 0);
2691     OUT_BATCH(batch, 0);
2692     OUT_BATCH(batch, 0);
2693     ADVANCE_BATCH(batch);
2694
2695     /* Disable DS */
2696     BEGIN_BATCH(batch, 7);
2697     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_DS | (7 - 2));
2698     OUT_BATCH(batch, 0);
2699     OUT_BATCH(batch, 0);
2700     OUT_BATCH(batch, 0);
2701     OUT_BATCH(batch, 0);
2702     OUT_BATCH(batch, 0);
2703     OUT_BATCH(batch, 0);
2704     ADVANCE_BATCH(batch);
2705
2706     BEGIN_BATCH(batch, 6);
2707     OUT_BATCH(batch, GEN7_3DSTATE_DS | (6 - 2));
2708     OUT_BATCH(batch, 0);
2709     OUT_BATCH(batch, 0);
2710     OUT_BATCH(batch, 0);
2711     OUT_BATCH(batch, 0);
2712     OUT_BATCH(batch, 0);
2713     ADVANCE_BATCH(batch);
2714
2715     BEGIN_BATCH(batch, 2);
2716     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_DS | (2 - 2));
2717     OUT_BATCH(batch, 0);
2718     ADVANCE_BATCH(batch);
2719
2720     /* Disable STREAMOUT */
2721     BEGIN_BATCH(batch, 3);
2722     OUT_BATCH(batch, GEN7_3DSTATE_STREAMOUT | (3 - 2));
2723     OUT_BATCH(batch, 0);
2724     OUT_BATCH(batch, 0);
2725     ADVANCE_BATCH(batch);
2726 }
2727
2728 static void 
2729 gen7_emit_clip_state(VADriverContextP ctx)
2730 {
2731     struct i965_driver_data *i965 = i965_driver_data(ctx);
2732     struct intel_batchbuffer *batch = i965->batch;
2733
2734     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
2735     OUT_BATCH(batch, 0);
2736     OUT_BATCH(batch, 0); /* pass-through */
2737     OUT_BATCH(batch, 0);
2738 }
2739
2740 static void 
2741 gen7_emit_sf_state(VADriverContextP ctx)
2742 {
2743     struct i965_driver_data *i965 = i965_driver_data(ctx);
2744     struct intel_batchbuffer *batch = i965->batch;
2745
2746     BEGIN_BATCH(batch, 14);
2747     OUT_BATCH(batch, GEN7_3DSTATE_SBE | (14 - 2));
2748     OUT_BATCH(batch,
2749               (1 << GEN7_SBE_NUM_OUTPUTS_SHIFT) |
2750               (1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT) |
2751               (0 << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT));
2752     OUT_BATCH(batch, 0);
2753     OUT_BATCH(batch, 0);
2754     OUT_BATCH(batch, 0); /* DW4 */
2755     OUT_BATCH(batch, 0);
2756     OUT_BATCH(batch, 0);
2757     OUT_BATCH(batch, 0);
2758     OUT_BATCH(batch, 0);
2759     OUT_BATCH(batch, 0); /* DW9 */
2760     OUT_BATCH(batch, 0);
2761     OUT_BATCH(batch, 0);
2762     OUT_BATCH(batch, 0);
2763     OUT_BATCH(batch, 0);
2764     ADVANCE_BATCH(batch);
2765
2766     BEGIN_BATCH(batch, 7);
2767     OUT_BATCH(batch, GEN6_3DSTATE_SF | (7 - 2));
2768     OUT_BATCH(batch, 0);
2769     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
2770     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT);
2771     OUT_BATCH(batch, 0);
2772     OUT_BATCH(batch, 0);
2773     OUT_BATCH(batch, 0);
2774     ADVANCE_BATCH(batch);
2775 }
2776
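/*
 * Ivybridge and Haswell place the PS max-thread count at different bit
 * positions in 3DSTATE_PS, and Haswell additionally requires a non-zero
 * sample mask there; both differences are handled via max_threads_shift
 * and num_samples below.  SIMD16 dispatch, 86 threads, payload at GRF r6.
 */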
2777 static void 
2778 gen7_emit_wm_state(VADriverContextP ctx, int kernel)
2779 {
2780     struct i965_driver_data *i965 = i965_driver_data(ctx);
2781     struct intel_batchbuffer *batch = i965->batch;
2782     struct i965_render_state *render_state = &i965->render_state;
2783     unsigned int max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_IVB;
2784     unsigned int num_samples = 0;
2785
2786     if (IS_HASWELL(i965->intel.device_id)) {
2787         max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_HSW;
2788         num_samples = 1 << GEN7_PS_SAMPLE_MASK_SHIFT_HSW;
2789     }
2790
2791     BEGIN_BATCH(batch, 3);
2792     OUT_BATCH(batch, GEN6_3DSTATE_WM | (3 - 2));
2793     OUT_BATCH(batch,
2794               GEN7_WM_DISPATCH_ENABLE |
2795               GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
2796     OUT_BATCH(batch, 0);
2797     ADVANCE_BATCH(batch);
2798
2799     BEGIN_BATCH(batch, 7);
2800     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS | (7 - 2));
2801     OUT_BATCH(batch, 1); /* constant buffer 0 read length */
2802     OUT_BATCH(batch, 0);
2803     OUT_RELOC(batch, 
2804               render_state->curbe.bo,
2805               I915_GEM_DOMAIN_INSTRUCTION, 0,
2806               0);
2807     OUT_BATCH(batch, 0);
2808     OUT_BATCH(batch, 0);
2809     OUT_BATCH(batch, 0);
2810     ADVANCE_BATCH(batch);
2811
2812     BEGIN_BATCH(batch, 8);
2813     OUT_BATCH(batch, GEN7_3DSTATE_PS | (8 - 2));
2814     OUT_RELOC(batch, 
2815               render_state->render_kernels[kernel].bo,
2816               I915_GEM_DOMAIN_INSTRUCTION, 0,
2817               0);
2818     OUT_BATCH(batch, 
2819               (1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
2820               (5 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
2821     OUT_BATCH(batch, 0); /* scratch space base offset */
2822     OUT_BATCH(batch, 
2823               ((86 - 1) << max_threads_shift) | num_samples |
2824               GEN7_PS_PUSH_CONSTANT_ENABLE |
2825               GEN7_PS_ATTRIBUTE_ENABLE |
2826               GEN7_PS_16_DISPATCH_ENABLE);
2827     OUT_BATCH(batch, 
2828               (6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
2829     OUT_BATCH(batch, 0); /* kernel 1 pointer */
2830     OUT_BATCH(batch, 0); /* kernel 2 pointer */
2831     ADVANCE_BATCH(batch);
2832 }
2833
2834 static void
2835 gen7_emit_vertex_element_state(VADriverContextP ctx)
2836 {
2837     struct i965_driver_data *i965 = i965_driver_data(ctx);
2838     struct intel_batchbuffer *batch = i965->batch;
2839
2840     /* Set up our vertex elements, sourced from the single vertex buffer. */
2841     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
2842     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
2843     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2844               GEN6_VE0_VALID |
2845               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2846               (0 << VE0_OFFSET_SHIFT));
2847     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
2848               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2849               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2850               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2851     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
2852     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2853               GEN6_VE0_VALID |
2854               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2855               (8 << VE0_OFFSET_SHIFT));
2856     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
2857               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2858               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2859               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2860 }
2861
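/*
 * GEN7 moves the primitive topology out of the 3DPRIMITIVE header into DW1
 * and requires the address-modify enable bit in the vertex-buffer state.
 */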
2862 static void
2863 gen7_emit_vertices(VADriverContextP ctx)
2864 {
2865     struct i965_driver_data *i965 = i965_driver_data(ctx);
2866     struct intel_batchbuffer *batch = i965->batch;
2867     struct i965_render_state *render_state = &i965->render_state;
2868
2869     BEGIN_BATCH(batch, 5);
2870     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | (5 - 2));
2871     OUT_BATCH(batch, 
2872               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2873               GEN6_VB0_VERTEXDATA |
2874               GEN7_VB0_ADDRESS_MODIFYENABLE |
2875               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2876     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2877     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2878     OUT_BATCH(batch, 0);
2879     ADVANCE_BATCH(batch);
2880
2881     BEGIN_BATCH(batch, 7);
2882     OUT_BATCH(batch, CMD_3DPRIMITIVE | (7 - 2));
2883     OUT_BATCH(batch,
2884               _3DPRIM_RECTLIST |
2885               GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL);
2886     OUT_BATCH(batch, 3); /* vertex count per instance */
2887     OUT_BATCH(batch, 0); /* start vertex offset */
2888     OUT_BATCH(batch, 1); /* single instance */
2889     OUT_BATCH(batch, 0); /* start instance location */
2890     OUT_BATCH(batch, 0);
2891     ADVANCE_BATCH(batch);
2892 }
2893
2894 static void
2895 gen7_render_emit_states(VADriverContextP ctx, int kernel)
2896 {
2897     struct i965_driver_data *i965 = i965_driver_data(ctx);
2898     struct intel_batchbuffer *batch = i965->batch;
2899
2900     intel_batchbuffer_start_atomic(batch, 0x1000);
2901     intel_batchbuffer_emit_mi_flush(batch);
2902     gen7_emit_invarient_states(ctx);
2903     gen7_emit_state_base_address(ctx);
2904     gen7_emit_viewport_state_pointers(ctx);
2905     gen7_emit_urb(ctx);
2906     gen7_emit_cc_state_pointers(ctx);
2907     gen7_emit_sampler_state_pointers(ctx);
2908     gen7_emit_bypass_state(ctx);
2909     gen7_emit_vs_state(ctx);
2910     gen7_emit_clip_state(ctx);
2911     gen7_emit_sf_state(ctx);
2912     gen7_emit_wm_state(ctx, kernel);
2913     gen7_emit_binding_table(ctx);
2914     gen7_emit_depth_buffer_state(ctx);
2915     gen7_emit_drawing_rectangle(ctx);
2916     gen7_emit_vertex_element_state(ctx);
2917     gen7_emit_vertices(ctx);
2918     intel_batchbuffer_end_atomic(batch);
2919 }
2920
2921 static void
2922 gen7_render_put_surface(
2923     VADriverContextP   ctx,
2924     VASurfaceID        surface,
2925     const VARectangle *src_rect,
2926     const VARectangle *dst_rect,
2927     unsigned int       flags
2928 )
2929 {
2930     struct i965_driver_data *i965 = i965_driver_data(ctx);
2931     struct intel_batchbuffer *batch = i965->batch;
2932
2933     gen7_render_initialize(ctx);
2934     gen7_render_setup_states(ctx, surface, src_rect, dst_rect, flags);
2935     i965_clear_dest_region(ctx);
2936     gen7_render_emit_states(ctx, PS_KERNEL);
2937     intel_batchbuffer_flush(batch);
2938 }
2939
static void
gen7_subpicture_render_blend_state(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct gen6_blend_state *blend_state;

    dri_bo_unmap(render_state->cc.state);
    dri_bo_map(render_state->cc.blend, 1);
    assert(render_state->cc.blend->virtual);
    blend_state = render_state->cc.blend->virtual;
    memset(blend_state, 0, sizeof(*blend_state));
    blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
    blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
    blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
    blend_state->blend0.blend_enable = 1;
    blend_state->blend1.post_blend_clamp_enable = 1;
    blend_state->blend1.pre_blend_clamp_enable = 1;
    blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
    dri_bo_unmap(render_state->cc.blend);
}

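/*
 * State setup for subpicture rendering: destination and subpicture
 * source surfaces, sampler, CC viewport, color-calc/blend/depth-stencil
 * state, and the subpicture vertex data.
 */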
static void
gen7_subpicture_render_setup_states(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_render_cc_viewport(ctx);
    gen7_render_color_calc_state(ctx);
    gen7_subpicture_render_blend_state(ctx);
    gen7_render_depth_stencil_state(ctx);
    i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
}

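/*
 * Top-level Gen7 path for blending a subpicture onto a surface,
 * including the palette upload for paletted subpicture formats.
 */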
static void
gen7_render_put_subpicture(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct object_surface *obj_surface = SURFACE(surface);
    struct object_subpic *obj_subpic;

    assert(obj_surface);
    obj_subpic = SUBPIC(obj_surface->subpic);
    assert(obj_subpic);
    gen7_render_initialize(ctx);
    gen7_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
    gen7_render_emit_states(ctx, PS_SUBPIC_KERNEL);
    i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
    intel_batchbuffer_flush(batch);
}


/*
 * global functions
 */
VAStatus
i965_DestroySurfaces(VADriverContextP ctx,
                     VASurfaceID *surface_list,
                     int num_surfaces);
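/*
 * Generation-independent entry point: run post-processing first, then
 * dispatch to the Gen4/5, Gen6 or Gen7 render path.
 *
 * Illustrative (hypothetical) caller, e.g. from a vaPutSurface backend;
 * the rectangle values are placeholders:
 *
 *     VARectangle src = { 0, 0, surface_width, surface_height };
 *     VARectangle dst = { 0, 0, drawable_width, drawable_height };
 *     intel_render_put_surface(ctx, surface, &src, &dst, VA_FRAME_PICTURE);
 */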
void
intel_render_put_surface(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int has_done_scaling = 0;
    VASurfaceID in_surface_id = surface;
    VASurfaceID out_surface_id = i965_post_processing(ctx, surface, src_rect, dst_rect, flags, &has_done_scaling);

    assert((!has_done_scaling) || (out_surface_id != VA_INVALID_ID));

    if (out_surface_id != VA_INVALID_ID)
        in_surface_id = out_surface_id;

    /* If post-processing already scaled into dst_rect, sample from dst_rect;
     * otherwise the render kernels scale from src_rect themselves. */
    if (IS_GEN7(i965->intel.device_id))
        gen7_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
    else if (IS_GEN6(i965->intel.device_id))
        gen6_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
    else
        i965_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);

    /* Release the temporary surface created by post-processing, if any. */
    if (in_surface_id != surface)
        i965_DestroySurfaces(ctx, &in_surface_id, 1);
}

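/*
 * Generation-independent entry point for subpicture blending; dispatches
 * to the matching per-generation implementation.
 */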
void
intel_render_put_subpicture(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);

    if (IS_GEN7(i965->intel.device_id))
        gen7_render_put_subpicture(ctx, surface, src_rect, dst_rect);
    else if (IS_GEN6(i965->intel.device_id))
        gen6_render_put_subpicture(ctx, surface, src_rect, dst_rect);
    else
        i965_render_put_subpicture(ctx, surface, src_rect, dst_rect);
}

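/*
 * Upload the per-generation render kernels into buffer objects and
 * allocate the constant (CURBE) buffer; called once at driver init.
 */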
Bool
i965_render_init(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    int i;

    /* kernel */
    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen5) /
                                 sizeof(render_kernels_gen5[0])));
    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) /
                                 sizeof(render_kernels_gen6[0])));

    /* Pick the kernel binaries matching the GPU generation; Haswell needs
     * its own Gen7 variants. */
    if (IS_GEN7(i965->intel.device_id))
        memcpy(render_state->render_kernels,
               (IS_HASWELL(i965->intel.device_id) ? render_kernels_gen7_haswell : render_kernels_gen7),
               sizeof(render_state->render_kernels));
    else if (IS_GEN6(i965->intel.device_id))
        memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
    else if (IS_IRONLAKE(i965->intel.device_id))
        memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
    else
        memcpy(render_state->render_kernels, render_kernels_gen4, sizeof(render_state->render_kernels));

    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
        struct i965_kernel *kernel = &render_state->render_kernels[i];

        if (!kernel->size)
            continue;

        kernel->bo = dri_bo_alloc(i965->intel.bufmgr,
                                  kernel->name,
                                  kernel->size, 0x1000);
        assert(kernel->bo);
        dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
    }

    /* constant buffer */
    render_state->curbe.bo = dri_bo_alloc(i965->intel.bufmgr,
                                          "constant buffer",
                                          4096, 64);
    assert(render_state->curbe.bo);

    return True;
}

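/*
 * Release every buffer object owned by the render state; called at
 * driver teardown.
 */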
Bool
i965_render_terminate(VADriverContextP ctx)
{
    int i;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;

    dri_bo_unreference(render_state->curbe.bo);
    render_state->curbe.bo = NULL;

    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
        struct i965_kernel *kernel = &render_state->render_kernels[i];

        dri_bo_unreference(kernel->bo);
        kernel->bo = NULL;
    }

    dri_bo_unreference(render_state->vb.vertex_buffer);
    render_state->vb.vertex_buffer = NULL;
    dri_bo_unreference(render_state->vs.state);
    render_state->vs.state = NULL;
    dri_bo_unreference(render_state->sf.state);
    render_state->sf.state = NULL;
    dri_bo_unreference(render_state->wm.sampler);
    render_state->wm.sampler = NULL;
    dri_bo_unreference(render_state->wm.state);
    render_state->wm.state = NULL;
    dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
    render_state->wm.surface_state_binding_table_bo = NULL;
    dri_bo_unreference(render_state->cc.viewport);
    render_state->cc.viewport = NULL;
    dri_bo_unreference(render_state->cc.state);
    render_state->cc.state = NULL;
    dri_bo_unreference(render_state->cc.blend);
    render_state->cc.blend = NULL;
    dri_bo_unreference(render_state->cc.depth_stencil);
    render_state->cc.depth_stencil = NULL;

    if (render_state->draw_region) {
        dri_bo_unreference(render_state->draw_region->bo);
        free(render_state->draw_region);
        render_state->draw_region = NULL;
    }

    return True;
}
