/*
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

/*
 * Most of the rendering code is ported from xf86-video-intel/src/i965_video.c
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include <va/va_drmcommon.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"
#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_structs.h"

#include "i965_render.h"

#define SF_KERNEL_NUM_GRF       16
#define SF_MAX_THREADS          1

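/*
 * The .g4b files included below are precompiled GEN shader binaries
 * (assembled by intel-gen4asm); each 128-bit GEN instruction is stored
 * as four uint32_t words, hence the [][4] element type.
 */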
static const uint32_t sf_kernel_static[][4] =
{
#include "shaders/render/exa_sf.g4b"
};

#define PS_KERNEL_NUM_GRF       32
#define PS_MAX_THREADS          32

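/* Convert a GRF register count into the "16-register blocks minus one"
 * encoding used for thread0.grf_reg_count in the unit states below. */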
#define I965_GRF_BLOCKS(nreg)   ((nreg + 15) / 16 - 1)

static const uint32_t ps_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_planar.g4b"
#include "shaders/render/exa_wm_yuv_rgb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};
static const uint32_t ps_subpic_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_argb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

/* On IRONLAKE */
static const uint32_t sf_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_sf.g4b.gen5"
};

static const uint32_t ps_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_planar.g4b.gen5"
#include "shaders/render/exa_wm_yuv_rgb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};
static const uint32_t ps_subpic_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_argb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

/* programs for Sandybridge */
static const uint32_t sf_kernel_static_gen6[][4] =
{
};

static const uint32_t ps_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_planar.g6b"
#include "shaders/render/exa_wm_yuv_rgb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

static const uint32_t ps_subpic_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_argb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

/* programs for Ivybridge */
static const uint32_t sf_kernel_static_gen7[][4] =
{
};

static const uint32_t ps_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

static const uint32_t ps_subpic_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_argb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

/* Programs for Haswell: only the planar sampling kernel differs from
 * Ivybridge; the other stages reuse the gen7 binaries. */
static const uint32_t ps_kernel_static_gen7_haswell[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b.haswell"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

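/*
 * A single BO holds all surface-state blocks followed by the binding
 * table: MAX_RENDER_SURFACES states, each padded to the larger of the
 * pre-GEN7 and GEN7 sizes, so SURFACE_STATE_OFFSET() is valid for either
 * layout.
 */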
#define SURFACE_STATE_PADDED_SIZE_I965  ALIGN(sizeof(struct i965_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN7  ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE       MAX(SURFACE_STATE_PADDED_SIZE_I965, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * index)
#define BINDING_TABLE_OFFSET            SURFACE_STATE_OFFSET(MAX_RENDER_SURFACES)

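/* Reinterpret a float's bit pattern as a uint32_t through a union (the
 * strict-aliasing-safe form of type punning); used to emit float
 * immediates with OUT_BATCH(). */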
static uint32_t float_to_uint(float f)
{
    union {
        uint32_t i;
        float f;
    } x;

    x.f = f;
    return x.i;
}

enum {
    SF_KERNEL = 0,
    PS_KERNEL,
    PS_SUBPIC_KERNEL
};

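/* Per-generation kernel tables, indexed by the enum above; the trailing
 * bo field is NULL here and filled in when the binaries are uploaded. */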
static struct i965_kernel render_kernels_gen4[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static,
        sizeof(sf_kernel_static),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static,
        sizeof(ps_kernel_static),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static,
        sizeof(ps_subpic_kernel_static),
        NULL
    }
};

static struct i965_kernel render_kernels_gen5[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen5,
        sizeof(sf_kernel_static_gen5),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen5,
        sizeof(ps_kernel_static_gen5),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen5,
        sizeof(ps_subpic_kernel_static_gen5),
        NULL
    }
};

static struct i965_kernel render_kernels_gen6[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen6,
        sizeof(sf_kernel_static_gen6),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen6,
        sizeof(ps_kernel_static_gen6),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen6,
        sizeof(ps_subpic_kernel_static_gen6),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7,
        sizeof(ps_kernel_static_gen7),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7_haswell[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7_haswell,
        sizeof(ps_kernel_static_gen7_haswell),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

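/*
 * Static URB partitioning for the fixed-function pipeline. GS and CLIP
 * are disabled, so their partitions are empty; VS only passes vertices
 * through, SF gets a single entry of size 2, and CS holds the CURBE
 * constants. i965_render_urb_layout() turns these into URB_FENCE values.
 */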
#define URB_VS_ENTRIES        8
#define URB_VS_ENTRY_SIZE     1

#define URB_GS_ENTRIES        0
#define URB_GS_ENTRY_SIZE     0

#define URB_CLIP_ENTRIES      0
#define URB_CLIP_ENTRY_SIZE   0

#define URB_SF_ENTRIES        1
#define URB_SF_ENTRY_SIZE     2

#define URB_CS_ENTRIES        1
#define URB_CS_ENTRY_SIZE     1

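/* VS unit: programmed but disabled (vs_enable = 0), so vertices pass
 * through to the SF stage untouched and no vertex kernel is loaded. */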
static void
i965_render_vs_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_vs_unit_state *vs_state;

    dri_bo_map(render_state->vs.state, 1);
    assert(render_state->vs.state->virtual);
    vs_state = render_state->vs.state->virtual;
    memset(vs_state, 0, sizeof(*vs_state));

    if (IS_IRONLAKE(i965->intel.device_id))
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
    else
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES;

    vs_state->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
    vs_state->vs6.vs_enable = 0;
    vs_state->vs6.vert_cache_disable = 1;

    dri_bo_unmap(render_state->vs.state);
}

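/* SF unit: dispatches the small exa_sf setup kernel (a single thread
 * with one URB entry), which performs triangle setup for the WM stage. */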
static void
i965_render_sf_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sf_unit_state *sf_state;

    dri_bo_map(render_state->sf.state, 1);
    assert(render_state->sf.state->virtual);
    sf_state = render_state->sf.state->virtual;
    memset(sf_state, 0, sizeof(*sf_state));

    sf_state->thread0.grf_reg_count = I965_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
    sf_state->thread0.kernel_start_pointer = render_state->render_kernels[SF_KERNEL].bo->offset >> 6;

    sf_state->sf1.single_program_flow = 1; /* XXX */
    sf_state->sf1.binding_table_entry_count = 0;
    sf_state->sf1.thread_priority = 0;
    sf_state->sf1.floating_point_mode = 0; /* Mesa does this */
    sf_state->sf1.illegal_op_exception_enable = 1;
    sf_state->sf1.mask_stack_exception_enable = 1;
    sf_state->sf1.sw_exception_enable = 1;

    /* scratch space is not used in our kernel */
    sf_state->thread2.per_thread_scratch_space = 0;
    sf_state->thread2.scratch_space_base_pointer = 0;

    sf_state->thread3.const_urb_entry_read_length = 0; /* no const URBs */
    sf_state->thread3.const_urb_entry_read_offset = 0; /* no const URBs */
    sf_state->thread3.urb_entry_read_length = 1; /* 1 URB per vertex */
    sf_state->thread3.urb_entry_read_offset = 0;
    sf_state->thread3.dispatch_grf_start_reg = 3;

    sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
    sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
    sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
    sf_state->thread4.stats_enable = 1;

    sf_state->sf5.viewport_transform = 0; /* skip viewport */

    sf_state->sf6.cull_mode = I965_CULLMODE_NONE;
    sf_state->sf6.scissor = 0;

    sf_state->sf7.trifan_pv = 2;

    sf_state->sf6.dest_org_vbias = 0x8;
    sf_state->sf6.dest_org_hbias = 0x8;

    dri_bo_emit_reloc(render_state->sf.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      sf_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_sf_unit_state, thread0),
                      render_state->render_kernels[SF_KERNEL].bo);

    dri_bo_unmap(render_state->sf.state);
}

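/* One bilinear, edge-clamped sampler state per source plane; the
 * sampler_count is bumped as the source surface states are emitted. */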
static void
i965_render_sampler(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sampler_state *sampler_state;
    int i;

    assert(render_state->wm.sampler_count > 0);
    assert(render_state->wm.sampler_count <= MAX_SAMPLERS);

    dri_bo_map(render_state->wm.sampler, 1);
    assert(render_state->wm.sampler->virtual);
    sampler_state = render_state->wm.sampler->virtual;
    for (i = 0; i < render_state->wm.sampler_count; i++) {
        memset(sampler_state, 0, sizeof(*sampler_state));
        sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss1.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state++;
    }

    dri_bo_unmap(render_state->wm.sampler);
}

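/* WM unit for subpicture blending: same layout as i965_render_wm_unit()
 * below, but it points at the ARGB sampling kernel and reads no CURBE
 * constants. */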
static void
i965_subpic_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_SUBPIC_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 3; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 0;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_SUBPIC_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

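/* WM (pixel shader) unit for the main video kernel: SIMD16-only
 * dispatch, reading one CURBE constant entry (see
 * i965_render_upload_constants). */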
static void
i965_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0;        /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 2; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 1;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

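/* A depth range wide enough that the CC viewport never clips anything. */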
static void
i965_render_cc_viewport(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_viewport *cc_viewport;

    dri_bo_map(render_state->cc.viewport, 1);
    assert(render_state->cc.viewport->virtual);
    cc_viewport = render_state->cc.viewport->virtual;
    memset(cc_viewport, 0, sizeof(*cc_viewport));

    cc_viewport->min_depth = -1.e35;
    cc_viewport->max_depth = 1.e35;

    dri_bo_unmap(render_state->cc.viewport);
}

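/* CC unit for subpicture rendering: classic source-over blending,
 * final = src * src_alpha + dst * (1 - src_alpha). */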
static void
i965_subpic_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 0;   /* disable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 1;     /* enable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc3.alpha_test_format = 0; /* ALPHATEST_UNORM8: store alpha value as UNORM8 */
    cc_state->cc3.alpha_test_func = 5;  /* COMPAREFUNCTION_LESS: pass if less than the reference */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_DST_ALPHA;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_DST_ALPHA;

    cc_state->cc6.clamp_post_alpha_blend = 0;
    cc_state->cc6.clamp_pre_alpha_blend = 0;

    /* final color = src_color * src_blend_factor +/- dst_color * dest_blend_factor */
    cc_state->cc6.blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc6.src_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
    cc_state->cc6.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;

    /* alpha test reference */
    cc_state->cc7.alpha_ref.f = 0.0;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 1;   /* enable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 0;     /* disable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_ONE;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_ONE;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss3.tiled_surface = 0;
        ss->ss3.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
i965_render_set_surface_state(
    struct i965_surface_state *ss,
    dri_bo                    *bo,
    unsigned long              offset,
    unsigned int               width,
    unsigned int               height,
    unsigned int               pitch,
    unsigned int               format,
    unsigned int               flags
)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    switch (flags & (I965_PP_FLAG_TOP_FIELD | I965_PP_FLAG_BOTTOM_FIELD)) {
    case I965_PP_FLAG_BOTTOM_FIELD:
        ss->ss0.vert_line_stride_ofs = 1;
        /* fall-through */
    case I965_PP_FLAG_TOP_FIELD:
        ss->ss0.vert_line_stride = 1;
        height /= 2;
        break;
    }

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss0.color_blend = 1;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    i965_render_set_surface_tiling(ss, tiling);
}

static void
gen7_render_set_surface_tiling(struct gen7_surface_state *ss, uint32_t tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

/* Set "Shader Channel Select": on Haswell the SCS fields in SURFACE_STATE
 * must be programmed explicitly to the identity R/G/B/A mapping, since
 * they default to zero. */
static void
gen7_render_set_surface_scs(struct gen7_surface_state *ss)
{
    ss->ss7.shader_chanel_select_r = HSW_SCS_RED;
    ss->ss7.shader_chanel_select_g = HSW_SCS_GREEN;
    ss->ss7.shader_chanel_select_b = HSW_SCS_BLUE;
    ss->ss7.shader_chanel_select_a = HSW_SCS_ALPHA;
}

static void
gen7_render_set_surface_state(
    struct gen7_surface_state *ss,
    dri_bo                    *bo,
    unsigned long              offset,
    int                        width,
    int                        height,
    int                        pitch,
    int                        format,
    unsigned int               flags
)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    switch (flags & (I965_PP_FLAG_TOP_FIELD | I965_PP_FLAG_BOTTOM_FIELD)) {
    case I965_PP_FLAG_BOTTOM_FIELD:
        ss->ss0.vert_line_stride_ofs = 1;
        /* fall-through */
    case I965_PP_FLAG_TOP_FIELD:
        ss->ss0.vert_line_stride = 1;
        height /= 2;
        break;
    }

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    gen7_render_set_surface_tiling(ss, tiling);
}

static void
i965_render_src_surface_state(
    VADriverContextP ctx,
    int              index,
    dri_bo          *region,
    unsigned long    offset,
    int              w,
    int              h,
    int              pitch,
    int              format,
    unsigned int     flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;

    assert(index < MAX_RENDER_SURFACES);

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format, flags);
        if (IS_HASWELL(i965->intel.device_id))
            gen7_render_set_surface_scs(ss);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          region);
    } else {
        i965_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format, flags);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          region);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
    render_state->wm.sampler_count++;
}

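/* Bind the source planes at binding-table indices 1..6: Y twice, then
 * either the interleaved UV plane (NV12) or the separate U and V planes.
 * Each plane is bound twice to match the binding-table indices the
 * sampling kernels expect. */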
static void
i965_render_src_surfaces_state(
    VADriverContextP ctx,
    VASurfaceID      surface,
    unsigned int     flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface;
    int region_pitch;
    int rw, rh;
    dri_bo *region;

    obj_surface = SURFACE(surface);
    assert(obj_surface);

    region_pitch = obj_surface->width;
    rw = obj_surface->orig_width;
    rh = obj_surface->orig_height;
    region = obj_surface->bo;

    i965_render_src_surface_state(ctx, 1, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags); /* Y */
    i965_render_src_surface_state(ctx, 2, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);

    if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2')) {
        i965_render_src_surface_state(ctx, 3, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8G8_UNORM, flags); /* UV */
        i965_render_src_surface_state(ctx, 4, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8G8_UNORM, flags);
    } else {
        i965_render_src_surface_state(ctx, 3, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags); /* U */
        i965_render_src_surface_state(ctx, 4, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags);
        i965_render_src_surface_state(ctx, 5, region,
                                      region_pitch * obj_surface->y_cr_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags); /* V */
        i965_render_src_surface_state(ctx, 6, region,
                                      region_pitch * obj_surface->y_cr_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags);
    }
}

static void
i965_subpic_render_src_surfaces_state(VADriverContextP ctx,
                                      VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface = SURFACE(surface);
    struct object_subpic *obj_subpic;
    struct object_image *obj_image;
    dri_bo *subpic_region;

    assert(obj_surface);
    assert(obj_surface->bo);

    obj_subpic = SUBPIC(obj_surface->subpic);
    obj_image = IMAGE(obj_subpic->image);
    subpic_region = obj_image->bo;

    /* subpicture surface */
    i965_render_src_surface_state(ctx, 1, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
    i965_render_src_surface_state(ctx, 2, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
}

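/* Destination (render target) surface at binding-table index 0, with a
 * format chosen from the drawable's bytes per pixel. */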
static void
i965_render_dest_surface_state(VADriverContextP ctx, int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;
    int format;

    assert(index < MAX_RENDER_SURFACES);

    if (dest_region->cpp == 2) {
        format = I965_SURFACEFORMAT_B5G6R5_UNORM;
    } else {
        format = I965_SURFACEFORMAT_B8G8R8A8_UNORM;
    }

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format, 0);
        if (IS_HASWELL(i965->intel.device_id))
            gen7_render_set_surface_scs(ss);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          dest_region->bo);
    } else {
        i965_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format, 0);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          dest_region->bo);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}

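/*
 * Fill the vertex buffer with the three RECTLIST vertices. Each vertex
 * is four floats, texture (u,v) followed by screen (x,y); the texture
 * corners stay fixed while the rotation table below permutes the
 * screen-space corners for 0/90/180/270 degree rotation.
 */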
static void
i965_fill_vertex_buffer(
    VADriverContextP ctx,
    float tex_coords[4], /* [(u1,v1);(u2,v2)] */
    float vid_coords[4]  /* [(x1,y1);(x2,y2)] */
)
{
    struct i965_driver_data * const i965 = i965_driver_data(ctx);
    float vb[12];

    enum { X1, Y1, X2, Y2 };

    static const unsigned int g_rotation_indices[][6] = {
        [VA_ROTATION_NONE] = { X1, Y1, X2, Y1, X2, Y2 },
        [VA_ROTATION_90]   = { X2, Y1, X2, Y2, X1, Y2 },
        [VA_ROTATION_180]  = { X2, Y2, X1, Y2, X1, Y1 },
        [VA_ROTATION_270]  = { X1, Y2, X1, Y1, X2, Y1 },
    };

    const unsigned int * const rotation_indices =
        g_rotation_indices[i965->rotation_attrib->value];

    vb[0]  = tex_coords[X1]; /* top-left corner */
    vb[1]  = tex_coords[Y1];
    vb[2]  = vid_coords[rotation_indices[0]];
    vb[3]  = vid_coords[rotation_indices[1]];

    vb[4]  = tex_coords[X2]; /* top-right corner */
    vb[5]  = tex_coords[Y1];
    vb[6]  = vid_coords[rotation_indices[2]];
    vb[7]  = vid_coords[rotation_indices[3]];

    vb[8]  = tex_coords[X2]; /* bottom-right corner */
    vb[9]  = tex_coords[Y2];
    vb[10] = vid_coords[rotation_indices[4]];
    vb[11] = vid_coords[rotation_indices[5]];

    dri_bo_subdata(i965->render_state.vb.vertex_buffer, 0, sizeof(vb), vb);
}

static void
i965_subpic_render_upload_vertex(VADriverContextP ctx,
                                 VASurfaceID surface,
                                 const VARectangle *output_rect)
{
    struct i965_driver_data  *i965         = i965_driver_data(ctx);
    struct object_surface    *obj_surface  = SURFACE(surface);
    struct object_subpic     *obj_subpic   = SUBPIC(obj_surface->subpic);
    float tex_coords[4], vid_coords[4];
    VARectangle dst_rect;

    if (obj_subpic->flags & VA_SUBPICTURE_DESTINATION_IS_SCREEN_COORD)
        dst_rect = obj_subpic->dst_rect;
    else {
        const float sx  = (float)output_rect->width  / obj_surface->orig_width;
        const float sy  = (float)output_rect->height / obj_surface->orig_height;
        dst_rect.x      = output_rect->x + sx * obj_subpic->dst_rect.x;
        dst_rect.y      = output_rect->y + sy * obj_subpic->dst_rect.y;
        dst_rect.width  = sx * obj_subpic->dst_rect.width;
        dst_rect.height = sy * obj_subpic->dst_rect.height;
    }

    tex_coords[0] = (float)obj_subpic->src_rect.x / obj_subpic->width;
    tex_coords[1] = (float)obj_subpic->src_rect.y / obj_subpic->height;
    tex_coords[2] = (float)(obj_subpic->src_rect.x + obj_subpic->src_rect.width) / obj_subpic->width;
    tex_coords[3] = (float)(obj_subpic->src_rect.y + obj_subpic->src_rect.height) / obj_subpic->height;

    vid_coords[0] = dst_rect.x;
    vid_coords[1] = dst_rect.y;
    vid_coords[2] = (float)(dst_rect.x + dst_rect.width);
    vid_coords[3] = (float)(dst_rect.y + dst_rect.height);

    i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
}

static void
i965_render_upload_vertex(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    struct object_surface *obj_surface;
    float tex_coords[4], vid_coords[4];
    int width, height;

    obj_surface = SURFACE(surface);
    assert(obj_surface);

    width  = obj_surface->orig_width;
    height = obj_surface->orig_height;

    tex_coords[0] = (float)src_rect->x / width;
    tex_coords[1] = (float)src_rect->y / height;
    tex_coords[2] = (float)(src_rect->x + src_rect->width) / width;
    tex_coords[3] = (float)(src_rect->y + src_rect->height) / height;

    vid_coords[0] = dest_region->x + dst_rect->x;
    vid_coords[1] = dest_region->y + dst_rect->y;
    vid_coords[2] = vid_coords[0] + dst_rect->width;
    vid_coords[3] = vid_coords[1] + dst_rect->height;

    i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
}

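/* The single CURBE constant selects the source layout for the planar
 * sampling kernel: 0 = three-plane YUV, 1 = NV12 (interleaved UV),
 * 2 = grayscale (Y only). */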
static void
i965_render_upload_constants(VADriverContextP ctx,
                             VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    unsigned short *constant_buffer;
    struct object_surface *obj_surface = SURFACE(surface);

    dri_bo_map(render_state->curbe.bo, 1);
    assert(render_state->curbe.bo->virtual);
    constant_buffer = render_state->curbe.bo->virtual;

    if (obj_surface->subsampling == SUBSAMPLE_YUV400) {
        assert(obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '1') ||
               obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '3'));
        *constant_buffer = 2;
    } else {
        if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2'))
            *constant_buffer = 1;
        else
            *constant_buffer = 0;
    }

    dri_bo_unmap(render_state->curbe.bo);
}

static void
i965_surface_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_render_src_surfaces_state(ctx, surface, flags);
    i965_render_sampler(ctx);
    i965_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_render_cc_unit(ctx);
    i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
    i965_render_upload_constants(ctx, surface);
}

static void
i965_subpic_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_subpic_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_subpic_render_cc_unit(ctx);
    i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
}

static void
i965_render_pipeline_select(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_sip(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_STATE_SIP | 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

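/* STATE_BASE_ADDRESS: only the surface-state base points at a real BO
 * (the combined surface-state/binding-table buffer); the other bases
 * stay at zero. Ironlake's variant of the command is two dwords longer. */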
static void
i965_render_state_base_address(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 8);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 6);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_binding_table_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 6);
    OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS | 4);
    OUT_BATCH(batch, 0); /* vs */
    OUT_BATCH(batch, 0); /* gs */
    OUT_BATCH(batch, 0); /* clip */
    OUT_BATCH(batch, 0); /* sf */
    OUT_BATCH(batch, BINDING_TABLE_OFFSET);
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_color(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 5);
    OUT_BATCH(batch, CMD_CONSTANT_COLOR | 3);
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(0.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    ADVANCE_BATCH(batch);
}

static void
i965_render_pipelined_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, CMD_PIPELINED_POINTERS | 5);
    OUT_RELOC(batch, render_state->vs.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_BATCH(batch, 0);  /* disable GS */
    OUT_BATCH(batch, 0);  /* disable CLIP */
    OUT_RELOC(batch, render_state->sf.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->wm.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    ADVANCE_BATCH(batch);
}

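/* Partition the URB with a single URB_FENCE command: each fence field
 * holds the end offset of that unit's region, accumulated in pipeline
 * order (VS, GS, CLIP, SF, CS) from the URB_* sizes above. */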
static void
i965_render_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    int urb_vs_start, urb_vs_size;
    int urb_gs_start, urb_gs_size;
    int urb_clip_start, urb_clip_size;
    int urb_sf_start, urb_sf_size;
    int urb_cs_start, urb_cs_size;

    urb_vs_start = 0;
    urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
    urb_gs_start = urb_vs_start + urb_vs_size;
    urb_gs_size = URB_GS_ENTRIES * URB_GS_ENTRY_SIZE;
    urb_clip_start = urb_gs_start + urb_gs_size;
    urb_clip_size = URB_CLIP_ENTRIES * URB_CLIP_ENTRY_SIZE;
    urb_sf_start = urb_clip_start + urb_clip_size;
    urb_sf_size = URB_SF_ENTRIES * URB_SF_ENTRY_SIZE;
    urb_cs_start = urb_sf_start + urb_sf_size;
    urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch,
              CMD_URB_FENCE |
              UF0_CS_REALLOC |
              UF0_SF_REALLOC |
              UF0_CLIP_REALLOC |
              UF0_GS_REALLOC |
              UF0_VS_REALLOC |
              1);
    OUT_BATCH(batch,
              ((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
              ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
              ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
    OUT_BATCH(batch,
              ((urb_cs_start + urb_cs_size) << UF2_CS_FENCE_SHIFT) |
              ((urb_sf_start + urb_sf_size) << UF2_SF_FENCE_SHIFT));
    ADVANCE_BATCH(batch);
}

static void
i965_render_cs_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
    OUT_BATCH(batch,
              ((URB_CS_ENTRY_SIZE - 1) << 4) |      /* URB Entry Allocation Size */
              (URB_CS_ENTRIES << 0));               /* Number of URB Entries */
    ADVANCE_BATCH(batch);
}

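/* CMD_CONSTANT_BUFFER with the valid bit (1 << 8) points the CS at the
 * CURBE BO; the low bits of the relocated address carry the buffer
 * length field, hence the URB_CS_ENTRY_SIZE - 1 relocation delta. */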
static void
i965_render_constant_buffer(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
    OUT_RELOC(batch, render_state->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              URB_CS_ENTRY_SIZE - 1);
    ADVANCE_BATCH(batch);
}

static void
i965_render_drawing_rectangle(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;

    BEGIN_BATCH(batch, 4);
    OUT_BATCH(batch, CMD_DRAWING_RECTANGLE | 2);
    OUT_BATCH(batch, 0x00000000);
    OUT_BATCH(batch, (dest_region->width - 1) | (dest_region->height - 1) << 16);
    OUT_BATCH(batch, 0x00000000);
    ADVANCE_BATCH(batch);
}

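/* Two R32G32_FLOAT vertex elements per 16-byte vertex, at offsets 0 and
 * 8; each is expanded to a vec4 with 1.0 in the last two components. */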
static void
i965_render_vertex_elements(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (0 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (4 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_upload_image_palette(
    VADriverContextP ctx,
    VAImageID        image_id,
    unsigned int     alpha
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int i;

    struct object_image *obj_image = IMAGE(image_id);
    assert(obj_image);

    if (obj_image->image.num_palette_entries == 0)
        return;

    BEGIN_BATCH(batch, 1 + obj_image->image.num_palette_entries);
    OUT_BATCH(batch, CMD_SAMPLER_PALETTE_LOAD | (obj_image->image.num_palette_entries - 1));
    /* fill palette: bits 0-23 color, bits 24-31 alpha */
    for (i = 0; i < obj_image->image.num_palette_entries; i++)
        OUT_BATCH(batch, (alpha << 24) | obj_image->palette[i]);
    ADVANCE_BATCH(batch);
}

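/* Point the hardware at the vertex buffer, then draw the quad as a
 * 3-vertex RECTLIST (the hardware infers the fourth corner). */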
static void
i965_render_startup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 11);
    OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
    OUT_BATCH(batch,
              (0 << VB0_BUFFER_INDEX_SHIFT) |
              VB0_VERTEXDATA |
              ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
    OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);

    if (IS_IRONLAKE(i965->intel.device_id))
        OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
    else
        OUT_BATCH(batch, 3);

    OUT_BATCH(batch, 0);

    OUT_BATCH(batch,
              CMD_3DPRIMITIVE |
              _3DPRIMITIVE_VERTEX_SEQUENTIAL |
              (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
              (0 << 9) |
              4);
    OUT_BATCH(batch, 3); /* vertex count per instance */
    OUT_BATCH(batch, 0); /* start vertex offset */
    OUT_BATCH(batch, 1); /* single instance */
    OUT_BATCH(batch, 0); /* start instance location */
    OUT_BATCH(batch, 0); /* index buffer offset, ignored */
    ADVANCE_BATCH(batch);
}

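/* Clear the destination region with the 2D blitter: XY_COLOR_BLT with
 * ROP 0xf0 (PATCOPY) and a solid color of 0x0 (black) in the final
 * dword. Tiled targets take the pitch in dwords, and GEN6+ must issue
 * this on the separate BLT ring. */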
1438 static void 
1439 i965_clear_dest_region(VADriverContextP ctx)
1440 {
1441     struct i965_driver_data *i965 = i965_driver_data(ctx);
1442     struct intel_batchbuffer *batch = i965->batch;
1443     struct i965_render_state *render_state = &i965->render_state;
1444     struct intel_region *dest_region = render_state->draw_region;
1445     unsigned int blt_cmd, br13;
1446     int pitch;
1447
1448     blt_cmd = XY_COLOR_BLT_CMD;
1449     br13 = 0xf0 << 16;
1450     pitch = dest_region->pitch;
1451
1452     if (dest_region->cpp == 4) {
1453         br13 |= BR13_8888;
1454         blt_cmd |= (XY_COLOR_BLT_WRITE_RGB | XY_COLOR_BLT_WRITE_ALPHA);
1455     } else {
1456         assert(dest_region->cpp == 2);
1457         br13 |= BR13_565;
1458     }
1459
1460     if (dest_region->tiling != I915_TILING_NONE) {
1461         blt_cmd |= XY_COLOR_BLT_DST_TILED;
1462         pitch /= 4;
1463     }
1464
1465     br13 |= pitch;
1466
1467     if (IS_GEN6(i965->intel.device_id) ||
1468         IS_GEN7(i965->intel.device_id)) {
1469         intel_batchbuffer_start_atomic_blt(batch, 24);
1470         BEGIN_BLT_BATCH(batch, 6);
1471     } else {
1472         intel_batchbuffer_start_atomic(batch, 24);
1473         BEGIN_BATCH(batch, 6);
1474     }
1475
1476     OUT_BATCH(batch, blt_cmd);
1477     OUT_BATCH(batch, br13);
1478     OUT_BATCH(batch, (dest_region->y << 16) | (dest_region->x));
1479     OUT_BATCH(batch, ((dest_region->y + dest_region->height) << 16) |
1480               (dest_region->x + dest_region->width));
1481     OUT_RELOC(batch, dest_region->bo, 
1482               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
1483               0);
1484     OUT_BATCH(batch, 0x0);
1485     ADVANCE_BATCH(batch);
1486     intel_batchbuffer_end_atomic(batch);
1487 }
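/*
 * BR13 layout for XY_COLOR_BLT as programmed above: bits 25:24 select
 * the color format (BR13_8888 or BR13_565), bits 23:16 the raster
 * operation (0xf0 = PATCOPY, i.e. fill with the solid color in the
 * last dword), and bits 15:0 the destination pitch -- in bytes for
 * linear surfaces and in dwords for tiled ones, hence the pitch /= 4.
 */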
1488
1489 static void
1490 i965_surface_render_pipeline_setup(VADriverContextP ctx)
1491 {
1492     struct i965_driver_data *i965 = i965_driver_data(ctx);
1493     struct intel_batchbuffer *batch = i965->batch;
1494
1495     i965_clear_dest_region(ctx);
1496     intel_batchbuffer_start_atomic(batch, 0x1000);
1497     intel_batchbuffer_emit_mi_flush(batch);
1498     i965_render_pipeline_select(ctx);
1499     i965_render_state_sip(ctx);
1500     i965_render_state_base_address(ctx);
1501     i965_render_binding_table_pointers(ctx);
1502     i965_render_constant_color(ctx);
1503     i965_render_pipelined_pointers(ctx);
1504     i965_render_urb_layout(ctx);
1505     i965_render_cs_urb_layout(ctx);
1506     i965_render_constant_buffer(ctx);
1507     i965_render_drawing_rectangle(ctx);
1508     i965_render_vertex_elements(ctx);
1509     i965_render_startup(ctx);
1510     intel_batchbuffer_end_atomic(batch);
1511 }
1512
1513 static void
1514 i965_subpic_render_pipeline_setup(VADriverContextP ctx)
1515 {
1516     struct i965_driver_data *i965 = i965_driver_data(ctx);
1517     struct intel_batchbuffer *batch = i965->batch;
1518
1519     intel_batchbuffer_start_atomic(batch, 0x1000);
1520     intel_batchbuffer_emit_mi_flush(batch);
1521     i965_render_pipeline_select(ctx);
1522     i965_render_state_sip(ctx);
1523     i965_render_state_base_address(ctx);
1524     i965_render_binding_table_pointers(ctx);
1525     i965_render_constant_color(ctx);
1526     i965_render_pipelined_pointers(ctx);
1527     i965_render_urb_layout(ctx);
1528     i965_render_cs_urb_layout(ctx);
1529     i965_render_drawing_rectangle(ctx);
1530     i965_render_vertex_elements(ctx);
1531     i965_render_startup(ctx);
1532     intel_batchbuffer_end_atomic(batch);
1533 }
1534
1535
1536 static void 
1537 i965_render_initialize(VADriverContextP ctx)
1538 {
1539     struct i965_driver_data *i965 = i965_driver_data(ctx);
1540     struct i965_render_state *render_state = &i965->render_state;
1541     dri_bo *bo;
1542
1543     /* VERTEX BUFFER */
1544     dri_bo_unreference(render_state->vb.vertex_buffer);
1545     bo = dri_bo_alloc(i965->intel.bufmgr,
1546                       "vertex buffer",
1547                       4096,
1548                       4096);
1549     assert(bo);
1550     render_state->vb.vertex_buffer = bo;
1551
1552     /* VS */
1553     dri_bo_unreference(render_state->vs.state);
1554     bo = dri_bo_alloc(i965->intel.bufmgr,
1555                       "vs state",
1556                       sizeof(struct i965_vs_unit_state),
1557                       64);
1558     assert(bo);
1559     render_state->vs.state = bo;
1560
1561     /* GS */
1562     /* CLIP */
1563     /* SF */
1564     dri_bo_unreference(render_state->sf.state);
1565     bo = dri_bo_alloc(i965->intel.bufmgr,
1566                       "sf state",
1567                       sizeof(struct i965_sf_unit_state),
1568                       64);
1569     assert(bo);
1570     render_state->sf.state = bo;
1571
1572     /* WM */
1573     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1574     bo = dri_bo_alloc(i965->intel.bufmgr,
1575                       "surface state & binding table",
1576                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1577                       4096);
1578     assert(bo);
1579     render_state->wm.surface_state_binding_table_bo = bo;
1580
1581     dri_bo_unreference(render_state->wm.sampler);
1582     bo = dri_bo_alloc(i965->intel.bufmgr,
1583                       "sampler state",
1584                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1585                       64);
1586     assert(bo);
1587     render_state->wm.sampler = bo;
1588     render_state->wm.sampler_count = 0;
1589
1590     dri_bo_unreference(render_state->wm.state);
1591     bo = dri_bo_alloc(i965->intel.bufmgr,
1592                       "wm state",
1593                       sizeof(struct i965_wm_unit_state),
1594                       64);
1595     assert(bo);
1596     render_state->wm.state = bo;
1597
1598     /* COLOR CALCULATOR */
1599     dri_bo_unreference(render_state->cc.state);
1600     bo = dri_bo_alloc(i965->intel.bufmgr,
1601                       "color calc state",
1602                       sizeof(struct i965_cc_unit_state),
1603                       64);
1604     assert(bo);
1605     render_state->cc.state = bo;
1606
1607     dri_bo_unreference(render_state->cc.viewport);
1608     bo = dri_bo_alloc(i965->intel.bufmgr,
1609                       "cc viewport",
1610                       sizeof(struct i965_cc_viewport),
1611                       64);
1612     assert(bo);
1613     render_state->cc.viewport = bo;
1614 }
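/*
 * The "surface state & binding table" BO packs both into one
 * allocation: MAX_RENDER_SURFACES padded SURFACE_STATE blocks followed
 * by one 32-bit binding-table entry per surface at BINDING_TABLE_OFFSET.
 * Because the surface-state base address is later pointed at this BO, a
 * table entry is simply the byte offset of its block -- illustratively:
 *
 *     binding_table[i] = SURFACE_STATE_PADDED_SIZE * i;
 */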
1615
1616 static void
1617 i965_render_put_surface(
1618     VADriverContextP   ctx,
1619     VASurfaceID        surface,
1620     const VARectangle *src_rect,
1621     const VARectangle *dst_rect,
1622     unsigned int       flags
1623 )
1624 {
1625     struct i965_driver_data *i965 = i965_driver_data(ctx);
1626     struct intel_batchbuffer *batch = i965->batch;
1627
1628     i965_render_initialize(ctx);
1629     i965_surface_render_state_setup(ctx, surface, src_rect, dst_rect, flags);
1630     i965_surface_render_pipeline_setup(ctx);
1631     intel_batchbuffer_flush(batch);
1632 }
1633
1634 static void
1635 i965_render_put_subpicture(
1636     VADriverContextP   ctx,
1637     VASurfaceID        surface,
1638     const VARectangle *src_rect,
1639     const VARectangle *dst_rect
1640 )
1641 {
1642     struct i965_driver_data *i965 = i965_driver_data(ctx);
1643     struct intel_batchbuffer *batch = i965->batch;
1644     struct object_surface *obj_surface = SURFACE(surface);
1645     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic);
1646
1647     assert(obj_subpic);
1648
1649     i965_render_initialize(ctx);
1650     i965_subpic_render_state_setup(ctx, surface, src_rect, dst_rect);
1651     i965_subpic_render_pipeline_setup(ctx);
1652     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
1653     intel_batchbuffer_flush(batch);
1654 }
1655
1656 /*
1657  * for GEN6+
1658  */
1659 static void 
1660 gen6_render_initialize(VADriverContextP ctx)
1661 {
1662     struct i965_driver_data *i965 = i965_driver_data(ctx);
1663     struct i965_render_state *render_state = &i965->render_state;
1664     dri_bo *bo;
1665
1666     /* VERTEX BUFFER */
1667     dri_bo_unreference(render_state->vb.vertex_buffer);
1668     bo = dri_bo_alloc(i965->intel.bufmgr,
1669                       "vertex buffer",
1670                       4096,
1671                       4096);
1672     assert(bo);
1673     render_state->vb.vertex_buffer = bo;
1674
1675     /* WM */
1676     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1677     bo = dri_bo_alloc(i965->intel.bufmgr,
1678                       "surface state & binding table",
1679                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1680                       4096);
1681     assert(bo);
1682     render_state->wm.surface_state_binding_table_bo = bo;
1683
1684     dri_bo_unreference(render_state->wm.sampler);
1685     bo = dri_bo_alloc(i965->intel.bufmgr,
1686                       "sampler state",
1687                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1688                       4096);
1689     assert(bo);
1690     render_state->wm.sampler = bo;
1691     render_state->wm.sampler_count = 0;
1692
1693     /* COLOR CALCULATOR */
1694     dri_bo_unreference(render_state->cc.state);
1695     bo = dri_bo_alloc(i965->intel.bufmgr,
1696                       "color calc state",
1697                       sizeof(struct gen6_color_calc_state),
1698                       4096);
1699     assert(bo);
1700     render_state->cc.state = bo;
1701
1702     /* CC VIEWPORT */
1703     dri_bo_unreference(render_state->cc.viewport);
1704     bo = dri_bo_alloc(i965->intel.bufmgr,
1705                       "cc viewport",
1706                       sizeof(struct i965_cc_viewport),
1707                       4096);
1708     assert(bo);
1709     render_state->cc.viewport = bo;
1710
1711     /* BLEND STATE */
1712     dri_bo_unreference(render_state->cc.blend);
1713     bo = dri_bo_alloc(i965->intel.bufmgr,
1714                       "blend state",
1715                       sizeof(struct gen6_blend_state),
1716                       4096);
1717     assert(bo);
1718     render_state->cc.blend = bo;
1719
1720     /* DEPTH & STENCIL STATE */
1721     dri_bo_unreference(render_state->cc.depth_stencil);
1722     bo = dri_bo_alloc(i965->intel.bufmgr,
1723                       "depth & stencil state",
1724                       sizeof(struct gen6_depth_stencil_state),
1725                       4096);
1726     assert(bo);
1727     render_state->cc.depth_stencil = bo;
1728 }
1729
1730 static void
1731 gen6_render_color_calc_state(VADriverContextP ctx)
1732 {
1733     struct i965_driver_data *i965 = i965_driver_data(ctx);
1734     struct i965_render_state *render_state = &i965->render_state;
1735     struct gen6_color_calc_state *color_calc_state;
1736     
1737     dri_bo_map(render_state->cc.state, 1);
1738     assert(render_state->cc.state->virtual);
1739     color_calc_state = render_state->cc.state->virtual;
1740     memset(color_calc_state, 0, sizeof(*color_calc_state));
1741     color_calc_state->constant_r = 1.0;
1742     color_calc_state->constant_g = 0.0;
1743     color_calc_state->constant_b = 1.0;
1744     color_calc_state->constant_a = 1.0;
1745     dri_bo_unmap(render_state->cc.state);
1746 }
1747
1748 static void
1749 gen6_render_blend_state(VADriverContextP ctx)
1750 {
1751     struct i965_driver_data *i965 = i965_driver_data(ctx);
1752     struct i965_render_state *render_state = &i965->render_state;
1753     struct gen6_blend_state *blend_state;
1754     
1755     dri_bo_map(render_state->cc.blend, 1);
1756     assert(render_state->cc.blend->virtual);
1757     blend_state = render_state->cc.blend->virtual;
1758     memset(blend_state, 0, sizeof(*blend_state));
1759     blend_state->blend1.logic_op_enable = 1;
1760     blend_state->blend1.logic_op_func = 0xc;
1761     dri_bo_unmap(render_state->cc.blend);
1762 }
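/*
 * Logic-op function 0xc is COPY (output = source), so this blend state
 * writes the shader result straight to the render target; real blending
 * stays disabled on the plain video path.
 */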
1763
1764 static void
1765 gen6_render_depth_stencil_state(VADriverContextP ctx)
1766 {
1767     struct i965_driver_data *i965 = i965_driver_data(ctx);
1768     struct i965_render_state *render_state = &i965->render_state;
1769     struct gen6_depth_stencil_state *depth_stencil_state;
1770     
1771     dri_bo_map(render_state->cc.depth_stencil, 1);
1772     assert(render_state->cc.depth_stencil->virtual);
1773     depth_stencil_state = render_state->cc.depth_stencil->virtual;
1774     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
1775     dri_bo_unmap(render_state->cc.depth_stencil);
1776 }
1777
1778 static void
1779 gen6_render_setup_states(
1780     VADriverContextP   ctx,
1781     VASurfaceID        surface,
1782     const VARectangle *src_rect,
1783     const VARectangle *dst_rect,
1784     unsigned int       flags
1785 )
1786 {
1787     i965_render_dest_surface_state(ctx, 0);
1788     i965_render_src_surfaces_state(ctx, surface, flags);
1789     i965_render_sampler(ctx);
1790     i965_render_cc_viewport(ctx);
1791     gen6_render_color_calc_state(ctx);
1792     gen6_render_blend_state(ctx);
1793     gen6_render_depth_stencil_state(ctx);
1794     i965_render_upload_constants(ctx, surface);
1795     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
1796 }
1797
1798 static void
1799 gen6_emit_invariant_states(VADriverContextP ctx)
1800 {
1801     struct i965_driver_data *i965 = i965_driver_data(ctx);
1802     struct intel_batchbuffer *batch = i965->batch;
1803
1804     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
1805
1806     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (3 - 2));
1807     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
1808               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
1809     OUT_BATCH(batch, 0);
1810
1811     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
1812     OUT_BATCH(batch, 1);
1813
1814     /* Set system instruction pointer */
1815     OUT_BATCH(batch, CMD_STATE_SIP | 0);
1816     OUT_BATCH(batch, 0);
1817 }
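/*
 * The "| (n - 2)" pattern used throughout these emit functions encodes
 * a packet's DWord Length field: a state packet that is n dwords long
 * in total advertises n - 2 in its header, the two header dwords being
 * implied.  GEN6_3DSTATE_MULTISAMPLE above, for instance, is a 3-dword
 * packet, hence "| (3 - 2)".
 */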
1818
1819 static void
1820 gen6_emit_state_base_address(VADriverContextP ctx)
1821 {
1822     struct i965_driver_data *i965 = i965_driver_data(ctx);
1823     struct intel_batchbuffer *batch = i965->batch;
1824     struct i965_render_state *render_state = &i965->render_state;
1825
1826     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
1827     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
1828     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1829     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
1830     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
1831     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
1832     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
1833     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
1834     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
1835     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
1836 }
1837
1838 static void
1839 gen6_emit_viewport_state_pointers(VADriverContextP ctx)
1840 {
1841     struct i965_driver_data *i965 = i965_driver_data(ctx);
1842     struct intel_batchbuffer *batch = i965->batch;
1843     struct i965_render_state *render_state = &i965->render_state;
1844
1845     OUT_BATCH(batch, GEN6_3DSTATE_VIEWPORT_STATE_POINTERS |
1846               GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC |
1847               (4 - 2));
1848     OUT_BATCH(batch, 0);
1849     OUT_BATCH(batch, 0);
1850     OUT_RELOC(batch, render_state->cc.viewport, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1851 }
1852
1853 static void
1854 gen6_emit_urb(VADriverContextP ctx)
1855 {
1856     struct i965_driver_data *i965 = i965_driver_data(ctx);
1857     struct intel_batchbuffer *batch = i965->batch;
1858
1859     OUT_BATCH(batch, GEN6_3DSTATE_URB | (3 - 2));
1860     OUT_BATCH(batch, ((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT) |
1861               (24 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
1862     OUT_BATCH(batch, (0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT) |
1863               (0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT)); /* no GS thread */
1864 }
1865
1866 static void
1867 gen6_emit_cc_state_pointers(VADriverContextP ctx)
1868 {
1869     struct i965_driver_data *i965 = i965_driver_data(ctx);
1870     struct intel_batchbuffer *batch = i965->batch;
1871     struct i965_render_state *render_state = &i965->render_state;
1872
1873     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
1874     OUT_RELOC(batch, render_state->cc.blend, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1875     OUT_RELOC(batch, render_state->cc.depth_stencil, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1876     OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1877 }
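/*
 * The relocation delta of 1 sets bit 0 of each pointer, the per-pointer
 * "modify enable" flag of 3DSTATE_CC_STATE_POINTERS; without it the new
 * blend, depth-stencil and color-calc addresses would not take effect.
 */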
1878
1879 static void
1880 gen6_emit_sampler_state_pointers(VADriverContextP ctx)
1881 {
1882     struct i965_driver_data *i965 = i965_driver_data(ctx);
1883     struct intel_batchbuffer *batch = i965->batch;
1884     struct i965_render_state *render_state = &i965->render_state;
1885
1886     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLER_STATE_POINTERS |
1887               GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS |
1888               (4 - 2));
1889     OUT_BATCH(batch, 0); /* VS */
1890     OUT_BATCH(batch, 0); /* GS */
1891     OUT_RELOC(batch, render_state->wm.sampler, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1892 }
1893
1894 static void
1895 gen6_emit_binding_table(VADriverContextP ctx)
1896 {
1897     struct i965_driver_data *i965 = i965_driver_data(ctx);
1898     struct intel_batchbuffer *batch = i965->batch;
1899
1900     /* Binding table pointers */
1901     OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS |
1902               GEN6_BINDING_TABLE_MODIFY_PS |
1903               (4 - 2));
1904     OUT_BATCH(batch, 0);                /* vs */
1905     OUT_BATCH(batch, 0);                /* gs */
1906     /* Only the PS uses the binding table */
1907     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
1908 }
1909
1910 static void
1911 gen6_emit_depth_buffer_state(VADriverContextP ctx)
1912 {
1913     struct i965_driver_data *i965 = i965_driver_data(ctx);
1914     struct intel_batchbuffer *batch = i965->batch;
1915
1916     OUT_BATCH(batch, CMD_DEPTH_BUFFER | (7 - 2));
1917     OUT_BATCH(batch, (I965_SURFACE_NULL << CMD_DEPTH_BUFFER_TYPE_SHIFT) |
1918               (I965_DEPTHFORMAT_D32_FLOAT << CMD_DEPTH_BUFFER_FORMAT_SHIFT));
1919     OUT_BATCH(batch, 0);
1920     OUT_BATCH(batch, 0);
1921     OUT_BATCH(batch, 0);
1922     OUT_BATCH(batch, 0);
1923     OUT_BATCH(batch, 0);
1924
1925     OUT_BATCH(batch, CMD_CLEAR_PARAMS | (2 - 2));
1926     OUT_BATCH(batch, 0);
1927 }
1928
1929 static void
1930 gen6_emit_drawing_rectangle(VADriverContextP ctx)
1931 {
1932     i965_render_drawing_rectangle(ctx);
1933 }
1934
1935 static void 
1936 gen6_emit_vs_state(VADriverContextP ctx)
1937 {
1938     struct i965_driver_data *i965 = i965_driver_data(ctx);
1939     struct intel_batchbuffer *batch = i965->batch;
1940
1941     /* disable VS constant buffer */
1942     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (5 - 2));
1943     OUT_BATCH(batch, 0);
1944     OUT_BATCH(batch, 0);
1945     OUT_BATCH(batch, 0);
1946     OUT_BATCH(batch, 0);
1947         
1948     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
1949     OUT_BATCH(batch, 0); /* without VS kernel */
1950     OUT_BATCH(batch, 0);
1951     OUT_BATCH(batch, 0);
1952     OUT_BATCH(batch, 0);
1953     OUT_BATCH(batch, 0); /* pass-through */
1954 }
1955
1956 static void 
1957 gen6_emit_gs_state(VADriverContextP ctx)
1958 {
1959     struct i965_driver_data *i965 = i965_driver_data(ctx);
1960     struct intel_batchbuffer *batch = i965->batch;
1961
1962     /* disable GS constant buffer */
1963     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (5 - 2));
1964     OUT_BATCH(batch, 0);
1965     OUT_BATCH(batch, 0);
1966     OUT_BATCH(batch, 0);
1967     OUT_BATCH(batch, 0);
1968         
1969     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
1970     OUT_BATCH(batch, 0); /* without GS kernel */
1971     OUT_BATCH(batch, 0);
1972     OUT_BATCH(batch, 0);
1973     OUT_BATCH(batch, 0);
1974     OUT_BATCH(batch, 0);
1975     OUT_BATCH(batch, 0); /* pass-through */
1976 }
1977
1978 static void 
1979 gen6_emit_clip_state(VADriverContextP ctx)
1980 {
1981     struct i965_driver_data *i965 = i965_driver_data(ctx);
1982     struct intel_batchbuffer *batch = i965->batch;
1983
1984     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
1985     OUT_BATCH(batch, 0);
1986     OUT_BATCH(batch, 0); /* pass-through */
1987     OUT_BATCH(batch, 0);
1988 }
1989
1990 static void 
1991 gen6_emit_sf_state(VADriverContextP ctx)
1992 {
1993     struct i965_driver_data *i965 = i965_driver_data(ctx);
1994     struct intel_batchbuffer *batch = i965->batch;
1995
1996     OUT_BATCH(batch, GEN6_3DSTATE_SF | (20 - 2));
1997     OUT_BATCH(batch, (1 << GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT) |
1998               (1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT) |
1999               (0 << GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT));
2000     OUT_BATCH(batch, 0);
2001     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
2002     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT); /* DW4 */
2003     OUT_BATCH(batch, 0);
2004     OUT_BATCH(batch, 0);
2005     OUT_BATCH(batch, 0);
2006     OUT_BATCH(batch, 0);
2007     OUT_BATCH(batch, 0); /* DW9 */
2008     OUT_BATCH(batch, 0);
2009     OUT_BATCH(batch, 0);
2010     OUT_BATCH(batch, 0);
2011     OUT_BATCH(batch, 0);
2012     OUT_BATCH(batch, 0); /* DW14 */
2013     OUT_BATCH(batch, 0);
2014     OUT_BATCH(batch, 0);
2015     OUT_BATCH(batch, 0);
2016     OUT_BATCH(batch, 0);
2017     OUT_BATCH(batch, 0); /* DW19 */
2018 }
2019
2020 static void 
2021 gen6_emit_wm_state(VADriverContextP ctx, int kernel)
2022 {
2023     struct i965_driver_data *i965 = i965_driver_data(ctx);
2024     struct intel_batchbuffer *batch = i965->batch;
2025     struct i965_render_state *render_state = &i965->render_state;
2026
2027     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS |
2028               GEN6_3DSTATE_CONSTANT_BUFFER_0_ENABLE |
2029               (5 - 2));
2030     OUT_RELOC(batch, 
2031               render_state->curbe.bo,
2032               I915_GEM_DOMAIN_INSTRUCTION, 0,
2033               0);
2034     OUT_BATCH(batch, 0);
2035     OUT_BATCH(batch, 0);
2036     OUT_BATCH(batch, 0);
2037
2038     OUT_BATCH(batch, GEN6_3DSTATE_WM | (9 - 2));
2039     OUT_RELOC(batch, render_state->render_kernels[kernel].bo,
2040               I915_GEM_DOMAIN_INSTRUCTION, 0,
2041               0);
2042     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHITF) |
2043               (5 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT));
2044     OUT_BATCH(batch, 0);
2045     OUT_BATCH(batch, (6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT)); /* DW4 */
2046     OUT_BATCH(batch, ((40 - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
2047               GEN6_3DSTATE_WM_DISPATCH_ENABLE |
2048               GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
2049     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT) |
2050               GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
2051     OUT_BATCH(batch, 0);
2052     OUT_BATCH(batch, 0);
2053 }
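/*
 * GEN6 WM field notes: the sampler count is in units of 4 (so "1"
 * covers up to four samplers), the maximum thread count is encoded as
 * N - 1 (40 threads here), and SIMD16 dispatch starts at GRF register
 * 6.  GEN6_3DSTATE_WM_SAMPLER_COUNT_SHITF is spelled that way in
 * i965_defines.h, so the misspelling is preserved here.
 */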
2054
2055 static void
2056 gen6_emit_vertex_element_state(VADriverContextP ctx)
2057 {
2058     struct i965_driver_data *i965 = i965_driver_data(ctx);
2059     struct intel_batchbuffer *batch = i965->batch;
2060
2061     /* Set up our vertex elements, sourced from the single vertex buffer. */
2062     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
2063     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
2064     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2065               GEN6_VE0_VALID |
2066               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2067               (0 << VE0_OFFSET_SHIFT));
2068     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
2069               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2070               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2071               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2072     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
2073     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2074               GEN6_VE0_VALID |
2075               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2076               (8 << VE0_OFFSET_SHIFT));
2077     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
2078               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2079               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2080               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2081 }
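/*
 * Both elements fetch R32G32_FLOAT from vertex buffer 0, so each vertex
 * is 16 bytes: position at offset 0, texture coordinate at offset 8.
 * The layout the vertex upload code must match (a sketch, not a struct
 * the driver actually declares):
 *
 *     struct render_vertex {
 *         float x, y;    -- destination position, offset 0
 *         float s, t;    -- source texcoord, offset 8
 *     };                 -- sizeof == 16 == the VB pitch of 4 * 4
 */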
2082
2083 static void
2084 gen6_emit_vertices(VADriverContextP ctx)
2085 {
2086     struct i965_driver_data *i965 = i965_driver_data(ctx);
2087     struct intel_batchbuffer *batch = i965->batch;
2088     struct i965_render_state *render_state = &i965->render_state;
2089
2090     BEGIN_BATCH(batch, 11);
2091     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
2092     OUT_BATCH(batch, 
2093               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2094               GEN6_VB0_VERTEXDATA |
2095               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2096     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2097     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2098     OUT_BATCH(batch, 0);
2099
2100     OUT_BATCH(batch, 
2101               CMD_3DPRIMITIVE |
2102               _3DPRIMITIVE_VERTEX_SEQUENTIAL |
2103               (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
2104               (0 << 9) |
2105               4);
2106     OUT_BATCH(batch, 3); /* vertex count per instance */
2107     OUT_BATCH(batch, 0); /* start vertex offset */
2108     OUT_BATCH(batch, 1); /* single instance */
2109     OUT_BATCH(batch, 0); /* start instance location */
2110     OUT_BATCH(batch, 0); /* index buffer offset, ignored */
2111     ADVANCE_BATCH(batch);
2112 }
2113
2114 static void
2115 gen6_render_emit_states(VADriverContextP ctx, int kernel)
2116 {
2117     struct i965_driver_data *i965 = i965_driver_data(ctx);
2118     struct intel_batchbuffer *batch = i965->batch;
2119
2120     intel_batchbuffer_start_atomic(batch, 0x1000);
2121     intel_batchbuffer_emit_mi_flush(batch);
2122     gen6_emit_invariant_states(ctx);
2123     gen6_emit_state_base_address(ctx);
2124     gen6_emit_viewport_state_pointers(ctx);
2125     gen6_emit_urb(ctx);
2126     gen6_emit_cc_state_pointers(ctx);
2127     gen6_emit_sampler_state_pointers(ctx);
2128     gen6_emit_vs_state(ctx);
2129     gen6_emit_gs_state(ctx);
2130     gen6_emit_clip_state(ctx);
2131     gen6_emit_sf_state(ctx);
2132     gen6_emit_wm_state(ctx, kernel);
2133     gen6_emit_binding_table(ctx);
2134     gen6_emit_depth_buffer_state(ctx);
2135     gen6_emit_drawing_rectangle(ctx);
2136     gen6_emit_vertex_element_state(ctx);
2137     gen6_emit_vertices(ctx);
2138     intel_batchbuffer_end_atomic(batch);
2139 }
2140
2141 static void
2142 gen6_render_put_surface(
2143     VADriverContextP   ctx,
2144     VASurfaceID        surface,
2145     const VARectangle *src_rect,
2146     const VARectangle *dst_rect,
2147     unsigned int       flags
2148 )
2149 {
2150     struct i965_driver_data *i965 = i965_driver_data(ctx);
2151     struct intel_batchbuffer *batch = i965->batch;
2152
2153     gen6_render_initialize(ctx);
2154     gen6_render_setup_states(ctx, surface, src_rect, dst_rect, flags);
2155     i965_clear_dest_region(ctx);
2156     gen6_render_emit_states(ctx, PS_KERNEL);
2157     intel_batchbuffer_flush(batch);
2158 }
2159
2160 static void
2161 gen6_subpicture_render_blend_state(VADriverContextP ctx)
2162 {
2163     struct i965_driver_data *i965 = i965_driver_data(ctx);
2164     struct i965_render_state *render_state = &i965->render_state;
2165     struct gen6_blend_state *blend_state;
2166
2167     /* no unmap needed here: cc.state was already unmapped in gen6_render_color_calc_state() */
2168     dri_bo_map(render_state->cc.blend, 1);
2169     assert(render_state->cc.blend->virtual);
2170     blend_state = render_state->cc.blend->virtual;
2171     memset(blend_state, 0, sizeof(*blend_state));
2172     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2173     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2174     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2175     blend_state->blend0.blend_enable = 1;
2176     blend_state->blend1.post_blend_clamp_enable = 1;
2177     blend_state->blend1.pre_blend_clamp_enable = 1;
2178     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2179     dri_bo_unmap(render_state->cc.blend);
2180 }
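/*
 * This configures standard (non-premultiplied) source-over blending,
 *
 *     C_out = alpha_src * C_src + (1 - alpha_src) * C_dst
 *
 * which is what composites the ARGB subpicture over the video frame;
 * both pre- and post-blend clamps keep the result in [0, 1].
 */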
2181
2182 static void
2183 gen6_subpicture_render_setup_states(
2184     VADriverContextP   ctx,
2185     VASurfaceID        surface,
2186     const VARectangle *src_rect,
2187     const VARectangle *dst_rect
2188 )
2189 {
2190     i965_render_dest_surface_state(ctx, 0);
2191     i965_subpic_render_src_surfaces_state(ctx, surface);
2192     i965_render_sampler(ctx);
2193     i965_render_cc_viewport(ctx);
2194     gen6_render_color_calc_state(ctx);
2195     gen6_subpicture_render_blend_state(ctx);
2196     gen6_render_depth_stencil_state(ctx);
2197     i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
2198 }
2199
2200 static void
2201 gen6_render_put_subpicture(
2202     VADriverContextP   ctx,
2203     VASurfaceID        surface,
2204     const VARectangle *src_rect,
2205     const VARectangle *dst_rect
2206 )
2207 {
2208     struct i965_driver_data *i965 = i965_driver_data(ctx);
2209     struct intel_batchbuffer *batch = i965->batch;
2210     struct object_surface *obj_surface = SURFACE(surface);
2211     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic);
2212
2213     assert(obj_subpic);
2214     gen6_render_initialize(ctx);
2215     gen6_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
2216     gen6_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2217     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2218     intel_batchbuffer_flush(batch);
2219 }
2220
2221 /*
2222  * for GEN7
2223  */
2224 static void 
2225 gen7_render_initialize(VADriverContextP ctx)
2226 {
2227     struct i965_driver_data *i965 = i965_driver_data(ctx);
2228     struct i965_render_state *render_state = &i965->render_state;
2229     dri_bo *bo;
2230
2231     /* VERTEX BUFFER */
2232     dri_bo_unreference(render_state->vb.vertex_buffer);
2233     bo = dri_bo_alloc(i965->intel.bufmgr,
2234                       "vertex buffer",
2235                       4096,
2236                       4096);
2237     assert(bo);
2238     render_state->vb.vertex_buffer = bo;
2239
2240     /* WM */
2241     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
2242     bo = dri_bo_alloc(i965->intel.bufmgr,
2243                       "surface state & binding table",
2244                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
2245                       4096);
2246     assert(bo);
2247     render_state->wm.surface_state_binding_table_bo = bo;
2248
2249     dri_bo_unreference(render_state->wm.sampler);
2250     bo = dri_bo_alloc(i965->intel.bufmgr,
2251                       "sampler state",
2252                       MAX_SAMPLERS * sizeof(struct gen7_sampler_state),
2253                       4096);
2254     assert(bo);
2255     render_state->wm.sampler = bo;
2256     render_state->wm.sampler_count = 0;
2257
2258     /* COLOR CALCULATOR */
2259     dri_bo_unreference(render_state->cc.state);
2260     bo = dri_bo_alloc(i965->intel.bufmgr,
2261                       "color calc state",
2262                       sizeof(struct gen6_color_calc_state),
2263                       4096);
2264     assert(bo);
2265     render_state->cc.state = bo;
2266
2267     /* CC VIEWPORT */
2268     dri_bo_unreference(render_state->cc.viewport);
2269     bo = dri_bo_alloc(i965->intel.bufmgr,
2270                       "cc viewport",
2271                       sizeof(struct i965_cc_viewport),
2272                       4096);
2273     assert(bo);
2274     render_state->cc.viewport = bo;
2275
2276     /* BLEND STATE */
2277     dri_bo_unreference(render_state->cc.blend);
2278     bo = dri_bo_alloc(i965->intel.bufmgr,
2279                       "blend state",
2280                       sizeof(struct gen6_blend_state),
2281                       4096);
2282     assert(bo);
2283     render_state->cc.blend = bo;
2284
2285     /* DEPTH & STENCIL STATE */
2286     dri_bo_unreference(render_state->cc.depth_stencil);
2287     bo = dri_bo_alloc(i965->intel.bufmgr,
2288                       "depth & stencil state",
2289                       sizeof(struct gen6_depth_stencil_state),
2290                       4096);
2291     assert(bo);
2292     render_state->cc.depth_stencil = bo;
2293 }
2294
2295 static void
2296 gen7_render_color_calc_state(VADriverContextP ctx)
2297 {
2298     struct i965_driver_data *i965 = i965_driver_data(ctx);
2299     struct i965_render_state *render_state = &i965->render_state;
2300     struct gen6_color_calc_state *color_calc_state;
2301     
2302     dri_bo_map(render_state->cc.state, 1);
2303     assert(render_state->cc.state->virtual);
2304     color_calc_state = render_state->cc.state->virtual;
2305     memset(color_calc_state, 0, sizeof(*color_calc_state));
2306     color_calc_state->constant_r = 1.0;
2307     color_calc_state->constant_g = 0.0;
2308     color_calc_state->constant_b = 1.0;
2309     color_calc_state->constant_a = 1.0;
2310     dri_bo_unmap(render_state->cc.state);
2311 }
2312
2313 static void
2314 gen7_render_blend_state(VADriverContextP ctx)
2315 {
2316     struct i965_driver_data *i965 = i965_driver_data(ctx);
2317     struct i965_render_state *render_state = &i965->render_state;
2318     struct gen6_blend_state *blend_state;
2319     
2320     dri_bo_map(render_state->cc.blend, 1);
2321     assert(render_state->cc.blend->virtual);
2322     blend_state = render_state->cc.blend->virtual;
2323     memset(blend_state, 0, sizeof(*blend_state));
2324     blend_state->blend1.logic_op_enable = 1;
2325     blend_state->blend1.logic_op_func = 0xc;
2326     blend_state->blend1.pre_blend_clamp_enable = 1;
2327     dri_bo_unmap(render_state->cc.blend);
2328 }
2329
2330 static void
2331 gen7_render_depth_stencil_state(VADriverContextP ctx)
2332 {
2333     struct i965_driver_data *i965 = i965_driver_data(ctx);
2334     struct i965_render_state *render_state = &i965->render_state;
2335     struct gen6_depth_stencil_state *depth_stencil_state;
2336     
2337     dri_bo_map(render_state->cc.depth_stencil, 1);
2338     assert(render_state->cc.depth_stencil->virtual);
2339     depth_stencil_state = render_state->cc.depth_stencil->virtual;
2340     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
2341     dri_bo_unmap(render_state->cc.depth_stencil);
2342 }
2343
2344 static void 
2345 gen7_render_sampler(VADriverContextP ctx)
2346 {
2347     struct i965_driver_data *i965 = i965_driver_data(ctx);
2348     struct i965_render_state *render_state = &i965->render_state;
2349     struct gen7_sampler_state *sampler_state;
2350     int i;
2351     
2352     assert(render_state->wm.sampler_count > 0);
2353     assert(render_state->wm.sampler_count <= MAX_SAMPLERS);
2354
2355     dri_bo_map(render_state->wm.sampler, 1);
2356     assert(render_state->wm.sampler->virtual);
2357     sampler_state = render_state->wm.sampler->virtual;
2358     for (i = 0; i < render_state->wm.sampler_count; i++) {
2359         memset(sampler_state, 0, sizeof(*sampler_state));
2360         sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
2361         sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
2362         sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2363         sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2364         sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2365         sampler_state++;
2366     }
2367
2368     dri_bo_unmap(render_state->wm.sampler);
2369 }
2370
2371 static void
2372 gen7_render_setup_states(
2373     VADriverContextP   ctx,
2374     VASurfaceID        surface,
2375     const VARectangle *src_rect,
2376     const VARectangle *dst_rect,
2377     unsigned int       flags
2378 )
2379 {
2380     i965_render_dest_surface_state(ctx, 0);
2381     i965_render_src_surfaces_state(ctx, surface, flags);
2382     gen7_render_sampler(ctx);
2383     i965_render_cc_viewport(ctx);
2384     gen7_render_color_calc_state(ctx);
2385     gen7_render_blend_state(ctx);
2386     gen7_render_depth_stencil_state(ctx);
2387     i965_render_upload_constants(ctx, surface);
2388     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
2389 }
2390
2391 static void
2392 gen7_emit_invariant_states(VADriverContextP ctx)
2393 {
2394     struct i965_driver_data *i965 = i965_driver_data(ctx);
2395     struct intel_batchbuffer *batch = i965->batch;
2396
2397     BEGIN_BATCH(batch, 1);
2398     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
2399     ADVANCE_BATCH(batch);
2400
2401     BEGIN_BATCH(batch, 4);
2402     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (4 - 2));
2403     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
2404               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
2405     OUT_BATCH(batch, 0);
2406     OUT_BATCH(batch, 0);
2407     ADVANCE_BATCH(batch);
2408
2409     BEGIN_BATCH(batch, 2);
2410     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
2411     OUT_BATCH(batch, 1);
2412     ADVANCE_BATCH(batch);
2413
2414     /* Set system instruction pointer */
2415     BEGIN_BATCH(batch, 2);
2416     OUT_BATCH(batch, CMD_STATE_SIP | 0);
2417     OUT_BATCH(batch, 0);
2418     ADVANCE_BATCH(batch);
2419 }
2420
2421 static void
2422 gen7_emit_state_base_address(VADriverContextP ctx)
2423 {
2424     struct i965_driver_data *i965 = i965_driver_data(ctx);
2425     struct intel_batchbuffer *batch = i965->batch;
2426     struct i965_render_state *render_state = &i965->render_state;
2427
2428     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
2429     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
2430     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
2431     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
2432     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
2433     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
2434     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
2435     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
2436     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
2437     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
2438 }
2439
2440 static void
2441 gen7_emit_viewport_state_pointers(VADriverContextP ctx)
2442 {
2443     struct i965_driver_data *i965 = i965_driver_data(ctx);
2444     struct intel_batchbuffer *batch = i965->batch;
2445     struct i965_render_state *render_state = &i965->render_state;
2446
2447     BEGIN_BATCH(batch, 2);
2448     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
2449     OUT_RELOC(batch,
2450               render_state->cc.viewport,
2451               I915_GEM_DOMAIN_INSTRUCTION, 0,
2452               0);
2453     ADVANCE_BATCH(batch);
2454
2455     BEGIN_BATCH(batch, 2);
2456     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL | (2 - 2));
2457     OUT_BATCH(batch, 0);
2458     ADVANCE_BATCH(batch);
2459 }
2460
2461 /*
2462  * URB layout on GEN7 
2463  * ----------------------------------------
2464  * | PS Push Constants (8KB) | VS entries |
2465  * ----------------------------------------
2466  */
2467 static void
2468 gen7_emit_urb(VADriverContextP ctx)
2469 {
2470     struct i965_driver_data *i965 = i965_driver_data(ctx);
2471     struct intel_batchbuffer *batch = i965->batch;
2472     unsigned int num_urb_entries = 32;
2473
2474     if (IS_HASWELL(i965->intel.device_id))
2475         num_urb_entries = 64;
2476
2477     BEGIN_BATCH(batch, 2);
2478     OUT_BATCH(batch, GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS | (2 - 2));
2479     OUT_BATCH(batch, 8); /* in 1KBs */
2480     ADVANCE_BATCH(batch);
2481
2482     BEGIN_BATCH(batch, 2);
2483     OUT_BATCH(batch, GEN7_3DSTATE_URB_VS | (2 - 2));
2484     OUT_BATCH(batch,
2485               (num_urb_entries << GEN7_URB_ENTRY_NUMBER_SHIFT) |
2486               ((2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT) |
2487               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2488     ADVANCE_BATCH(batch);
2489
2490     BEGIN_BATCH(batch, 2);
2491     OUT_BATCH(batch, GEN7_3DSTATE_URB_GS | (2 - 2));
2492     OUT_BATCH(batch,
2493               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2494               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2495     ADVANCE_BATCH(batch);
2496
2497     BEGIN_BATCH(batch, 2);
2498     OUT_BATCH(batch, GEN7_3DSTATE_URB_HS | (2 - 2));
2499     OUT_BATCH(batch,
2500               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2501               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2502     ADVANCE_BATCH(batch);
2503
2504     BEGIN_BATCH(batch, 2);
2505     OUT_BATCH(batch, GEN7_3DSTATE_URB_DS | (2 - 2));
2506     OUT_BATCH(batch,
2507               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2508               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2509     ADVANCE_BATCH(batch);
2510 }
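/*
 * URB programming notes, matching the diagram above gen7_emit_urb: the
 * PS push-constant allocation of "8" is in 1KB units, i.e. the 8KB
 * block at the start of the URB.  Starting addresses are in 8KB units,
 * so VS and GS entries begin at block 1 (just past the push constants)
 * and HS/DS at block 2.  Entry sizes are encoded minus one in 64-byte
 * units, so "(2 - 1)" requests 128-byte VS entries; only the VS is
 * given any entries (32, or 64 on Haswell).
 */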
2511
2512 static void
2513 gen7_emit_cc_state_pointers(VADriverContextP ctx)
2514 {
2515     struct i965_driver_data *i965 = i965_driver_data(ctx);
2516     struct intel_batchbuffer *batch = i965->batch;
2517     struct i965_render_state *render_state = &i965->render_state;
2518
2519     BEGIN_BATCH(batch, 2);
2520     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (2 - 2));
2521     OUT_RELOC(batch,
2522               render_state->cc.state,
2523               I915_GEM_DOMAIN_INSTRUCTION, 0,
2524               1);
2525     ADVANCE_BATCH(batch);
2526
2527     BEGIN_BATCH(batch, 2);
2528     OUT_BATCH(batch, GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
2529     OUT_RELOC(batch,
2530               render_state->cc.blend,
2531               I915_GEM_DOMAIN_INSTRUCTION, 0,
2532               1);
2533     ADVANCE_BATCH(batch);
2534
2535     BEGIN_BATCH(batch, 2);
2536     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS | (2 - 2));
2537     OUT_RELOC(batch,
2538               render_state->cc.depth_stencil,
2539               I915_GEM_DOMAIN_INSTRUCTION, 0, 
2540               1);
2541     ADVANCE_BATCH(batch);
2542 }
2543
2544 static void
2545 gen7_emit_sampler_state_pointers(VADriverContextP ctx)
2546 {
2547     struct i965_driver_data *i965 = i965_driver_data(ctx);
2548     struct intel_batchbuffer *batch = i965->batch;
2549     struct i965_render_state *render_state = &i965->render_state;
2550
2551     BEGIN_BATCH(batch, 2);
2552     OUT_BATCH(batch, GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
2553     OUT_RELOC(batch,
2554               render_state->wm.sampler,
2555               I915_GEM_DOMAIN_INSTRUCTION, 0,
2556               0);
2557     ADVANCE_BATCH(batch);
2558 }
2559
2560 static void
2561 gen7_emit_binding_table(VADriverContextP ctx)
2562 {
2563     struct i965_driver_data *i965 = i965_driver_data(ctx);
2564     struct intel_batchbuffer *batch = i965->batch;
2565
2566     BEGIN_BATCH(batch, 2);
2567     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS | (2 - 2));
2568     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
2569     ADVANCE_BATCH(batch);
2570 }
2571
2572 static void
2573 gen7_emit_depth_buffer_state(VADriverContextP ctx)
2574 {
2575     struct i965_driver_data *i965 = i965_driver_data(ctx);
2576     struct intel_batchbuffer *batch = i965->batch;
2577
2578     BEGIN_BATCH(batch, 7);
2579     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_BUFFER | (7 - 2));
2580     OUT_BATCH(batch,
2581               (I965_DEPTHFORMAT_D32_FLOAT << 18) |
2582               (I965_SURFACE_NULL << 29));
2583     OUT_BATCH(batch, 0);
2584     OUT_BATCH(batch, 0);
2585     OUT_BATCH(batch, 0);
2586     OUT_BATCH(batch, 0);
2587     OUT_BATCH(batch, 0);
2588     ADVANCE_BATCH(batch);
2589
2590     BEGIN_BATCH(batch, 3);
2591     OUT_BATCH(batch, GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
2592     OUT_BATCH(batch, 0);
2593     OUT_BATCH(batch, 0);
2594     ADVANCE_BATCH(batch);
2595 }
2596
2597 static void
2598 gen7_emit_drawing_rectangle(VADriverContextP ctx)
2599 {
2600     i965_render_drawing_rectangle(ctx);
2601 }
2602
2603 static void 
2604 gen7_emit_vs_state(VADriverContextP ctx)
2605 {
2606     struct i965_driver_data *i965 = i965_driver_data(ctx);
2607     struct intel_batchbuffer *batch = i965->batch;
2608
2609     /* disable VS constant buffer */
2610     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (7 - 2));
2611     OUT_BATCH(batch, 0);
2612     OUT_BATCH(batch, 0);
2613     OUT_BATCH(batch, 0);
2614     OUT_BATCH(batch, 0);
2615     OUT_BATCH(batch, 0);
2616     OUT_BATCH(batch, 0);
2617         
2618     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
2619     OUT_BATCH(batch, 0); /* without VS kernel */
2620     OUT_BATCH(batch, 0);
2621     OUT_BATCH(batch, 0);
2622     OUT_BATCH(batch, 0);
2623     OUT_BATCH(batch, 0); /* pass-through */
2624 }
2625
2626 static void 
2627 gen7_emit_bypass_state(VADriverContextP ctx)
2628 {
2629     struct i965_driver_data *i965 = i965_driver_data(ctx);
2630     struct intel_batchbuffer *batch = i965->batch;
2631
2632     /* bypass GS */
2633     BEGIN_BATCH(batch, 7);
2634     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (7 - 2));
2635     OUT_BATCH(batch, 0);
2636     OUT_BATCH(batch, 0);
2637     OUT_BATCH(batch, 0);
2638     OUT_BATCH(batch, 0);
2639     OUT_BATCH(batch, 0);
2640     OUT_BATCH(batch, 0);
2641     ADVANCE_BATCH(batch);
2642
2643     BEGIN_BATCH(batch, 7);      
2644     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
2645     OUT_BATCH(batch, 0); /* without GS kernel */
2646     OUT_BATCH(batch, 0);
2647     OUT_BATCH(batch, 0);
2648     OUT_BATCH(batch, 0);
2649     OUT_BATCH(batch, 0);
2650     OUT_BATCH(batch, 0); /* pass-through */
2651     ADVANCE_BATCH(batch);
2652
2653     BEGIN_BATCH(batch, 2);
2654     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_GS | (2 - 2));
2655     OUT_BATCH(batch, 0);
2656     ADVANCE_BATCH(batch);
2657
2658     /* disable HS */
2659     BEGIN_BATCH(batch, 7);
2660     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_HS | (7 - 2));
2661     OUT_BATCH(batch, 0);
2662     OUT_BATCH(batch, 0);
2663     OUT_BATCH(batch, 0);
2664     OUT_BATCH(batch, 0);
2665     OUT_BATCH(batch, 0);
2666     OUT_BATCH(batch, 0);
2667     ADVANCE_BATCH(batch);
2668
2669     BEGIN_BATCH(batch, 7);
2670     OUT_BATCH(batch, GEN7_3DSTATE_HS | (7 - 2));
2671     OUT_BATCH(batch, 0);
2672     OUT_BATCH(batch, 0);
2673     OUT_BATCH(batch, 0);
2674     OUT_BATCH(batch, 0);
2675     OUT_BATCH(batch, 0);
2676     OUT_BATCH(batch, 0);
2677     ADVANCE_BATCH(batch);
2678
2679     BEGIN_BATCH(batch, 2);
2680     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_HS | (2 - 2));
2681     OUT_BATCH(batch, 0);
2682     ADVANCE_BATCH(batch);
2683
2684     /* Disable TE */
2685     BEGIN_BATCH(batch, 4);
2686     OUT_BATCH(batch, GEN7_3DSTATE_TE | (4 - 2));
2687     OUT_BATCH(batch, 0);
2688     OUT_BATCH(batch, 0);
2689     OUT_BATCH(batch, 0);
2690     ADVANCE_BATCH(batch);
2691
2692     /* Disable DS */
2693     BEGIN_BATCH(batch, 7);
2694     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_DS | (7 - 2));
2695     OUT_BATCH(batch, 0);
2696     OUT_BATCH(batch, 0);
2697     OUT_BATCH(batch, 0);
2698     OUT_BATCH(batch, 0);
2699     OUT_BATCH(batch, 0);
2700     OUT_BATCH(batch, 0);
2701     ADVANCE_BATCH(batch);
2702
2703     BEGIN_BATCH(batch, 6);
2704     OUT_BATCH(batch, GEN7_3DSTATE_DS | (6 - 2));
2705     OUT_BATCH(batch, 0);
2706     OUT_BATCH(batch, 0);
2707     OUT_BATCH(batch, 0);
2708     OUT_BATCH(batch, 0);
2709     OUT_BATCH(batch, 0);
2710     ADVANCE_BATCH(batch);
2711
2712     BEGIN_BATCH(batch, 2);
2713     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_DS | (2 - 2));
2714     OUT_BATCH(batch, 0);
2715     ADVANCE_BATCH(batch);
2716
2717     /* Disable STREAMOUT */
2718     BEGIN_BATCH(batch, 3);
2719     OUT_BATCH(batch, GEN7_3DSTATE_STREAMOUT | (3 - 2));
2720     OUT_BATCH(batch, 0);
2721     OUT_BATCH(batch, 0);
2722     ADVANCE_BATCH(batch);
2723 }
2724
2725 static void 
2726 gen7_emit_clip_state(VADriverContextP ctx)
2727 {
2728     struct i965_driver_data *i965 = i965_driver_data(ctx);
2729     struct intel_batchbuffer *batch = i965->batch;
2730
2731     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
2732     OUT_BATCH(batch, 0);
2733     OUT_BATCH(batch, 0); /* pass-through */
2734     OUT_BATCH(batch, 0);
2735 }
2736
2737 static void 
2738 gen7_emit_sf_state(VADriverContextP ctx)
2739 {
2740     struct i965_driver_data *i965 = i965_driver_data(ctx);
2741     struct intel_batchbuffer *batch = i965->batch;
2742
2743     BEGIN_BATCH(batch, 14);
2744     OUT_BATCH(batch, GEN7_3DSTATE_SBE | (14 - 2));
2745     OUT_BATCH(batch,
2746               (1 << GEN7_SBE_NUM_OUTPUTS_SHIFT) |
2747               (1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT) |
2748               (0 << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT));
2749     OUT_BATCH(batch, 0);
2750     OUT_BATCH(batch, 0);
2751     OUT_BATCH(batch, 0); /* DW4 */
2752     OUT_BATCH(batch, 0);
2753     OUT_BATCH(batch, 0);
2754     OUT_BATCH(batch, 0);
2755     OUT_BATCH(batch, 0);
2756     OUT_BATCH(batch, 0); /* DW9 */
2757     OUT_BATCH(batch, 0);
2758     OUT_BATCH(batch, 0);
2759     OUT_BATCH(batch, 0);
2760     OUT_BATCH(batch, 0);
2761     ADVANCE_BATCH(batch);
2762
2763     BEGIN_BATCH(batch, 7);
2764     OUT_BATCH(batch, GEN6_3DSTATE_SF | (7 - 2));
2765     OUT_BATCH(batch, 0);
2766     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
2767     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT);
2768     OUT_BATCH(batch, 0);
2769     OUT_BATCH(batch, 0);
2770     OUT_BATCH(batch, 0);
2771     ADVANCE_BATCH(batch);
2772 }
2773
2774 static void 
2775 gen7_emit_wm_state(VADriverContextP ctx, int kernel)
2776 {
2777     struct i965_driver_data *i965 = i965_driver_data(ctx);
2778     struct intel_batchbuffer *batch = i965->batch;
2779     struct i965_render_state *render_state = &i965->render_state;
2780     unsigned int max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_IVB;
2781     unsigned int num_samples = 0;
2782
2783     if (IS_HASWELL(i965->intel.device_id)) {
2784         max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_HSW;
2785         num_samples = 1 << GEN7_PS_SAMPLE_MASK_SHIFT_HSW;
2786     }
2787
2788     BEGIN_BATCH(batch, 3);
2789     OUT_BATCH(batch, GEN6_3DSTATE_WM | (3 - 2));
2790     OUT_BATCH(batch,
2791               GEN7_WM_DISPATCH_ENABLE |
2792               GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
2793     OUT_BATCH(batch, 0);
2794     ADVANCE_BATCH(batch);
2795
2796     BEGIN_BATCH(batch, 7);
2797     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS | (7 - 2));
2798     OUT_BATCH(batch, 1); /* constant buffer 0 read length */
2799     OUT_BATCH(batch, 0);
2800     OUT_RELOC(batch, 
2801               render_state->curbe.bo,
2802               I915_GEM_DOMAIN_INSTRUCTION, 0,
2803               0);
2804     OUT_BATCH(batch, 0);
2805     OUT_BATCH(batch, 0);
2806     OUT_BATCH(batch, 0);
2807     ADVANCE_BATCH(batch);
2808
2809     BEGIN_BATCH(batch, 8);
2810     OUT_BATCH(batch, GEN7_3DSTATE_PS | (8 - 2));
2811     OUT_RELOC(batch, 
2812               render_state->render_kernels[kernel].bo,
2813               I915_GEM_DOMAIN_INSTRUCTION, 0,
2814               0);
2815     OUT_BATCH(batch, 
2816               (1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
2817               (5 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
2818     OUT_BATCH(batch, 0); /* scratch space base offset */
2819     OUT_BATCH(batch, 
2820               ((86 - 1) << max_threads_shift) | num_samples |
2821               GEN7_PS_PUSH_CONSTANT_ENABLE |
2822               GEN7_PS_ATTRIBUTE_ENABLE |
2823               GEN7_PS_16_DISPATCH_ENABLE);
2824     OUT_BATCH(batch, 
2825               (6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
2826     OUT_BATCH(batch, 0); /* kernel 1 pointer */
2827     OUT_BATCH(batch, 0); /* kernel 2 pointer */
2828     ADVANCE_BATCH(batch);
2829 }
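/*
 * The only IVB/HSW differences handled above live in 3DSTATE_PS DW4:
 * Haswell moved the maximum-thread-count field (hence the two
 * *_MAX_THREADS_SHIFT values) and added a sample-mask field, set to 0x1
 * here for single-sample rendering.  The rest of the packet -- kernel
 * pointer, 86 threads, push constants, SIMD16 dispatch, start GRF 6 --
 * is programmed identically on both.
 */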
2830
2831 static void
2832 gen7_emit_vertex_element_state(VADriverContextP ctx)
2833 {
2834     struct i965_driver_data *i965 = i965_driver_data(ctx);
2835     struct intel_batchbuffer *batch = i965->batch;
2836
2837     /* Set up our vertex elements, sourced from the single vertex buffer. */
2838     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
2839     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
2840     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2841               GEN6_VE0_VALID |
2842               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2843               (0 << VE0_OFFSET_SHIFT));
2844     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
2845               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2846               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2847               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2848     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
2849     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2850               GEN6_VE0_VALID |
2851               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2852               (8 << VE0_OFFSET_SHIFT));
2853     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
2854               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2855               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2856               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2857 }
2858
2859 static void
2860 gen7_emit_vertices(VADriverContextP ctx)
2861 {
2862     struct i965_driver_data *i965 = i965_driver_data(ctx);
2863     struct intel_batchbuffer *batch = i965->batch;
2864     struct i965_render_state *render_state = &i965->render_state;
2865
2866     BEGIN_BATCH(batch, 5);
2867     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | (5 - 2));
2868     OUT_BATCH(batch, 
2869               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2870               GEN6_VB0_VERTEXDATA |
2871               GEN7_VB0_ADDRESS_MODIFYENABLE |
2872               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2873     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2874     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2875     OUT_BATCH(batch, 0);
2876     ADVANCE_BATCH(batch);
2877
2878     BEGIN_BATCH(batch, 7);
2879     OUT_BATCH(batch, CMD_3DPRIMITIVE | (7 - 2));
2880     OUT_BATCH(batch,
2881               _3DPRIM_RECTLIST |
2882               GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL);
2883     OUT_BATCH(batch, 3); /* vertex count per instance */
2884     OUT_BATCH(batch, 0); /* start vertex offset */
2885     OUT_BATCH(batch, 1); /* single instance */
2886     OUT_BATCH(batch, 0); /* start instance location */
2887     OUT_BATCH(batch, 0);
2888     ADVANCE_BATCH(batch);
2889 }
2890
2891 static void
2892 gen7_render_emit_states(VADriverContextP ctx, int kernel)
2893 {
2894     struct i965_driver_data *i965 = i965_driver_data(ctx);
2895     struct intel_batchbuffer *batch = i965->batch;
2896
2897     intel_batchbuffer_start_atomic(batch, 0x1000);
2898     intel_batchbuffer_emit_mi_flush(batch);
2899     gen7_emit_invariant_states(ctx);
2900     gen7_emit_state_base_address(ctx);
2901     gen7_emit_viewport_state_pointers(ctx);
2902     gen7_emit_urb(ctx);
2903     gen7_emit_cc_state_pointers(ctx);
2904     gen7_emit_sampler_state_pointers(ctx);
2905     gen7_emit_bypass_state(ctx);
2906     gen7_emit_vs_state(ctx);
2907     gen7_emit_clip_state(ctx);
2908     gen7_emit_sf_state(ctx);
2909     gen7_emit_wm_state(ctx, kernel);
2910     gen7_emit_binding_table(ctx);
2911     gen7_emit_depth_buffer_state(ctx);
2912     gen7_emit_drawing_rectangle(ctx);
2913     gen7_emit_vertex_element_state(ctx);
2914     gen7_emit_vertices(ctx);
2915     intel_batchbuffer_end_atomic(batch);
2916 }
2917
2918 static void
2919 gen7_render_put_surface(
2920     VADriverContextP   ctx,
2921     VASurfaceID        surface,
2922     const VARectangle *src_rect,
2923     const VARectangle *dst_rect,
2924     unsigned int       flags
2925 )
2926 {
2927     struct i965_driver_data *i965 = i965_driver_data(ctx);
2928     struct intel_batchbuffer *batch = i965->batch;
2929
2930     gen7_render_initialize(ctx);
2931     gen7_render_setup_states(ctx, surface, src_rect, dst_rect, flags);
2932     i965_clear_dest_region(ctx);
2933     gen7_render_emit_states(ctx, PS_KERNEL);
2934     intel_batchbuffer_flush(batch);
2935 }
2936
2937 static void
2938 gen7_subpicture_render_blend_state(VADriverContextP ctx)
2939 {
2940     struct i965_driver_data *i965 = i965_driver_data(ctx);
2941     struct i965_render_state *render_state = &i965->render_state;
2942     struct gen6_blend_state *blend_state;
2943
2944     /* no unmap needed here: cc.state was already unmapped in gen7_render_color_calc_state() */
2945     dri_bo_map(render_state->cc.blend, 1);
2946     assert(render_state->cc.blend->virtual);
2947     blend_state = render_state->cc.blend->virtual;
2948     memset(blend_state, 0, sizeof(*blend_state));
2949     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2950     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2951     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2952     blend_state->blend0.blend_enable = 1;
2953     blend_state->blend1.post_blend_clamp_enable = 1;
2954     blend_state->blend1.pre_blend_clamp_enable = 1;
2955     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2956     dri_bo_unmap(render_state->cc.blend);
2957 }
2958
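/*
 * State setup for subpicture blending.  The destination surface state is
 * shared with the video path; the source surfaces, blend state and vertex
 * data come from the subpicture instead.
 */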
static void
gen7_subpicture_render_setup_states(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_render_cc_viewport(ctx);
    gen7_render_color_calc_state(ctx);
    gen7_subpicture_render_blend_state(ctx);
    gen7_render_depth_stencil_state(ctx);
    i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
}

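/*
 * Blend the subpicture attached to @surface over the destination using
 * the subpicture pixel shader kernel.  The image palette is uploaded as
 * well, which matters for paletted subpicture formats.
 */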
static void
gen7_render_put_subpicture(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct object_surface *obj_surface = SURFACE(surface);
    struct object_subpic *obj_subpic;

    assert(obj_surface);
    obj_subpic = SUBPIC(obj_surface->subpic);
    assert(obj_subpic);
    gen7_render_initialize(ctx);
    gen7_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
    gen7_render_emit_states(ctx, PS_SUBPIC_KERNEL);
    i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
    intel_batchbuffer_flush(batch);
}


/*
 * global functions
 */
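/*
 * Forward declaration: the implementation lives in the surface management
 * code.  It is needed below to release the temporary surface returned by
 * post-processing.
 */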
VAStatus
i965_DestroySurfaces(VADriverContextP ctx,
                     VASurfaceID *surface_list,
                     int num_surfaces);
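/*
 * Generation-independent entry point for putting a surface on screen.
 * Post-processing runs first and may scale the frame into a temporary
 * surface; in that case the remaining blit is 1:1, so dst_rect also
 * serves as the source rectangle for the render path.  A caller is
 * expected to invoke it roughly as (illustrative only):
 *
 *     intel_render_put_surface(ctx, surface, &src_rect, &dst_rect, flags);
 */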
void
intel_render_put_surface(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int has_done_scaling = 0;
    VASurfaceID in_surface_id = surface;
    VASurfaceID out_surface_id = i965_post_processing(ctx, surface, src_rect, dst_rect, flags, &has_done_scaling);

    assert((!has_done_scaling) || (out_surface_id != VA_INVALID_ID));

    if (out_surface_id != VA_INVALID_ID)
        in_surface_id = out_surface_id;

    if (IS_GEN7(i965->intel.device_id))
        gen7_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
    else if (IS_GEN6(i965->intel.device_id))
        gen6_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
    else
        i965_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);

    /* release the temporary surface returned by post-processing */
    if (in_surface_id != surface)
        i965_DestroySurfaces(ctx, &in_surface_id, 1);
}

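/*
 * Generation-independent entry point for subpicture blending; simply
 * dispatches to the matching hardware generation.
 */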
void
intel_render_put_subpicture(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);

    if (IS_GEN7(i965->intel.device_id))
        gen7_render_put_subpicture(ctx, surface, src_rect, dst_rect);
    else if (IS_GEN6(i965->intel.device_id))
        gen6_render_put_subpicture(ctx, surface, src_rect, dst_rect);
    else
        i965_render_put_subpicture(ctx, surface, src_rect, dst_rect);
}

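/*
 * Pick the render kernel table for the detected GPU (Haswell needs its
 * own Gen7 kernel variants), upload each kernel into its own buffer
 * object, and allocate the CURBE buffer for shader constants.
 */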
Bool
i965_render_init(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    int i;

    /* kernel */
    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen5) /
                                 sizeof(render_kernels_gen5[0])));
    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) /
                                 sizeof(render_kernels_gen6[0])));

    if (IS_GEN7(i965->intel.device_id))
        memcpy(render_state->render_kernels,
               (IS_HASWELL(i965->intel.device_id) ? render_kernels_gen7_haswell : render_kernels_gen7),
               sizeof(render_state->render_kernels));
    else if (IS_GEN6(i965->intel.device_id))
        memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
    else if (IS_IRONLAKE(i965->intel.device_id))
        memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
    else
        memcpy(render_state->render_kernels, render_kernels_gen4, sizeof(render_state->render_kernels));

    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
        struct i965_kernel *kernel = &render_state->render_kernels[i];

        if (!kernel->size)
            continue;

        kernel->bo = dri_bo_alloc(i965->intel.bufmgr,
                                  kernel->name,
                                  kernel->size, 0x1000);
        assert(kernel->bo);
        dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
    }

    /* constant buffer */
    render_state->curbe.bo = dri_bo_alloc(i965->intel.bufmgr,
                                          "constant buffer",
                                          4096, 64);
    assert(render_state->curbe.bo);

    return True;
}

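/*
 * Drop every buffer object owned by the render state: the constant
 * buffer, the kernels, vertex/pipeline/CC state, and the cached
 * destination region.
 */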
Bool
i965_render_terminate(VADriverContextP ctx)
{
    int i;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;

    dri_bo_unreference(render_state->curbe.bo);
    render_state->curbe.bo = NULL;

    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
        struct i965_kernel *kernel = &render_state->render_kernels[i];

        dri_bo_unreference(kernel->bo);
        kernel->bo = NULL;
    }

    dri_bo_unreference(render_state->vb.vertex_buffer);
    render_state->vb.vertex_buffer = NULL;
    dri_bo_unreference(render_state->vs.state);
    render_state->vs.state = NULL;
    dri_bo_unreference(render_state->sf.state);
    render_state->sf.state = NULL;
    dri_bo_unreference(render_state->wm.sampler);
    render_state->wm.sampler = NULL;
    dri_bo_unreference(render_state->wm.state);
    render_state->wm.state = NULL;
    dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
    render_state->wm.surface_state_binding_table_bo = NULL;
    dri_bo_unreference(render_state->cc.viewport);
    render_state->cc.viewport = NULL;
    dri_bo_unreference(render_state->cc.state);
    render_state->cc.state = NULL;
    dri_bo_unreference(render_state->cc.blend);
    render_state->cc.blend = NULL;
    dri_bo_unreference(render_state->cc.depth_stencil);
    render_state->cc.depth_stencil = NULL;

    if (render_state->draw_region) {
        dri_bo_unreference(render_state->draw_region->bo);
        free(render_state->draw_region);
        render_state->draw_region = NULL;
    }

    return True;
}