render: add support for display rotation attribute.
src/i965_render.c (android-x86/hardware-intel-common-vaapi.git)
/*
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

/*
 * Most of the rendering code is ported from xf86-video-intel/src/i965_video.c
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include <va/va_drmcommon.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"
#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_structs.h"

#include "i965_render.h"

#define SF_KERNEL_NUM_GRF       16
#define SF_MAX_THREADS          1

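/*
 * The .g4b/.g6b/.g7b files included below are precompiled GEN shader
 * binaries generated at build time; each array row is one 128-bit
 * instruction, stored as four uint32_t words, with a separate binary per
 * hardware generation.
 */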
static const uint32_t sf_kernel_static[][4] =
{
#include "shaders/render/exa_sf.g4b"
};

#define PS_KERNEL_NUM_GRF       32
#define PS_MAX_THREADS          32

#define I965_GRF_BLOCKS(nreg)   ((nreg + 15) / 16 - 1)

static const uint32_t ps_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_planar.g4b"
#include "shaders/render/exa_wm_yuv_rgb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

static const uint32_t ps_subpic_kernel_static[][4] =
{
#include "shaders/render/exa_wm_xy.g4b"
#include "shaders/render/exa_wm_src_affine.g4b"
#include "shaders/render/exa_wm_src_sample_argb.g4b"
#include "shaders/render/exa_wm_write.g4b"
};

/* On IRONLAKE */
static const uint32_t sf_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_sf.g4b.gen5"
};

static const uint32_t ps_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_planar.g4b.gen5"
#include "shaders/render/exa_wm_yuv_rgb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

static const uint32_t ps_subpic_kernel_static_gen5[][4] =
{
#include "shaders/render/exa_wm_xy.g4b.gen5"
#include "shaders/render/exa_wm_src_affine.g4b.gen5"
#include "shaders/render/exa_wm_src_sample_argb.g4b.gen5"
#include "shaders/render/exa_wm_write.g4b.gen5"
};

/* programs for Sandybridge */
static const uint32_t sf_kernel_static_gen6[][4] =
{
};

static const uint32_t ps_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_planar.g6b"
#include "shaders/render/exa_wm_yuv_rgb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

static const uint32_t ps_subpic_kernel_static_gen6[][4] = {
#include "shaders/render/exa_wm_src_affine.g6b"
#include "shaders/render/exa_wm_src_sample_argb.g6b"
#include "shaders/render/exa_wm_write.g6b"
};

/* programs for Ivybridge */
static const uint32_t sf_kernel_static_gen7[][4] =
{
};

static const uint32_t ps_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_planar.g7b"
#include "shaders/render/exa_wm_yuv_rgb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

static const uint32_t ps_subpic_kernel_static_gen7[][4] = {
#include "shaders/render/exa_wm_src_affine.g7b"
#include "shaders/render/exa_wm_src_sample_argb.g7b"
#include "shaders/render/exa_wm_write.g7b"
};

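/*
 * Every surface state entry is padded to the larger of the pre-GEN7 and
 * GEN7 layouts so one buffer serves all generations; the binding table
 * (one uint32_t offset per surface) lives immediately after the
 * MAX_RENDER_SURFACES surface state entries.
 */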
#define SURFACE_STATE_PADDED_SIZE_I965  ALIGN(sizeof(struct i965_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE_GEN7  ALIGN(sizeof(struct gen7_surface_state), 32)
#define SURFACE_STATE_PADDED_SIZE       MAX(SURFACE_STATE_PADDED_SIZE_I965, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * index)
#define BINDING_TABLE_OFFSET            SURFACE_STATE_OFFSET(MAX_RENDER_SURFACES)

static uint32_t float_to_uint(float f)
{
    union {
        uint32_t i;
        float f;
    } x;

    x.f = f;
    return x.i;
}

enum
{
    SF_KERNEL = 0,
    PS_KERNEL,
    PS_SUBPIC_KERNEL
};

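/*
 * Per-generation kernel tables. Each initializer lists the kernel name,
 * its index from the enum above, the static binary and its size, plus a
 * bo pointer (NULL here) that is presumably filled in when the kernels
 * are uploaded at driver initialization.
 */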
static struct i965_kernel render_kernels_gen4[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static,
        sizeof(sf_kernel_static),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static,
        sizeof(ps_kernel_static),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static,
        sizeof(ps_subpic_kernel_static),
        NULL
    }
};

static struct i965_kernel render_kernels_gen5[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen5,
        sizeof(sf_kernel_static_gen5),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen5,
        sizeof(ps_kernel_static_gen5),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen5,
        sizeof(ps_subpic_kernel_static_gen5),
        NULL
    }
};

static struct i965_kernel render_kernels_gen6[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen6,
        sizeof(sf_kernel_static_gen6),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen6,
        sizeof(ps_kernel_static_gen6),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen6,
        sizeof(ps_subpic_kernel_static_gen6),
        NULL
    }
};

static struct i965_kernel render_kernels_gen7[] = {
    {
        "SF",
        SF_KERNEL,
        sf_kernel_static_gen7,
        sizeof(sf_kernel_static_gen7),
        NULL
    },
    {
        "PS",
        PS_KERNEL,
        ps_kernel_static_gen7,
        sizeof(ps_kernel_static_gen7),
        NULL
    },
    {
        "PS_SUBPIC",
        PS_SUBPIC_KERNEL,
        ps_subpic_kernel_static_gen7,
        sizeof(ps_subpic_kernel_static_gen7),
        NULL
    }
};

#define URB_VS_ENTRIES        8
#define URB_VS_ENTRY_SIZE     1

#define URB_GS_ENTRIES        0
#define URB_GS_ENTRY_SIZE     0

#define URB_CLIP_ENTRIES      0
#define URB_CLIP_ENTRY_SIZE   0

#define URB_SF_ENTRIES        1
#define URB_SF_ENTRY_SIZE     2

#define URB_CS_ENTRIES        1
#define URB_CS_ENTRY_SIZE     1

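/*
 * URB allocation for the legacy fixed-function pipeline: only the SF and
 * CS (CURBE) stages get usable entries, GS and CLIP are unused, and the
 * VS below is programmed as a pass-through with its vertex cache
 * disabled.
 */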
static void
i965_render_vs_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_vs_unit_state *vs_state;

    dri_bo_map(render_state->vs.state, 1);
    assert(render_state->vs.state->virtual);
    vs_state = render_state->vs.state->virtual;
    memset(vs_state, 0, sizeof(*vs_state));

    if (IS_IRONLAKE(i965->intel.device_id))
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
    else
        vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES;

    vs_state->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
    vs_state->vs6.vs_enable = 0;
    vs_state->vs6.vert_cache_disable = 1;

    dri_bo_unmap(render_state->vs.state);
}

static void
i965_render_sf_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sf_unit_state *sf_state;

    dri_bo_map(render_state->sf.state, 1);
    assert(render_state->sf.state->virtual);
    sf_state = render_state->sf.state->virtual;
    memset(sf_state, 0, sizeof(*sf_state));

    sf_state->thread0.grf_reg_count = I965_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
    sf_state->thread0.kernel_start_pointer = render_state->render_kernels[SF_KERNEL].bo->offset >> 6;

    sf_state->sf1.single_program_flow = 1; /* XXX */
    sf_state->sf1.binding_table_entry_count = 0;
    sf_state->sf1.thread_priority = 0;
    sf_state->sf1.floating_point_mode = 0; /* Mesa does this */
    sf_state->sf1.illegal_op_exception_enable = 1;
    sf_state->sf1.mask_stack_exception_enable = 1;
    sf_state->sf1.sw_exception_enable = 1;

    /* scratch space is not used in our kernel */
    sf_state->thread2.per_thread_scratch_space = 0;
    sf_state->thread2.scratch_space_base_pointer = 0;

    sf_state->thread3.const_urb_entry_read_length = 0; /* no const URBs */
    sf_state->thread3.const_urb_entry_read_offset = 0; /* no const URBs */
    sf_state->thread3.urb_entry_read_length = 1; /* 1 URB per vertex */
    sf_state->thread3.urb_entry_read_offset = 0;
    sf_state->thread3.dispatch_grf_start_reg = 3;

    sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
    sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
    sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
    sf_state->thread4.stats_enable = 1;

    sf_state->sf5.viewport_transform = 0; /* skip viewport */

    sf_state->sf6.cull_mode = I965_CULLMODE_NONE;
    sf_state->sf6.scissor = 0;

    sf_state->sf7.trifan_pv = 2;

    sf_state->sf6.dest_org_vbias = 0x8;
    sf_state->sf6.dest_org_hbias = 0x8;

    dri_bo_emit_reloc(render_state->sf.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      sf_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_sf_unit_state, thread0),
                      render_state->render_kernels[SF_KERNEL].bo);

    dri_bo_unmap(render_state->sf.state);
}

static void
i965_render_sampler(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_sampler_state *sampler_state;
    int i;

    assert(render_state->wm.sampler_count > 0);
    assert(render_state->wm.sampler_count <= MAX_SAMPLERS);

    dri_bo_map(render_state->wm.sampler, 1);
    assert(render_state->wm.sampler->virtual);
    sampler_state = render_state->wm.sampler->virtual;
    for (i = 0; i < render_state->wm.sampler_count; i++) {
        memset(sampler_state, 0, sizeof(*sampler_state));
        sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
        sampler_state->ss1.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state->ss1.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
        sampler_state++;
    }

    dri_bo_unmap(render_state->wm.sampler);
}

static void
i965_subpic_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_SUBPIC_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 3; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 0;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_SUBPIC_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_wm_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_wm_unit_state *wm_state;

    assert(render_state->wm.sampler);

    dri_bo_map(render_state->wm.state, 1);
    assert(render_state->wm.state->virtual);
    wm_state = render_state->wm.state->virtual;
    memset(wm_state, 0, sizeof(*wm_state));

    wm_state->thread0.grf_reg_count = I965_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
    wm_state->thread0.kernel_start_pointer = render_state->render_kernels[PS_KERNEL].bo->offset >> 6;

    wm_state->thread1.single_program_flow = 1; /* XXX */

    if (IS_IRONLAKE(i965->intel.device_id))
        wm_state->thread1.binding_table_entry_count = 0;        /* hardware requirement */
    else
        wm_state->thread1.binding_table_entry_count = 7;

    wm_state->thread2.scratch_space_base_pointer = 0;
    wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */

    wm_state->thread3.dispatch_grf_start_reg = 2; /* XXX */
    wm_state->thread3.const_urb_entry_read_length = 1;
    wm_state->thread3.const_urb_entry_read_offset = 0;
    wm_state->thread3.urb_entry_read_length = 1; /* XXX */
    wm_state->thread3.urb_entry_read_offset = 0; /* XXX */

    wm_state->wm4.stats_enable = 0;
    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        wm_state->wm4.sampler_count = 0;        /* hardware requirement */
        wm_state->wm5.max_threads = 12 * 6 - 1;
    } else {
        wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
        wm_state->wm5.max_threads = 10 * 5 - 1;
    }

    wm_state->wm5.thread_dispatch_enable = 1;
    wm_state->wm5.enable_16_pix = 1;
    wm_state->wm5.enable_8_pix = 0;
    wm_state->wm5.early_depth_test = 1;

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->thread0.grf_reg_count << 1,
                      offsetof(struct i965_wm_unit_state, thread0),
                      render_state->render_kernels[PS_KERNEL].bo);

    dri_bo_emit_reloc(render_state->wm.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      wm_state->wm4.sampler_count << 2,
                      offsetof(struct i965_wm_unit_state, wm4),
                      render_state->wm.sampler);

    dri_bo_unmap(render_state->wm.state);
}

static void
i965_render_cc_viewport(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_viewport *cc_viewport;

    dri_bo_map(render_state->cc.viewport, 1);
    assert(render_state->cc.viewport->virtual);
    cc_viewport = render_state->cc.viewport->virtual;
    memset(cc_viewport, 0, sizeof(*cc_viewport));

    cc_viewport->min_depth = -1.e35;
    cc_viewport->max_depth = 1.e35;

    dri_bo_unmap(render_state->cc.viewport);
}

static void
i965_subpic_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 0;   /* disable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 1;     /* enable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc3.alpha_test_format = 0; /* ALPHATEST_UNORM8: store alpha value as UNORM8 */
    cc_state->cc3.alpha_test_func = 5;   /* COMPAREFUNCTION_LESS: pass if less than the reference */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_DST_ALPHA;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_DST_ALPHA;

    cc_state->cc6.clamp_post_alpha_blend = 0;
    cc_state->cc6.clamp_pre_alpha_blend = 0;

    /* final color = src_color * src_blend_factor +/- dst_color * dest_blend_factor */
    cc_state->cc6.blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc6.src_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
    cc_state->cc6.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;

    /* alpha test reference */
    cc_state->cc7.alpha_ref.f = 0.0;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

static void
i965_render_cc_unit(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct i965_cc_unit_state *cc_state;

    assert(render_state->cc.viewport);

    dri_bo_map(render_state->cc.state, 1);
    assert(render_state->cc.state->virtual);
    cc_state = render_state->cc.state->virtual;
    memset(cc_state, 0, sizeof(*cc_state));

    cc_state->cc0.stencil_enable = 0;   /* disable stencil */
    cc_state->cc2.depth_test = 0;       /* disable depth test */
    cc_state->cc2.logicop_enable = 1;   /* enable logic op */
    cc_state->cc3.ia_blend_enable = 0;  /* blend alpha just like colors */
    cc_state->cc3.blend_enable = 0;     /* disable color blend */
    cc_state->cc3.alpha_test = 0;       /* disable alpha test */
    cc_state->cc4.cc_viewport_state_offset = render_state->cc.viewport->offset >> 5;

    cc_state->cc5.dither_enable = 0;    /* disable dither */
    cc_state->cc5.logicop_func = 0xc;   /* WHITE */
    cc_state->cc5.statistics_enable = 1;
    cc_state->cc5.ia_blend_function = I965_BLENDFUNCTION_ADD;
    cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_ONE;
    cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_ONE;

    dri_bo_emit_reloc(render_state->cc.state,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_cc_unit_state, cc4),
                      render_state->cc.viewport);

    dri_bo_unmap(render_state->cc.state);
}

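/*
 * SURFACE_STATE helpers. The pre-GEN7 and GEN7 layouts differ (the tiling
 * bits move from dword 3 to dword 0), so each generation gets its own
 * pair of setup functions; both halve the height and set the vertical
 * line stride when a single field of an interlaced surface is sampled.
 */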
static void
i965_render_set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss3.tiled_surface = 0;
        ss->ss3.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
i965_render_set_surface_state(
    struct i965_surface_state *ss,
    dri_bo                    *bo,
    unsigned long              offset,
    unsigned int               width,
    unsigned int               height,
    unsigned int               pitch,
    unsigned int               format,
    unsigned int               flags
)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    switch (flags & (I965_PP_FLAG_TOP_FIELD|I965_PP_FLAG_BOTTOM_FIELD)) {
    case I965_PP_FLAG_BOTTOM_FIELD:
        ss->ss0.vert_line_stride_ofs = 1;
        /* fall-through */
    case I965_PP_FLAG_TOP_FIELD:
        ss->ss0.vert_line_stride = 1;
        height /= 2;
        break;
    }

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss0.color_blend = 1;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    i965_render_set_surface_tiling(ss, tiling);
}

static void
gen7_render_set_surface_tiling(struct gen7_surface_state *ss, uint32_t tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
gen7_render_set_surface_state(
    struct gen7_surface_state *ss,
    dri_bo                    *bo,
    unsigned long              offset,
    int                        width,
    int                        height,
    int                        pitch,
    int                        format,
    unsigned int               flags
)
{
    unsigned int tiling;
    unsigned int swizzle;

    memset(ss, 0, sizeof(*ss));

    switch (flags & (I965_PP_FLAG_TOP_FIELD|I965_PP_FLAG_BOTTOM_FIELD)) {
    case I965_PP_FLAG_BOTTOM_FIELD:
        ss->ss0.vert_line_stride_ofs = 1;
        /* fall-through */
    case I965_PP_FLAG_TOP_FIELD:
        ss->ss0.vert_line_stride = 1;
        height /= 2;
        break;
    }

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;

    ss->ss1.base_addr = bo->offset + offset;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    dri_bo_get_tiling(bo, &tiling, &swizzle);
    gen7_render_set_surface_tiling(ss, tiling);
}

static void
i965_render_src_surface_state(
    VADriverContextP ctx,
    int              index,
    dri_bo          *region,
    unsigned long    offset,
    int              w,
    int              h,
    int              pitch,
    int              format,
    unsigned int     flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;

    assert(index < MAX_RENDER_SURFACES);

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format, flags);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          region);
    } else {
        i965_render_set_surface_state(ss,
                                      region, offset,
                                      w, h,
                                      pitch, format, flags);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_SAMPLER, 0,
                          offset,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          region);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
    render_state->wm.sampler_count++;
}

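/*
 * Source surfaces: binding table indices 1/2 carry the Y plane and 3..6
 * the chroma planes, each plane bound twice, presumably to match the
 * sampler layout the planar WM kernel expects. NV12 binds one interleaved
 * R8G8 UV surface where the other planar formats bind separate R8 U and
 * V surfaces.
 */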
static void
i965_render_src_surfaces_state(
    VADriverContextP ctx,
    VASurfaceID      surface,
    unsigned int     flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface;
    int region_pitch;
    int rw, rh;
    dri_bo *region;

    obj_surface = SURFACE(surface);
    assert(obj_surface);

    region_pitch = obj_surface->width;
    rw = obj_surface->orig_width;
    rh = obj_surface->orig_height;
    region = obj_surface->bo;

    i965_render_src_surface_state(ctx, 1, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);     /* Y */
    i965_render_src_surface_state(ctx, 2, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);

    if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2')) {
        i965_render_src_surface_state(ctx, 3, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8G8_UNORM, flags); /* UV */
        i965_render_src_surface_state(ctx, 4, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8G8_UNORM, flags);
    } else {
        i965_render_src_surface_state(ctx, 3, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags); /* U */
        i965_render_src_surface_state(ctx, 4, region,
                                      region_pitch * obj_surface->y_cb_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags);
        i965_render_src_surface_state(ctx, 5, region,
                                      region_pitch * obj_surface->y_cr_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags); /* V */
        i965_render_src_surface_state(ctx, 6, region,
                                      region_pitch * obj_surface->y_cr_offset,
                                      obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
                                      I965_SURFACEFORMAT_R8_UNORM, flags);
    }
}

static void
i965_subpic_render_src_surfaces_state(VADriverContextP ctx,
                                      VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct object_surface *obj_surface = SURFACE(surface);
    struct object_subpic *obj_subpic;
    struct object_image *obj_image;
    dri_bo *subpic_region;

    /* check the surface before the subpicture objects are looked up through it */
    assert(obj_surface);
    assert(obj_surface->bo);

    obj_subpic = SUBPIC(obj_surface->subpic);
    obj_image = IMAGE(obj_subpic->image);
    subpic_region = obj_image->bo;

    /* subpicture surface */
    i965_render_src_surface_state(ctx, 1, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
    i965_render_src_surface_state(ctx, 2, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
}

static void
i965_render_dest_surface_state(VADriverContextP ctx, int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    void *ss;
    dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;
    int format;

    assert(index < MAX_RENDER_SURFACES);

    if (dest_region->cpp == 2) {
        format = I965_SURFACEFORMAT_B5G6R5_UNORM;
    } else {
        format = I965_SURFACEFORMAT_B8G8R8A8_UNORM;
    }

    dri_bo_map(ss_bo, 1);
    assert(ss_bo->virtual);
    ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);

    if (IS_GEN7(i965->intel.device_id)) {
        gen7_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format, 0);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct gen7_surface_state, ss1),
                          dest_region->bo);
    } else {
        i965_render_set_surface_state(ss,
                                      dest_region->bo, 0,
                                      dest_region->width, dest_region->height,
                                      dest_region->pitch, format, 0);
        dri_bo_emit_reloc(ss_bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          SURFACE_STATE_OFFSET(index) + offsetof(struct i965_surface_state, ss1),
                          dest_region->bo);
    }

    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}

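/*
 * Builds the vertex data for the RECTLIST quad. The texture coordinates
 * always describe the unrotated source rectangle; the display rotation
 * attribute only permutes which destination corner each texel pair maps
 * to, so the same three vertices render the rotated output.
 */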
static void
i965_fill_vertex_buffer(
    VADriverContextP ctx,
    float tex_coords[4], /* [(u1,v1);(u2,v2)] */
    float vid_coords[4]  /* [(x1,y1);(x2,y2)] */
)
{
    struct i965_driver_data * const i965 = i965_driver_data(ctx);
    float vb[12];

    enum { X1, Y1, X2, Y2 };

    static const unsigned int g_rotation_indices[][6] = {
        [VA_ROTATION_NONE] = { X1, Y1, X2, Y1, X2, Y2 },
        [VA_ROTATION_90]   = { X2, Y1, X2, Y2, X1, Y2 },
        [VA_ROTATION_180]  = { X2, Y2, X1, Y2, X1, Y1 },
        [VA_ROTATION_270]  = { X1, Y2, X1, Y1, X2, Y1 },
    };

    const unsigned int * const rotation_indices =
        g_rotation_indices[i965->rotation_attrib->value];

    vb[0]  = tex_coords[X1]; /* top-left corner */
    vb[1]  = tex_coords[Y1];
    vb[2]  = vid_coords[rotation_indices[0]];
    vb[3]  = vid_coords[rotation_indices[1]];

    vb[4]  = tex_coords[X2]; /* top-right corner */
    vb[5]  = tex_coords[Y1];
    vb[6]  = vid_coords[rotation_indices[2]];
    vb[7]  = vid_coords[rotation_indices[3]];

    vb[8]  = tex_coords[X2]; /* bottom-right corner */
    vb[9]  = tex_coords[Y2];
    vb[10] = vid_coords[rotation_indices[4]];
    vb[11] = vid_coords[rotation_indices[5]];

    dri_bo_subdata(i965->render_state.vb.vertex_buffer, 0, sizeof(vb), vb);
}

static void
i965_subpic_render_upload_vertex(VADriverContextP ctx,
                                 VASurfaceID surface,
                                 const VARectangle *output_rect)
{
    struct i965_driver_data  *i965         = i965_driver_data(ctx);
    struct object_surface    *obj_surface  = SURFACE(surface);
    struct object_subpic     *obj_subpic;
    float tex_coords[4], vid_coords[4];
    VARectangle dst_rect;

    assert(obj_surface);
    obj_subpic = SUBPIC(obj_surface->subpic);

    if (obj_subpic->flags & VA_SUBPICTURE_DESTINATION_IS_SCREEN_COORD)
        dst_rect = obj_subpic->dst_rect;
    else {
        const float sx  = (float)output_rect->width  / obj_surface->orig_width;
        const float sy  = (float)output_rect->height / obj_surface->orig_height;
        dst_rect.x      = output_rect->x + sx * obj_subpic->dst_rect.x;
        dst_rect.y      = output_rect->y + sy * obj_subpic->dst_rect.y;
        dst_rect.width  = sx * obj_subpic->dst_rect.width;
        dst_rect.height = sy * obj_subpic->dst_rect.height;
    }

    tex_coords[0] = (float)obj_subpic->src_rect.x / obj_subpic->width;
    tex_coords[1] = (float)obj_subpic->src_rect.y / obj_subpic->height;
    tex_coords[2] = (float)(obj_subpic->src_rect.x + obj_subpic->src_rect.width) / obj_subpic->width;
    tex_coords[3] = (float)(obj_subpic->src_rect.y + obj_subpic->src_rect.height) / obj_subpic->height;

    vid_coords[0] = dst_rect.x;
    vid_coords[1] = dst_rect.y;
    vid_coords[2] = (float)(dst_rect.x + dst_rect.width);
    vid_coords[3] = (float)(dst_rect.y + dst_rect.height);

    i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
}

static void
i965_render_upload_vertex(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    struct object_surface *obj_surface;
    float tex_coords[4], vid_coords[4];
    int width, height;

    obj_surface = SURFACE(surface);
    assert(obj_surface);

    width  = obj_surface->orig_width;
    height = obj_surface->orig_height;

    tex_coords[0] = (float)src_rect->x / width;
    tex_coords[1] = (float)src_rect->y / height;
    tex_coords[2] = (float)(src_rect->x + src_rect->width) / width;
    tex_coords[3] = (float)(src_rect->y + src_rect->height) / height;

    vid_coords[0] = dest_region->x + dst_rect->x;
    vid_coords[1] = dest_region->y + dst_rect->y;
    vid_coords[2] = vid_coords[0] + dst_rect->width;
    vid_coords[3] = vid_coords[1] + dst_rect->height;

    i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
}

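/*
 * The CURBE holds a single flag that the planar PS kernel reads to pick
 * its sampling path: 2 = luma only (grayscale), 1 = NV12 interleaved
 * chroma, 0 = fully planar chroma.
 */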
static void
i965_render_upload_constants(VADriverContextP ctx,
                             VASurfaceID surface)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    unsigned short *constant_buffer;
    struct object_surface *obj_surface = SURFACE(surface);

    assert(obj_surface);

    dri_bo_map(render_state->curbe.bo, 1);
    assert(render_state->curbe.bo->virtual);
    constant_buffer = render_state->curbe.bo->virtual;

    if (obj_surface->subsampling == SUBSAMPLE_YUV400) {
        assert(obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '1') ||
               obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '3'));
        *constant_buffer = 2;
    } else {
        if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2'))
            *constant_buffer = 1;
        else
            *constant_buffer = 0;
    }

    dri_bo_unmap(render_state->curbe.bo);
}

static void
i965_surface_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_render_src_surfaces_state(ctx, surface, flags);
    i965_render_sampler(ctx);
    i965_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_render_cc_unit(ctx);
    i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
    i965_render_upload_constants(ctx, surface);
}

static void
i965_subpic_render_state_setup(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    i965_render_vs_unit(ctx);
    i965_render_sf_unit(ctx);
    i965_render_dest_surface_state(ctx, 0);
    i965_subpic_render_src_surfaces_state(ctx, surface);
    i965_render_sampler(ctx);
    i965_subpic_render_wm_unit(ctx);
    i965_render_cc_viewport(ctx);
    i965_subpic_render_cc_unit(ctx);
    i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
}

static void
i965_render_pipeline_select(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
    ADVANCE_BATCH(batch);
}

static void
i965_render_state_sip(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_STATE_SIP | 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

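/*
 * STATE_BASE_ADDRESS: only the surface state base points at a real buffer
 * (the combined surface state/binding table bo); every other base stays
 * zero. Ironlake's variant of the command carries two extra dwords,
 * apparently the instruction base address and its upper bound.
 */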
static void
i965_render_state_base_address(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 8);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 6);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_binding_table_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 6);
    OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS | 4);
    OUT_BATCH(batch, 0); /* vs */
    OUT_BATCH(batch, 0); /* gs */
    OUT_BATCH(batch, 0); /* clip */
    OUT_BATCH(batch, 0); /* sf */
    OUT_BATCH(batch, BINDING_TABLE_OFFSET);
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_color(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 5);
    OUT_BATCH(batch, CMD_CONSTANT_COLOR | 3);
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(0.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    OUT_BATCH(batch, float_to_uint(1.0));
    ADVANCE_BATCH(batch);
}

static void
i965_render_pipelined_pointers(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 7);
    OUT_BATCH(batch, CMD_PIPELINED_POINTERS | 5);
    OUT_RELOC(batch, render_state->vs.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_BATCH(batch, 0);  /* disable GS */
    OUT_BATCH(batch, 0);  /* disable CLIP */
    OUT_RELOC(batch, render_state->sf.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->wm.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    ADVANCE_BATCH(batch);
}

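/*
 * URB_FENCE partitions the URB into back-to-back regions in the fixed
 * VS -> GS -> CLIP -> SF -> CS order; each fence value is the end offset
 * of its region, so the start offsets chain from the previous region.
 */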
static void
i965_render_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    int urb_vs_start, urb_vs_size;
    int urb_gs_start, urb_gs_size;
    int urb_clip_start, urb_clip_size;
    int urb_sf_start, urb_sf_size;
    int urb_cs_start, urb_cs_size;

    urb_vs_start = 0;
    urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
    urb_gs_start = urb_vs_start + urb_vs_size;
    urb_gs_size = URB_GS_ENTRIES * URB_GS_ENTRY_SIZE;
    urb_clip_start = urb_gs_start + urb_gs_size;
    urb_clip_size = URB_CLIP_ENTRIES * URB_CLIP_ENTRY_SIZE;
    urb_sf_start = urb_clip_start + urb_clip_size;
    urb_sf_size = URB_SF_ENTRIES * URB_SF_ENTRY_SIZE;
    urb_cs_start = urb_sf_start + urb_sf_size;
    urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch,
              CMD_URB_FENCE |
              UF0_CS_REALLOC |
              UF0_SF_REALLOC |
              UF0_CLIP_REALLOC |
              UF0_GS_REALLOC |
              UF0_VS_REALLOC |
              1);
    OUT_BATCH(batch,
              ((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
              ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
              ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
    OUT_BATCH(batch,
              ((urb_cs_start + urb_cs_size) << UF2_CS_FENCE_SHIFT) |
              ((urb_sf_start + urb_sf_size) << UF2_SF_FENCE_SHIFT));
    ADVANCE_BATCH(batch);
}

static void
i965_render_cs_urb_layout(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
    OUT_BATCH(batch,
              ((URB_CS_ENTRY_SIZE - 1) << 4) |  /* URB Entry Allocation Size */
              (URB_CS_ENTRIES << 0));           /* Number of URB Entries */
    ADVANCE_BATCH(batch);
}

static void
i965_render_constant_buffer(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
    OUT_RELOC(batch, render_state->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              URB_CS_ENTRY_SIZE - 1);
    ADVANCE_BATCH(batch);
}

static void
i965_render_drawing_rectangle(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;

    BEGIN_BATCH(batch, 4);
    OUT_BATCH(batch, CMD_DRAWING_RECTANGLE | 2);
    OUT_BATCH(batch, 0x00000000);
    OUT_BATCH(batch, (dest_region->width - 1) | ((dest_region->height - 1) << 16));
    OUT_BATCH(batch, 0x00000000);
    ADVANCE_BATCH(batch);
}

static void
i965_render_vertex_elements(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    if (IS_IRONLAKE(i965->intel.device_id)) {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 5);
        OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
        /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (0 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (0 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
        OUT_BATCH(batch, (0 << VE0_VERTEX_BUFFER_INDEX_SHIFT) |
                  VE0_VALID |
                  (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
                  (8 << VE0_OFFSET_SHIFT));
        OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
                  (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
                  (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT) |
                  (4 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT));
        ADVANCE_BATCH(batch);
    }
}

static void
i965_render_upload_image_palette(
    VADriverContextP ctx,
    VAImageID        image_id,
    unsigned int     alpha
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int i;

    struct object_image *obj_image = IMAGE(image_id);
    assert(obj_image);

    if (obj_image->image.num_palette_entries == 0)
        return;

    BEGIN_BATCH(batch, 1 + obj_image->image.num_palette_entries);
    OUT_BATCH(batch, CMD_SAMPLER_PALETTE_LOAD | (obj_image->image.num_palette_entries - 1));
    /* fill palette: each entry carries the color in bits 0-23 and alpha in bits 24-31 */
    for (i = 0; i < obj_image->image.num_palette_entries; i++)
        OUT_BATCH(batch, (alpha << 24) | obj_image->palette[i]);
    ADVANCE_BATCH(batch);
}

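/*
 * Emits the vertex buffer packet and the 3DPRIMITIVE itself. A RECTLIST
 * needs only three vertices, the fourth corner being implied, which is
 * why the vertex count per instance below is 3.
 */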
static void
i965_render_startup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;

    BEGIN_BATCH(batch, 11);
    OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
    OUT_BATCH(batch,
              (0 << VB0_BUFFER_INDEX_SHIFT) |
              VB0_VERTEXDATA |
              ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
    OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);

    if (IS_IRONLAKE(i965->intel.device_id))
        OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
    else
        OUT_BATCH(batch, 3);

    OUT_BATCH(batch, 0);

    OUT_BATCH(batch,
              CMD_3DPRIMITIVE |
              _3DPRIMITIVE_VERTEX_SEQUENTIAL |
              (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
              (0 << 9) |
              4);
    OUT_BATCH(batch, 3); /* vertex count per instance */
    OUT_BATCH(batch, 0); /* start vertex offset */
    OUT_BATCH(batch, 1); /* single instance */
    OUT_BATCH(batch, 0); /* start instance location */
    OUT_BATCH(batch, 0); /* index buffer offset, ignored */
    ADVANCE_BATCH(batch);
}

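/*
 * Clears the destination with an XY_COLOR_BLT before compositing. On
 * GEN6/GEN7 the blitter lives on its own ring, so the batch is started
 * in BLT mode there; tiled destinations pass their pitch in dwords
 * (hence the divide by 4).
 */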
static void
i965_clear_dest_region(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    struct i965_render_state *render_state = &i965->render_state;
    struct intel_region *dest_region = render_state->draw_region;
    unsigned int blt_cmd, br13;
    int pitch;

    blt_cmd = XY_COLOR_BLT_CMD;
    br13 = 0xf0 << 16;
    pitch = dest_region->pitch;

    if (dest_region->cpp == 4) {
        br13 |= BR13_8888;
        blt_cmd |= (XY_COLOR_BLT_WRITE_RGB | XY_COLOR_BLT_WRITE_ALPHA);
    } else {
        assert(dest_region->cpp == 2);
        br13 |= BR13_565;
    }

    if (dest_region->tiling != I915_TILING_NONE) {
        blt_cmd |= XY_COLOR_BLT_DST_TILED;
        pitch /= 4;
    }

    br13 |= pitch;

    if (IS_GEN6(i965->intel.device_id) ||
        IS_GEN7(i965->intel.device_id)) {
        intel_batchbuffer_start_atomic_blt(batch, 24);
        BEGIN_BLT_BATCH(batch, 6);
    } else {
        intel_batchbuffer_start_atomic(batch, 24);
        BEGIN_BATCH(batch, 6);
    }

    OUT_BATCH(batch, blt_cmd);
    OUT_BATCH(batch, br13);
    OUT_BATCH(batch, (dest_region->y << 16) | (dest_region->x));
    OUT_BATCH(batch, ((dest_region->y + dest_region->height) << 16) |
              (dest_region->x + dest_region->width));
    OUT_RELOC(batch, dest_region->bo,
              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
              0);
    OUT_BATCH(batch, 0x0);
    ADVANCE_BATCH(batch);
    intel_batchbuffer_end_atomic(batch);
}
1441
1442 static void
1443 i965_surface_render_pipeline_setup(VADriverContextP ctx)
1444 {
1445     struct i965_driver_data *i965 = i965_driver_data(ctx);
1446     struct intel_batchbuffer *batch = i965->batch;
1447
1448     i965_clear_dest_region(ctx);
1449     intel_batchbuffer_start_atomic(batch, 0x1000);
1450     intel_batchbuffer_emit_mi_flush(batch);
1451     i965_render_pipeline_select(ctx);
1452     i965_render_state_sip(ctx);
1453     i965_render_state_base_address(ctx);
1454     i965_render_binding_table_pointers(ctx);
1455     i965_render_constant_color(ctx);
1456     i965_render_pipelined_pointers(ctx);
1457     i965_render_urb_layout(ctx);
1458     i965_render_cs_urb_layout(ctx);
1459     i965_render_constant_buffer(ctx);
1460     i965_render_drawing_rectangle(ctx);
1461     i965_render_vertex_elements(ctx);
1462     i965_render_startup(ctx);
1463     intel_batchbuffer_end_atomic(batch);
1464 }
1465
1466 static void
1467 i965_subpic_render_pipeline_setup(VADriverContextP ctx)
1468 {
1469     struct i965_driver_data *i965 = i965_driver_data(ctx);
1470     struct intel_batchbuffer *batch = i965->batch;
1471
1472     intel_batchbuffer_start_atomic(batch, 0x1000);
1473     intel_batchbuffer_emit_mi_flush(batch);
1474     i965_render_pipeline_select(ctx);
1475     i965_render_state_sip(ctx);
1476     i965_render_state_base_address(ctx);
1477     i965_render_binding_table_pointers(ctx);
1478     i965_render_constant_color(ctx);
1479     i965_render_pipelined_pointers(ctx);
1480     i965_render_urb_layout(ctx);
1481     i965_render_cs_urb_layout(ctx);
1482     i965_render_drawing_rectangle(ctx);
1483     i965_render_vertex_elements(ctx);
1484     i965_render_startup(ctx);
1485     intel_batchbuffer_end_atomic(batch);
1486 }
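/*
 * Same fixed-function setup as i965_surface_render_pipeline_setup(),
 * minus the destination clear and i965_render_constant_buffer(): the
 * subpicture pixel shader samples ARGB directly, so it presumably has
 * no use for the YUV->RGB constants the video path uploads.
 */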
1487
1488
1489 static void 
1490 i965_render_initialize(VADriverContextP ctx)
1491 {
1492     struct i965_driver_data *i965 = i965_driver_data(ctx);
1493     struct i965_render_state *render_state = &i965->render_state;
1494     dri_bo *bo;
1495
1496     /* VERTEX BUFFER */
1497     dri_bo_unreference(render_state->vb.vertex_buffer);
1498     bo = dri_bo_alloc(i965->intel.bufmgr,
1499                       "vertex buffer",
1500                       4096,
1501                       4096);
1502     assert(bo);
1503     render_state->vb.vertex_buffer = bo;
1504
1505     /* VS */
1506     dri_bo_unreference(render_state->vs.state);
1507     bo = dri_bo_alloc(i965->intel.bufmgr,
1508                       "vs state",
1509                       sizeof(struct i965_vs_unit_state),
1510                       64);
1511     assert(bo);
1512     render_state->vs.state = bo;
1513
1514     /* GS */
1515     /* CLIP */
1516     /* SF */
1517     dri_bo_unreference(render_state->sf.state);
1518     bo = dri_bo_alloc(i965->intel.bufmgr,
1519                       "sf state",
1520                       sizeof(struct i965_sf_unit_state),
1521                       64);
1522     assert(bo);
1523     render_state->sf.state = bo;
1524
1525     /* WM */
1526     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1527     bo = dri_bo_alloc(i965->intel.bufmgr,
1528                       "surface state & binding table",
1529                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1530                       4096);
1531     assert(bo);
1532     render_state->wm.surface_state_binding_table_bo = bo;
1533
1534     dri_bo_unreference(render_state->wm.sampler);
1535     bo = dri_bo_alloc(i965->intel.bufmgr,
1536                       "sampler state",
1537                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1538                       64);
1539     assert(bo);
1540     render_state->wm.sampler = bo;
1541     render_state->wm.sampler_count = 0;
1542
1543     dri_bo_unreference(render_state->wm.state);
1544     bo = dri_bo_alloc(i965->intel.bufmgr,
1545                       "wm state",
1546                       sizeof(struct i965_wm_unit_state),
1547                       64);
1548     assert(bo);
1549     render_state->wm.state = bo;
1550
1551     /* COLOR CALCULATOR */
1552     dri_bo_unreference(render_state->cc.state);
1553     bo = dri_bo_alloc(i965->intel.bufmgr,
1554                       "color calc state",
1555                       sizeof(struct i965_cc_unit_state),
1556                       64);
1557     assert(bo);
1558     render_state->cc.state = bo;
1559
1560     dri_bo_unreference(render_state->cc.viewport);
1561     bo = dri_bo_alloc(i965->intel.bufmgr,
1562                       "cc viewport",
1563                       sizeof(struct i965_cc_viewport),
1564                       64);
1565     assert(bo);
1566     render_state->cc.viewport = bo;
1567 }
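/*
 * All state buffer objects are dropped and reallocated on every
 * put-surface call; dri_bo_unreference(NULL) is a no-op, so the first
 * invocation is safe even though the pointers start out NULL.
 */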
1568
1569 static void
1570 i965_render_put_surface(
1571     VADriverContextP   ctx,
1572     VASurfaceID        surface,
1573     const VARectangle *src_rect,
1574     const VARectangle *dst_rect,
1575     unsigned int       flags
1576 )
1577 {
1578     struct i965_driver_data *i965 = i965_driver_data(ctx);
1579     struct intel_batchbuffer *batch = i965->batch;
1580
1581     i965_render_initialize(ctx);
1582     i965_surface_render_state_setup(ctx, surface, src_rect, dst_rect, flags);
1583     i965_surface_render_pipeline_setup(ctx);
1584     intel_batchbuffer_flush(batch);
1585 }
1586
1587 static void
1588 i965_render_put_subpicture(
1589     VADriverContextP   ctx,
1590     VASurfaceID        surface,
1591     const VARectangle *src_rect,
1592     const VARectangle *dst_rect
1593 )
1594 {
1595     struct i965_driver_data *i965 = i965_driver_data(ctx);
1596     struct intel_batchbuffer *batch = i965->batch;
1597     struct object_surface *obj_surface = SURFACE(surface);
1598     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic);
1599
1600     assert(obj_subpic);
1601
1602     i965_render_initialize(ctx);
1603     i965_subpic_render_state_setup(ctx, surface, src_rect, dst_rect);
1604     i965_subpic_render_pipeline_setup(ctx);
1605     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
1606     intel_batchbuffer_flush(batch);
1607 }
1608
1609 /*
1610  * for GEN6
1611  */
1612 static void 
1613 gen6_render_initialize(VADriverContextP ctx)
1614 {
1615     struct i965_driver_data *i965 = i965_driver_data(ctx);
1616     struct i965_render_state *render_state = &i965->render_state;
1617     dri_bo *bo;
1618
1619     /* VERTEX BUFFER */
1620     dri_bo_unreference(render_state->vb.vertex_buffer);
1621     bo = dri_bo_alloc(i965->intel.bufmgr,
1622                       "vertex buffer",
1623                       4096,
1624                       4096);
1625     assert(bo);
1626     render_state->vb.vertex_buffer = bo;
1627
1628     /* WM */
1629     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
1630     bo = dri_bo_alloc(i965->intel.bufmgr,
1631                       "surface state & binding table",
1632                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
1633                       4096);
1634     assert(bo);
1635     render_state->wm.surface_state_binding_table_bo = bo;
1636
1637     dri_bo_unreference(render_state->wm.sampler);
1638     bo = dri_bo_alloc(i965->intel.bufmgr,
1639                       "sampler state",
1640                       MAX_SAMPLERS * sizeof(struct i965_sampler_state),
1641                       4096);
1642     assert(bo);
1643     render_state->wm.sampler = bo;
1644     render_state->wm.sampler_count = 0;
1645
1646     /* COLOR CALCULATOR */
1647     dri_bo_unreference(render_state->cc.state);
1648     bo = dri_bo_alloc(i965->intel.bufmgr,
1649                       "color calc state",
1650                       sizeof(struct gen6_color_calc_state),
1651                       4096);
1652     assert(bo);
1653     render_state->cc.state = bo;
1654
1655     /* CC VIEWPORT */
1656     dri_bo_unreference(render_state->cc.viewport);
1657     bo = dri_bo_alloc(i965->intel.bufmgr,
1658                       "cc viewport",
1659                       sizeof(struct i965_cc_viewport),
1660                       4096);
1661     assert(bo);
1662     render_state->cc.viewport = bo;
1663
1664     /* BLEND STATE */
1665     dri_bo_unreference(render_state->cc.blend);
1666     bo = dri_bo_alloc(i965->intel.bufmgr,
1667                       "blend state",
1668                       sizeof(struct gen6_blend_state),
1669                       4096);
1670     assert(bo);
1671     render_state->cc.blend = bo;
1672
1673     /* DEPTH & STENCIL STATE */
1674     dri_bo_unreference(render_state->cc.depth_stencil);
1675     bo = dri_bo_alloc(i965->intel.bufmgr,
1676                       "depth & stencil state",
1677                       sizeof(struct gen6_depth_stencil_state),
1678                       4096);
1679     assert(bo);
1680     render_state->cc.depth_stencil = bo;
1681 }
1682
1683 static void
1684 gen6_render_color_calc_state(VADriverContextP ctx)
1685 {
1686     struct i965_driver_data *i965 = i965_driver_data(ctx);
1687     struct i965_render_state *render_state = &i965->render_state;
1688     struct gen6_color_calc_state *color_calc_state;
1689     
1690     dri_bo_map(render_state->cc.state, 1);
1691     assert(render_state->cc.state->virtual);
1692     color_calc_state = render_state->cc.state->virtual;
1693     memset(color_calc_state, 0, sizeof(*color_calc_state));
1694     color_calc_state->constant_r = 1.0;
1695     color_calc_state->constant_g = 0.0;
1696     color_calc_state->constant_b = 1.0;
1697     color_calc_state->constant_a = 1.0;
1698     dri_bo_unmap(render_state->cc.state);
1699 }
1700
1701 static void
1702 gen6_render_blend_state(VADriverContextP ctx)
1703 {
1704     struct i965_driver_data *i965 = i965_driver_data(ctx);
1705     struct i965_render_state *render_state = &i965->render_state;
1706     struct gen6_blend_state *blend_state;
1707     
1708     dri_bo_map(render_state->cc.blend, 1);
1709     assert(render_state->cc.blend->virtual);
1710     blend_state = render_state->cc.blend->virtual;
1711     memset(blend_state, 0, sizeof(*blend_state));
1712     blend_state->blend1.logic_op_enable = 1;
1713     blend_state->blend1.logic_op_func = 0xc;
1714     dri_bo_unmap(render_state->cc.blend);
1715 }
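/*
 * Logic op 0xc is COPY (source replaces destination), so this blend
 * state effectively disables blending for the main video plane.
 */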
1716
1717 static void
1718 gen6_render_depth_stencil_state(VADriverContextP ctx)
1719 {
1720     struct i965_driver_data *i965 = i965_driver_data(ctx);
1721     struct i965_render_state *render_state = &i965->render_state;
1722     struct gen6_depth_stencil_state *depth_stencil_state;
1723     
1724     dri_bo_map(render_state->cc.depth_stencil, 1);
1725     assert(render_state->cc.depth_stencil->virtual);
1726     depth_stencil_state = render_state->cc.depth_stencil->virtual;
1727     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
1728     dri_bo_unmap(render_state->cc.depth_stencil);
1729 }
1730
1731 static void
1732 gen6_render_setup_states(
1733     VADriverContextP   ctx,
1734     VASurfaceID        surface,
1735     const VARectangle *src_rect,
1736     const VARectangle *dst_rect,
1737     unsigned int       flags
1738 )
1739 {
1740     i965_render_dest_surface_state(ctx, 0);
1741     i965_render_src_surfaces_state(ctx, surface, flags);
1742     i965_render_sampler(ctx);
1743     i965_render_cc_viewport(ctx);
1744     gen6_render_color_calc_state(ctx);
1745     gen6_render_blend_state(ctx);
1746     gen6_render_depth_stencil_state(ctx);
1747     i965_render_upload_constants(ctx, surface);
1748     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
1749 }
1750
1751 static void
1752 gen6_emit_invariant_states(VADriverContextP ctx)
1753 {
1754     struct i965_driver_data *i965 = i965_driver_data(ctx);
1755     struct intel_batchbuffer *batch = i965->batch;
1756
1757     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
1758
1759     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (3 - 2));
1760     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
1761               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
1762     OUT_BATCH(batch, 0);
1763
1764     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
1765     OUT_BATCH(batch, 1);
1766
1767     /* Set system instruction pointer */
1768     OUT_BATCH(batch, CMD_STATE_SIP | 0);
1769     OUT_BATCH(batch, 0);
1770 }
1771
1772 static void
1773 gen6_emit_state_base_address(VADriverContextP ctx)
1774 {
1775     struct i965_driver_data *i965 = i965_driver_data(ctx);
1776     struct intel_batchbuffer *batch = i965->batch;
1777     struct i965_render_state *render_state = &i965->render_state;
1778
1779     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
1780     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
1781     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1782     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
1783     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
1784     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
1785     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
1786     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
1787     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
1788     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
1789 }
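/*
 * Only the surface state base address points at a real buffer; every
 * other base is left at zero with BASE_ADDRESS_MODIFY set. As a
 * result, BINDING_TABLE_OFFSET and the per-surface state offsets are
 * interpreted relative to wm.surface_state_binding_table_bo, while
 * kernel and sampler pointers remain absolute graphics addresses.
 */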
1790
1791 static void
1792 gen6_emit_viewport_state_pointers(VADriverContextP ctx)
1793 {
1794     struct i965_driver_data *i965 = i965_driver_data(ctx);
1795     struct intel_batchbuffer *batch = i965->batch;
1796     struct i965_render_state *render_state = &i965->render_state;
1797
1798     OUT_BATCH(batch, GEN6_3DSTATE_VIEWPORT_STATE_POINTERS |
1799               GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC |
1800               (4 - 2));
1801     OUT_BATCH(batch, 0);
1802     OUT_BATCH(batch, 0);
1803     OUT_RELOC(batch, render_state->cc.viewport, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1804 }
1805
1806 static void
1807 gen6_emit_urb(VADriverContextP ctx)
1808 {
1809     struct i965_driver_data *i965 = i965_driver_data(ctx);
1810     struct intel_batchbuffer *batch = i965->batch;
1811
1812     OUT_BATCH(batch, GEN6_3DSTATE_URB | (3 - 2));
1813     OUT_BATCH(batch, ((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT) |
1814               (24 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
1815     OUT_BATCH(batch, (0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT) |
1816               (0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT)); /* no GS thread */
1817 }
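/*
 * A minimal URB split, assuming the usual GEN6 encodings: the VS entry
 * size field is programmed as (size - 1) in 1024-bit units, so (1 - 1)
 * selects 128-byte entries, and 24 entries is the documented hardware
 * minimum. GS gets nothing because no GS threads are ever dispatched
 * here.
 */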
1818
1819 static void
1820 gen6_emit_cc_state_pointers(VADriverContextP ctx)
1821 {
1822     struct i965_driver_data *i965 = i965_driver_data(ctx);
1823     struct intel_batchbuffer *batch = i965->batch;
1824     struct i965_render_state *render_state = &i965->render_state;
1825
1826     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
1827     OUT_RELOC(batch, render_state->cc.blend, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1828     OUT_RELOC(batch, render_state->cc.depth_stencil, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1829     OUT_RELOC(batch, render_state->cc.state, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
1830 }
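/*
 * The relocation delta of 1 on each pointer sets bit 0 of the emitted
 * dword, which in this packet acts as the per-pointer modify enable;
 * without it the hardware would ignore the new blend, depth-stencil
 * and color-calc pointers.
 */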
1831
1832 static void
1833 gen6_emit_sampler_state_pointers(VADriverContextP ctx)
1834 {
1835     struct i965_driver_data *i965 = i965_driver_data(ctx);
1836     struct intel_batchbuffer *batch = i965->batch;
1837     struct i965_render_state *render_state = &i965->render_state;
1838
1839     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLER_STATE_POINTERS |
1840               GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS |
1841               (4 - 2));
1842     OUT_BATCH(batch, 0); /* VS */
1843     OUT_BATCH(batch, 0); /* GS */
1844     OUT_RELOC(batch, render_state->wm.sampler, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1845 }
1846
1847 static void
1848 gen6_emit_binding_table(VADriverContextP ctx)
1849 {
1850     struct i965_driver_data *i965 = i965_driver_data(ctx);
1851     struct intel_batchbuffer *batch = i965->batch;
1852
1853     /* Binding table pointers */
1854     OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS |
1855               GEN6_BINDING_TABLE_MODIFY_PS |
1856               (4 - 2));
1857     OUT_BATCH(batch, 0);                /* vs */
1858     OUT_BATCH(batch, 0);                /* gs */
1859     /* Only the PS uses the binding table */
1860     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
1861 }
1862
1863 static void
1864 gen6_emit_depth_buffer_state(VADriverContextP ctx)
1865 {
1866     struct i965_driver_data *i965 = i965_driver_data(ctx);
1867     struct intel_batchbuffer *batch = i965->batch;
1868
1869     OUT_BATCH(batch, CMD_DEPTH_BUFFER | (7 - 2));
1870     OUT_BATCH(batch, (I965_SURFACE_NULL << CMD_DEPTH_BUFFER_TYPE_SHIFT) |
1871               (I965_DEPTHFORMAT_D32_FLOAT << CMD_DEPTH_BUFFER_FORMAT_SHIFT));
1872     OUT_BATCH(batch, 0);
1873     OUT_BATCH(batch, 0);
1874     OUT_BATCH(batch, 0);
1875     OUT_BATCH(batch, 0);
1876     OUT_BATCH(batch, 0);
1877
1878     OUT_BATCH(batch, CMD_CLEAR_PARAMS | (2 - 2));
1879     OUT_BATCH(batch, 0);
1880 }
1881
1882 static void
1883 gen6_emit_drawing_rectangle(VADriverContextP ctx)
1884 {
1885     i965_render_drawing_rectangle(ctx);
1886 }
1887
1888 static void 
1889 gen6_emit_vs_state(VADriverContextP ctx)
1890 {
1891     struct i965_driver_data *i965 = i965_driver_data(ctx);
1892     struct intel_batchbuffer *batch = i965->batch;
1893
1894     /* disable VS constant buffer */
1895     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (5 - 2));
1896     OUT_BATCH(batch, 0);
1897     OUT_BATCH(batch, 0);
1898     OUT_BATCH(batch, 0);
1899     OUT_BATCH(batch, 0);
1900         
1901     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
1902     OUT_BATCH(batch, 0); /* without VS kernel */
1903     OUT_BATCH(batch, 0);
1904     OUT_BATCH(batch, 0);
1905     OUT_BATCH(batch, 0);
1906     OUT_BATCH(batch, 0); /* pass-through */
1907 }
1908
1909 static void 
1910 gen6_emit_gs_state(VADriverContextP ctx)
1911 {
1912     struct i965_driver_data *i965 = i965_driver_data(ctx);
1913     struct intel_batchbuffer *batch = i965->batch;
1914
1915     /* disable GS constant buffer */
1916     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (5 - 2));
1917     OUT_BATCH(batch, 0);
1918     OUT_BATCH(batch, 0);
1919     OUT_BATCH(batch, 0);
1920     OUT_BATCH(batch, 0);
1921         
1922     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
1923     OUT_BATCH(batch, 0); /* without GS kernel */
1924     OUT_BATCH(batch, 0);
1925     OUT_BATCH(batch, 0);
1926     OUT_BATCH(batch, 0);
1927     OUT_BATCH(batch, 0);
1928     OUT_BATCH(batch, 0); /* pass-through */
1929 }
1930
1931 static void 
1932 gen6_emit_clip_state(VADriverContextP ctx)
1933 {
1934     struct i965_driver_data *i965 = i965_driver_data(ctx);
1935     struct intel_batchbuffer *batch = i965->batch;
1936
1937     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
1938     OUT_BATCH(batch, 0);
1939     OUT_BATCH(batch, 0); /* pass-through */
1940     OUT_BATCH(batch, 0);
1941 }
1942
1943 static void 
1944 gen6_emit_sf_state(VADriverContextP ctx)
1945 {
1946     struct i965_driver_data *i965 = i965_driver_data(ctx);
1947     struct intel_batchbuffer *batch = i965->batch;
1948
1949     OUT_BATCH(batch, GEN6_3DSTATE_SF | (20 - 2));
1950     OUT_BATCH(batch, (1 << GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT) |
1951               (1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT) |
1952               (0 << GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT));
1953     OUT_BATCH(batch, 0);
1954     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
1955     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT); /* DW4 */
1956     OUT_BATCH(batch, 0);
1957     OUT_BATCH(batch, 0);
1958     OUT_BATCH(batch, 0);
1959     OUT_BATCH(batch, 0);
1960     OUT_BATCH(batch, 0); /* DW9 */
1961     OUT_BATCH(batch, 0);
1962     OUT_BATCH(batch, 0);
1963     OUT_BATCH(batch, 0);
1964     OUT_BATCH(batch, 0);
1965     OUT_BATCH(batch, 0); /* DW14 */
1966     OUT_BATCH(batch, 0);
1967     OUT_BATCH(batch, 0);
1968     OUT_BATCH(batch, 0);
1969     OUT_BATCH(batch, 0);
1970     OUT_BATCH(batch, 0); /* DW19 */
1971 }
1972
1973 static void 
1974 gen6_emit_wm_state(VADriverContextP ctx, int kernel)
1975 {
1976     struct i965_driver_data *i965 = i965_driver_data(ctx);
1977     struct intel_batchbuffer *batch = i965->batch;
1978     struct i965_render_state *render_state = &i965->render_state;
1979
1980     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS |
1981               GEN6_3DSTATE_CONSTANT_BUFFER_0_ENABLE |
1982               (5 - 2));
1983     OUT_RELOC(batch, 
1984               render_state->curbe.bo,
1985               I915_GEM_DOMAIN_INSTRUCTION, 0,
1986               0);
1987     OUT_BATCH(batch, 0);
1988     OUT_BATCH(batch, 0);
1989     OUT_BATCH(batch, 0);
1990
1991     OUT_BATCH(batch, GEN6_3DSTATE_WM | (9 - 2));
1992     OUT_RELOC(batch, render_state->render_kernels[kernel].bo,
1993               I915_GEM_DOMAIN_INSTRUCTION, 0,
1994               0);
1995     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHITF) |
1996               (5 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT));
1997     OUT_BATCH(batch, 0);
1998     OUT_BATCH(batch, (6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT)); /* DW4 */
1999     OUT_BATCH(batch, ((40 - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
2000               GEN6_3DSTATE_WM_DISPATCH_ENABLE |
2001               GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
2002     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT) |
2003               GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
2004     OUT_BATCH(batch, 0);
2005     OUT_BATCH(batch, 0);
2006 }
2007
2008 static void
2009 gen6_emit_vertex_element_state(VADriverContextP ctx)
2010 {
2011     struct i965_driver_data *i965 = i965_driver_data(ctx);
2012     struct intel_batchbuffer *batch = i965->batch;
2013
2014     /* Set up our vertex elements, sourced from the single vertex buffer. */
2015     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
2016     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
2017     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2018               GEN6_VE0_VALID |
2019               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2020               (0 << VE0_OFFSET_SHIFT));
2021     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
2022               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2023               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2024               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2025     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
2026     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2027               GEN6_VE0_VALID |
2028               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2029               (8 << VE0_OFFSET_SHIFT));
2030     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
2031               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2032               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2033               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2034 }
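/*
 * STORE_SRC copies the two floats found at the element's offset, while
 * STORE_1_FLT synthesizes 1.0 for the missing components, so the
 * pipeline sees full (x, y, 1.0, 1.0) and (s, t, 1.0, 1.0) float4
 * attributes from the packed 16-byte vertices.
 */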
2035
2036 static void
2037 gen6_emit_vertices(VADriverContextP ctx)
2038 {
2039     struct i965_driver_data *i965 = i965_driver_data(ctx);
2040     struct intel_batchbuffer *batch = i965->batch;
2041     struct i965_render_state *render_state = &i965->render_state;
2042
2043     BEGIN_BATCH(batch, 11);
2044     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
2045     OUT_BATCH(batch, 
2046               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2047               GEN6_VB0_VERTEXDATA |
2048               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2049     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2050     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2051     OUT_BATCH(batch, 0);
2052
2053     OUT_BATCH(batch, 
2054               CMD_3DPRIMITIVE |
2055               _3DPRIMITIVE_VERTEX_SEQUENTIAL |
2056               (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
2057               (0 << 9) |
2058               4);
2059     OUT_BATCH(batch, 3); /* vertex count per instance */
2060     OUT_BATCH(batch, 0); /* start vertex offset */
2061     OUT_BATCH(batch, 1); /* single instance */
2062     OUT_BATCH(batch, 0); /* start instance location */
2063     OUT_BATCH(batch, 0); /* index buffer offset, ignored */
2064     ADVANCE_BATCH(batch);
2065 }
2066
2067 static void
2068 gen6_render_emit_states(VADriverContextP ctx, int kernel)
2069 {
2070     struct i965_driver_data *i965 = i965_driver_data(ctx);
2071     struct intel_batchbuffer *batch = i965->batch;
2072
2073     intel_batchbuffer_start_atomic(batch, 0x1000);
2074     intel_batchbuffer_emit_mi_flush(batch);
2075     gen6_emit_invariant_states(ctx);
2076     gen6_emit_state_base_address(ctx);
2077     gen6_emit_viewport_state_pointers(ctx);
2078     gen6_emit_urb(ctx);
2079     gen6_emit_cc_state_pointers(ctx);
2080     gen6_emit_sampler_state_pointers(ctx);
2081     gen6_emit_vs_state(ctx);
2082     gen6_emit_gs_state(ctx);
2083     gen6_emit_clip_state(ctx);
2084     gen6_emit_sf_state(ctx);
2085     gen6_emit_wm_state(ctx, kernel);
2086     gen6_emit_binding_table(ctx);
2087     gen6_emit_depth_buffer_state(ctx);
2088     gen6_emit_drawing_rectangle(ctx);
2089     gen6_emit_vertex_element_state(ctx);
2090     gen6_emit_vertices(ctx);
2091     intel_batchbuffer_end_atomic(batch);
2092 }
2093
2094 static void
2095 gen6_render_put_surface(
2096     VADriverContextP   ctx,
2097     VASurfaceID        surface,
2098     const VARectangle *src_rect,
2099     const VARectangle *dst_rect,
2100     unsigned int       flags
2101 )
2102 {
2103     struct i965_driver_data *i965 = i965_driver_data(ctx);
2104     struct intel_batchbuffer *batch = i965->batch;
2105
2106     gen6_render_initialize(ctx);
2107     gen6_render_setup_states(ctx, surface, src_rect, dst_rect, flags);
2108     i965_clear_dest_region(ctx);
2109     gen6_render_emit_states(ctx, PS_KERNEL);
2110     intel_batchbuffer_flush(batch);
2111 }
2112
2113 static void
2114 gen6_subpicture_render_blend_state(VADriverContextP ctx)
2115 {
2116     struct i965_driver_data *i965 = i965_driver_data(ctx);
2117     struct i965_render_state *render_state = &i965->render_state;
2118     struct gen6_blend_state *blend_state;
2119
2121     dri_bo_map(render_state->cc.blend, 1);
2122     assert(render_state->cc.blend->virtual);
2123     blend_state = render_state->cc.blend->virtual;
2124     memset(blend_state, 0, sizeof(*blend_state));
2125     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2126     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2127     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2128     blend_state->blend0.blend_enable = 1;
2129     blend_state->blend1.post_blend_clamp_enable = 1;
2130     blend_state->blend1.pre_blend_clamp_enable = 1;
2131     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2132     dri_bo_unmap(render_state->cc.blend);
2133 }
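/*
 * Standard non-premultiplied "over" compositing for subpictures, with
 * the pre/post clamps keeping everything in [0, 1]:
 *
 *   C_result = C_sub * A_sub + C_dst * (1 - A_sub)
 */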
2134
2135 static void
2136 gen6_subpicture_render_setup_states(
2137     VADriverContextP   ctx,
2138     VASurfaceID        surface,
2139     const VARectangle *src_rect,
2140     const VARectangle *dst_rect
2141 )
2142 {
2143     i965_render_dest_surface_state(ctx, 0);
2144     i965_subpic_render_src_surfaces_state(ctx, surface);
2145     i965_render_sampler(ctx);
2146     i965_render_cc_viewport(ctx);
2147     gen6_render_color_calc_state(ctx);
2148     gen6_subpicture_render_blend_state(ctx);
2149     gen6_render_depth_stencil_state(ctx);
2150     i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
2151 }
2152
2153 static void
2154 gen6_render_put_subpicture(
2155     VADriverContextP   ctx,
2156     VASurfaceID        surface,
2157     const VARectangle *src_rect,
2158     const VARectangle *dst_rect
2159 )
2160 {
2161     struct i965_driver_data *i965 = i965_driver_data(ctx);
2162     struct intel_batchbuffer *batch = i965->batch;
2163     struct object_surface *obj_surface = SURFACE(surface);
2164     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic);
2165
2166     assert(obj_subpic);
2167     gen6_render_initialize(ctx);
2168     gen6_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
2169     gen6_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2170     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2171     intel_batchbuffer_flush(batch);
2172 }
2173
2174 /*
2175  * for GEN7
2176  */
2177 static void 
2178 gen7_render_initialize(VADriverContextP ctx)
2179 {
2180     struct i965_driver_data *i965 = i965_driver_data(ctx);
2181     struct i965_render_state *render_state = &i965->render_state;
2182     dri_bo *bo;
2183
2184     /* VERTEX BUFFER */
2185     dri_bo_unreference(render_state->vb.vertex_buffer);
2186     bo = dri_bo_alloc(i965->intel.bufmgr,
2187                       "vertex buffer",
2188                       4096,
2189                       4096);
2190     assert(bo);
2191     render_state->vb.vertex_buffer = bo;
2192
2193     /* WM */
2194     dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
2195     bo = dri_bo_alloc(i965->intel.bufmgr,
2196                       "surface state & binding table",
2197                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_RENDER_SURFACES,
2198                       4096);
2199     assert(bo);
2200     render_state->wm.surface_state_binding_table_bo = bo;
2201
2202     dri_bo_unreference(render_state->wm.sampler);
2203     bo = dri_bo_alloc(i965->intel.bufmgr,
2204                       "sampler state",
2205                       MAX_SAMPLERS * sizeof(struct gen7_sampler_state),
2206                       4096);
2207     assert(bo);
2208     render_state->wm.sampler = bo;
2209     render_state->wm.sampler_count = 0;
2210
2211     /* COLOR CALCULATOR */
2212     dri_bo_unreference(render_state->cc.state);
2213     bo = dri_bo_alloc(i965->intel.bufmgr,
2214                       "color calc state",
2215                       sizeof(struct gen6_color_calc_state),
2216                       4096);
2217     assert(bo);
2218     render_state->cc.state = bo;
2219
2220     /* CC VIEWPORT */
2221     dri_bo_unreference(render_state->cc.viewport);
2222     bo = dri_bo_alloc(i965->intel.bufmgr,
2223                       "cc viewport",
2224                       sizeof(struct i965_cc_viewport),
2225                       4096);
2226     assert(bo);
2227     render_state->cc.viewport = bo;
2228
2229     /* BLEND STATE */
2230     dri_bo_unreference(render_state->cc.blend);
2231     bo = dri_bo_alloc(i965->intel.bufmgr,
2232                       "blend state",
2233                       sizeof(struct gen6_blend_state),
2234                       4096);
2235     assert(bo);
2236     render_state->cc.blend = bo;
2237
2238     /* DEPTH & STENCIL STATE */
2239     dri_bo_unreference(render_state->cc.depth_stencil);
2240     bo = dri_bo_alloc(i965->intel.bufmgr,
2241                       "depth & stencil state",
2242                       sizeof(struct gen6_depth_stencil_state),
2243                       4096);
2244     assert(bo);
2245     render_state->cc.depth_stencil = bo;
2246 }
2247
2248 static void
2249 gen7_render_color_calc_state(VADriverContextP ctx)
2250 {
2251     struct i965_driver_data *i965 = i965_driver_data(ctx);
2252     struct i965_render_state *render_state = &i965->render_state;
2253     struct gen6_color_calc_state *color_calc_state;
2254     
2255     dri_bo_map(render_state->cc.state, 1);
2256     assert(render_state->cc.state->virtual);
2257     color_calc_state = render_state->cc.state->virtual;
2258     memset(color_calc_state, 0, sizeof(*color_calc_state));
2259     color_calc_state->constant_r = 1.0;
2260     color_calc_state->constant_g = 0.0;
2261     color_calc_state->constant_b = 1.0;
2262     color_calc_state->constant_a = 1.0;
2263     dri_bo_unmap(render_state->cc.state);
2264 }
2265
2266 static void
2267 gen7_render_blend_state(VADriverContextP ctx)
2268 {
2269     struct i965_driver_data *i965 = i965_driver_data(ctx);
2270     struct i965_render_state *render_state = &i965->render_state;
2271     struct gen6_blend_state *blend_state;
2272     
2273     dri_bo_map(render_state->cc.blend, 1);
2274     assert(render_state->cc.blend->virtual);
2275     blend_state = render_state->cc.blend->virtual;
2276     memset(blend_state, 0, sizeof(*blend_state));
2277     blend_state->blend1.logic_op_enable = 1;
2278     blend_state->blend1.logic_op_func = 0xc;
2279     blend_state->blend1.pre_blend_clamp_enable = 1;
2280     dri_bo_unmap(render_state->cc.blend);
2281 }
2282
2283 static void
2284 gen7_render_depth_stencil_state(VADriverContextP ctx)
2285 {
2286     struct i965_driver_data *i965 = i965_driver_data(ctx);
2287     struct i965_render_state *render_state = &i965->render_state;
2288     struct gen6_depth_stencil_state *depth_stencil_state;
2289     
2290     dri_bo_map(render_state->cc.depth_stencil, 1);
2291     assert(render_state->cc.depth_stencil->virtual);
2292     depth_stencil_state = render_state->cc.depth_stencil->virtual;
2293     memset(depth_stencil_state, 0, sizeof(*depth_stencil_state));
2294     dri_bo_unmap(render_state->cc.depth_stencil);
2295 }
2296
2297 static void 
2298 gen7_render_sampler(VADriverContextP ctx)
2299 {
2300     struct i965_driver_data *i965 = i965_driver_data(ctx);
2301     struct i965_render_state *render_state = &i965->render_state;
2302     struct gen7_sampler_state *sampler_state;
2303     int i;
2304     
2305     assert(render_state->wm.sampler_count > 0);
2306     assert(render_state->wm.sampler_count <= MAX_SAMPLERS);
2307
2308     dri_bo_map(render_state->wm.sampler, 1);
2309     assert(render_state->wm.sampler->virtual);
2310     sampler_state = render_state->wm.sampler->virtual;
2311     for (i = 0; i < render_state->wm.sampler_count; i++) {
2312         memset(sampler_state, 0, sizeof(*sampler_state));
2313         sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
2314         sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
2315         sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2316         sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2317         sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
2318         sampler_state++;
2319     }
2320
2321     dri_bo_unmap(render_state->wm.sampler);
2322 }
2323
2324 static void
2325 gen7_render_setup_states(
2326     VADriverContextP   ctx,
2327     VASurfaceID        surface,
2328     const VARectangle *src_rect,
2329     const VARectangle *dst_rect,
2330     unsigned int       flags
2331 )
2332 {
2333     i965_render_dest_surface_state(ctx, 0);
2334     i965_render_src_surfaces_state(ctx, surface, flags);
2335     gen7_render_sampler(ctx);
2336     i965_render_cc_viewport(ctx);
2337     gen7_render_color_calc_state(ctx);
2338     gen7_render_blend_state(ctx);
2339     gen7_render_depth_stencil_state(ctx);
2340     i965_render_upload_constants(ctx, surface);
2341     i965_render_upload_vertex(ctx, surface, src_rect, dst_rect);
2342 }
2343
2344 static void
2345 gen7_emit_invariant_states(VADriverContextP ctx)
2346 {
2347     struct i965_driver_data *i965 = i965_driver_data(ctx);
2348     struct intel_batchbuffer *batch = i965->batch;
2349
2350     BEGIN_BATCH(batch, 1);
2351     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
2352     ADVANCE_BATCH(batch);
2353
2354     BEGIN_BATCH(batch, 4);
2355     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE | (4 - 2));
2356     OUT_BATCH(batch, GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
2357               GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
2358     OUT_BATCH(batch, 0);
2359     OUT_BATCH(batch, 0);
2360     ADVANCE_BATCH(batch);
2361
2362     BEGIN_BATCH(batch, 2);
2363     OUT_BATCH(batch, GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
2364     OUT_BATCH(batch, 1);
2365     ADVANCE_BATCH(batch);
2366
2367     /* Set system instruction pointer */
2368     BEGIN_BATCH(batch, 2);
2369     OUT_BATCH(batch, CMD_STATE_SIP | 0);
2370     OUT_BATCH(batch, 0);
2371     ADVANCE_BATCH(batch);
2372 }
2373
2374 static void
2375 gen7_emit_state_base_address(VADriverContextP ctx)
2376 {
2377     struct i965_driver_data *i965 = i965_driver_data(ctx);
2378     struct intel_batchbuffer *batch = i965->batch;
2379     struct i965_render_state *render_state = &i965->render_state;
2380
2381     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
2382     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state base address */
2383     OUT_RELOC(batch, render_state->wm.surface_state_binding_table_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
2384     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state base address */
2385     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object base address */
2386     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction base address */
2387     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* General state upper bound */
2388     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Dynamic state upper bound */
2389     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Indirect object upper bound */
2390     OUT_BATCH(batch, BASE_ADDRESS_MODIFY); /* Instruction access upper bound */
2391 }
2392
2393 static void
2394 gen7_emit_viewport_state_pointers(VADriverContextP ctx)
2395 {
2396     struct i965_driver_data *i965 = i965_driver_data(ctx);
2397     struct intel_batchbuffer *batch = i965->batch;
2398     struct i965_render_state *render_state = &i965->render_state;
2399
2400     BEGIN_BATCH(batch, 2);
2401     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
2402     OUT_RELOC(batch,
2403               render_state->cc.viewport,
2404               I915_GEM_DOMAIN_INSTRUCTION, 0,
2405               0);
2406     ADVANCE_BATCH(batch);
2407
2408     BEGIN_BATCH(batch, 2);
2409     OUT_BATCH(batch, GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL | (2 - 2));
2410     OUT_BATCH(batch, 0);
2411     ADVANCE_BATCH(batch);
2412 }
2413
2414 /*
2415  * URB layout on GEN7 
2416  * ----------------------------------------
2417  * | PS Push Constants (8KB) | VS entries |
2418  * ----------------------------------------
2419  */
2420 static void
2421 gen7_emit_urb(VADriverContextP ctx)
2422 {
2423     struct i965_driver_data *i965 = i965_driver_data(ctx);
2424     struct intel_batchbuffer *batch = i965->batch;
2425
2426     BEGIN_BATCH(batch, 2);
2427     OUT_BATCH(batch, GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS | (2 - 2));
2428     OUT_BATCH(batch, 8); /* in 1KBs */
2429     ADVANCE_BATCH(batch);
2430
2431     BEGIN_BATCH(batch, 2);
2432     OUT_BATCH(batch, GEN7_3DSTATE_URB_VS | (2 - 2));
2433     OUT_BATCH(batch, 
2434               (32 << GEN7_URB_ENTRY_NUMBER_SHIFT) | /* at least 32 */
2435               ((2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT) |
2436               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2437     ADVANCE_BATCH(batch);
2438
2439     BEGIN_BATCH(batch, 2);
2440     OUT_BATCH(batch, GEN7_3DSTATE_URB_GS | (2 - 2));
2441     OUT_BATCH(batch,
2442               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2443               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2444     ADVANCE_BATCH(batch);
2445
2446     BEGIN_BATCH(batch, 2);
2447     OUT_BATCH(batch, GEN7_3DSTATE_URB_HS | (2 - 2));
2448     OUT_BATCH(batch,
2449               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2450               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2451     ADVANCE_BATCH(batch);
2452
2453     BEGIN_BATCH(batch, 2);
2454     OUT_BATCH(batch, GEN7_3DSTATE_URB_DS | (2 - 2));
2455     OUT_BATCH(batch,
2456               (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
2457               (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
2458     ADVANCE_BATCH(batch);
2459 }
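/*
 * A sketch of the units involved, assuming the usual GEN7 encodings:
 * the PS push constant allocation is in 1KB blocks (8 => the 8KB shown
 * in the diagram above), the URB starting addresses are in 8KB blocks
 * (so VS entries begin right after the push constants), and the entry
 * size fields are (n - 1) in 512-bit units. GS/HS/DS get zero entries
 * since those stages are bypassed.
 */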
2460
2461 static void
2462 gen7_emit_cc_state_pointers(VADriverContextP ctx)
2463 {
2464     struct i965_driver_data *i965 = i965_driver_data(ctx);
2465     struct intel_batchbuffer *batch = i965->batch;
2466     struct i965_render_state *render_state = &i965->render_state;
2467
2468     BEGIN_BATCH(batch, 2);
2469     OUT_BATCH(batch, GEN6_3DSTATE_CC_STATE_POINTERS | (2 - 2));
2470     OUT_RELOC(batch,
2471               render_state->cc.state,
2472               I915_GEM_DOMAIN_INSTRUCTION, 0,
2473               1);
2474     ADVANCE_BATCH(batch);
2475
2476     BEGIN_BATCH(batch, 2);
2477     OUT_BATCH(batch, GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
2478     OUT_RELOC(batch,
2479               render_state->cc.blend,
2480               I915_GEM_DOMAIN_INSTRUCTION, 0,
2481               1);
2482     ADVANCE_BATCH(batch);
2483
2484     BEGIN_BATCH(batch, 2);
2485     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS | (2 - 2));
2486     OUT_RELOC(batch,
2487               render_state->cc.depth_stencil,
2488               I915_GEM_DOMAIN_INSTRUCTION, 0, 
2489               1);
2490     ADVANCE_BATCH(batch);
2491 }
2492
2493 static void
2494 gen7_emit_sampler_state_pointers(VADriverContextP ctx)
2495 {
2496     struct i965_driver_data *i965 = i965_driver_data(ctx);
2497     struct intel_batchbuffer *batch = i965->batch;
2498     struct i965_render_state *render_state = &i965->render_state;
2499
2500     BEGIN_BATCH(batch, 2);
2501     OUT_BATCH(batch, GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
2502     OUT_RELOC(batch,
2503               render_state->wm.sampler,
2504               I915_GEM_DOMAIN_INSTRUCTION, 0,
2505               0);
2506     ADVANCE_BATCH(batch);
2507 }
2508
2509 static void
2510 gen7_emit_binding_table(VADriverContextP ctx)
2511 {
2512     struct i965_driver_data *i965 = i965_driver_data(ctx);
2513     struct intel_batchbuffer *batch = i965->batch;
2514
2515     BEGIN_BATCH(batch, 2);
2516     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS | (2 - 2));
2517     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
2518     ADVANCE_BATCH(batch);
2519 }
2520
2521 static void
2522 gen7_emit_depth_buffer_state(VADriverContextP ctx)
2523 {
2524     struct i965_driver_data *i965 = i965_driver_data(ctx);
2525     struct intel_batchbuffer *batch = i965->batch;
2526
2527     BEGIN_BATCH(batch, 7);
2528     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_BUFFER | (7 - 2));
2529     OUT_BATCH(batch,
2530               (I965_DEPTHFORMAT_D32_FLOAT << 18) |
2531               (I965_SURFACE_NULL << 29));
2532     OUT_BATCH(batch, 0);
2533     OUT_BATCH(batch, 0);
2534     OUT_BATCH(batch, 0);
2535     OUT_BATCH(batch, 0);
2536     OUT_BATCH(batch, 0);
2537     ADVANCE_BATCH(batch);
2538
2539     BEGIN_BATCH(batch, 3);
2540     OUT_BATCH(batch, GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
2541     OUT_BATCH(batch, 0);
2542     OUT_BATCH(batch, 0);
2543     ADVANCE_BATCH(batch);
2544 }
2545
2546 static void
2547 gen7_emit_drawing_rectangle(VADriverContextP ctx)
2548 {
2549     i965_render_drawing_rectangle(ctx);
2550 }
2551
2552 static void 
2553 gen7_emit_vs_state(VADriverContextP ctx)
2554 {
2555     struct i965_driver_data *i965 = i965_driver_data(ctx);
2556     struct intel_batchbuffer *batch = i965->batch;
2557
2558     /* disable VS constant buffer */
2559     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_VS | (7 - 2));
2560     OUT_BATCH(batch, 0);
2561     OUT_BATCH(batch, 0);
2562     OUT_BATCH(batch, 0);
2563     OUT_BATCH(batch, 0);
2564     OUT_BATCH(batch, 0);
2565     OUT_BATCH(batch, 0);
2566         
2567     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
2568     OUT_BATCH(batch, 0); /* without VS kernel */
2569     OUT_BATCH(batch, 0);
2570     OUT_BATCH(batch, 0);
2571     OUT_BATCH(batch, 0);
2572     OUT_BATCH(batch, 0); /* pass-through */
2573 }
2574
2575 static void 
2576 gen7_emit_bypass_state(VADriverContextP ctx)
2577 {
2578     struct i965_driver_data *i965 = i965_driver_data(ctx);
2579     struct intel_batchbuffer *batch = i965->batch;
2580
2581     /* bypass GS */
2582     BEGIN_BATCH(batch, 7);
2583     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_GS | (7 - 2));
2584     OUT_BATCH(batch, 0);
2585     OUT_BATCH(batch, 0);
2586     OUT_BATCH(batch, 0);
2587     OUT_BATCH(batch, 0);
2588     OUT_BATCH(batch, 0);
2589     OUT_BATCH(batch, 0);
2590     ADVANCE_BATCH(batch);
2591
2592     BEGIN_BATCH(batch, 7);      
2593     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
2594     OUT_BATCH(batch, 0); /* without GS kernel */
2595     OUT_BATCH(batch, 0);
2596     OUT_BATCH(batch, 0);
2597     OUT_BATCH(batch, 0);
2598     OUT_BATCH(batch, 0);
2599     OUT_BATCH(batch, 0); /* pass-through */
2600     ADVANCE_BATCH(batch);
2601
2602     BEGIN_BATCH(batch, 2);
2603     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_GS | (2 - 2));
2604     OUT_BATCH(batch, 0);
2605     ADVANCE_BATCH(batch);
2606
2607     /* disable HS */
2608     BEGIN_BATCH(batch, 7);
2609     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_HS | (7 - 2));
2610     OUT_BATCH(batch, 0);
2611     OUT_BATCH(batch, 0);
2612     OUT_BATCH(batch, 0);
2613     OUT_BATCH(batch, 0);
2614     OUT_BATCH(batch, 0);
2615     OUT_BATCH(batch, 0);
2616     ADVANCE_BATCH(batch);
2617
2618     BEGIN_BATCH(batch, 7);
2619     OUT_BATCH(batch, GEN7_3DSTATE_HS | (7 - 2));
2620     OUT_BATCH(batch, 0);
2621     OUT_BATCH(batch, 0);
2622     OUT_BATCH(batch, 0);
2623     OUT_BATCH(batch, 0);
2624     OUT_BATCH(batch, 0);
2625     OUT_BATCH(batch, 0);
2626     ADVANCE_BATCH(batch);
2627
2628     BEGIN_BATCH(batch, 2);
2629     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_HS | (2 - 2));
2630     OUT_BATCH(batch, 0);
2631     ADVANCE_BATCH(batch);
2632
2633     /* Disable TE */
2634     BEGIN_BATCH(batch, 4);
2635     OUT_BATCH(batch, GEN7_3DSTATE_TE | (4 - 2));
2636     OUT_BATCH(batch, 0);
2637     OUT_BATCH(batch, 0);
2638     OUT_BATCH(batch, 0);
2639     ADVANCE_BATCH(batch);
2640
2641     /* Disable DS */
2642     BEGIN_BATCH(batch, 7);
2643     OUT_BATCH(batch, GEN7_3DSTATE_CONSTANT_DS | (7 - 2));
2644     OUT_BATCH(batch, 0);
2645     OUT_BATCH(batch, 0);
2646     OUT_BATCH(batch, 0);
2647     OUT_BATCH(batch, 0);
2648     OUT_BATCH(batch, 0);
2649     OUT_BATCH(batch, 0);
2650     ADVANCE_BATCH(batch);
2651
2652     BEGIN_BATCH(batch, 6);
2653     OUT_BATCH(batch, GEN7_3DSTATE_DS | (6 - 2));
2654     OUT_BATCH(batch, 0);
2655     OUT_BATCH(batch, 0);
2656     OUT_BATCH(batch, 0);
2657     OUT_BATCH(batch, 0);
2658     OUT_BATCH(batch, 0);
2659     ADVANCE_BATCH(batch);
2660
2661     BEGIN_BATCH(batch, 2);
2662     OUT_BATCH(batch, GEN7_3DSTATE_BINDING_TABLE_POINTERS_DS | (2 - 2));
2663     OUT_BATCH(batch, 0);
2664     ADVANCE_BATCH(batch);
2665
2666     /* Disable STREAMOUT */
2667     BEGIN_BATCH(batch, 3);
2668     OUT_BATCH(batch, GEN7_3DSTATE_STREAMOUT | (3 - 2));
2669     OUT_BATCH(batch, 0);
2670     OUT_BATCH(batch, 0);
2671     ADVANCE_BATCH(batch);
2672 }
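/*
 * GEN7 adds tessellation (HS/TE/DS) and stream output to the 3D
 * pipeline; each stage is explicitly zeroed here, together with its
 * binding table, because this rectangle blit only needs the
 * pass-through VS and the PS.
 */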
2673
2674 static void 
2675 gen7_emit_clip_state(VADriverContextP ctx)
2676 {
2677     struct i965_driver_data *i965 = i965_driver_data(ctx);
2678     struct intel_batchbuffer *batch = i965->batch;
2679
2680     OUT_BATCH(batch, GEN6_3DSTATE_CLIP | (4 - 2));
2681     OUT_BATCH(batch, 0);
2682     OUT_BATCH(batch, 0); /* pass-through */
2683     OUT_BATCH(batch, 0);
2684 }
2685
2686 static void 
2687 gen7_emit_sf_state(VADriverContextP ctx)
2688 {
2689     struct i965_driver_data *i965 = i965_driver_data(ctx);
2690     struct intel_batchbuffer *batch = i965->batch;
2691
2692     BEGIN_BATCH(batch, 14);
2693     OUT_BATCH(batch, GEN7_3DSTATE_SBE | (14 - 2));
2694     OUT_BATCH(batch,
2695               (1 << GEN7_SBE_NUM_OUTPUTS_SHIFT) |
2696               (1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT) |
2697               (0 << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT));
2698     OUT_BATCH(batch, 0);
2699     OUT_BATCH(batch, 0);
2700     OUT_BATCH(batch, 0); /* DW4 */
2701     OUT_BATCH(batch, 0);
2702     OUT_BATCH(batch, 0);
2703     OUT_BATCH(batch, 0);
2704     OUT_BATCH(batch, 0);
2705     OUT_BATCH(batch, 0); /* DW9 */
2706     OUT_BATCH(batch, 0);
2707     OUT_BATCH(batch, 0);
2708     OUT_BATCH(batch, 0);
2709     OUT_BATCH(batch, 0);
2710     ADVANCE_BATCH(batch);
2711
2712     BEGIN_BATCH(batch, 7);
2713     OUT_BATCH(batch, GEN6_3DSTATE_SF | (7 - 2));
2714     OUT_BATCH(batch, 0);
2715     OUT_BATCH(batch, GEN6_3DSTATE_SF_CULL_NONE);
2716     OUT_BATCH(batch, 2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT);
2717     OUT_BATCH(batch, 0);
2718     OUT_BATCH(batch, 0);
2719     OUT_BATCH(batch, 0);
2720     ADVANCE_BATCH(batch);
2721 }
2722
2723 static void 
2724 gen7_emit_wm_state(VADriverContextP ctx, int kernel)
2725 {
2726     struct i965_driver_data *i965 = i965_driver_data(ctx);
2727     struct intel_batchbuffer *batch = i965->batch;
2728     struct i965_render_state *render_state = &i965->render_state;
2729
2730     BEGIN_BATCH(batch, 3);
2731     OUT_BATCH(batch, GEN6_3DSTATE_WM | (3 - 2));
2732     OUT_BATCH(batch,
2733               GEN7_WM_DISPATCH_ENABLE |
2734               GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
2735     OUT_BATCH(batch, 0);
2736     ADVANCE_BATCH(batch);
2737
2738     BEGIN_BATCH(batch, 7);
2739     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS | (7 - 2));
2740     OUT_BATCH(batch, 1);
2741     OUT_BATCH(batch, 0);
2742     OUT_RELOC(batch, 
2743               render_state->curbe.bo,
2744               I915_GEM_DOMAIN_INSTRUCTION, 0,
2745               0);
2746     OUT_BATCH(batch, 0);
2747     OUT_BATCH(batch, 0);
2748     OUT_BATCH(batch, 0);
2749     ADVANCE_BATCH(batch);
2750
2751     BEGIN_BATCH(batch, 8);
2752     OUT_BATCH(batch, GEN7_3DSTATE_PS | (8 - 2));
2753     OUT_RELOC(batch, 
2754               render_state->render_kernels[kernel].bo,
2755               I915_GEM_DOMAIN_INSTRUCTION, 0,
2756               0);
2757     OUT_BATCH(batch, 
2758               (1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
2759               (5 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
2760     OUT_BATCH(batch, 0); /* scratch space base offset */
2761     OUT_BATCH(batch, 
2762               ((86 - 1) << GEN7_PS_MAX_THREADS_SHIFT) |
2763               GEN7_PS_PUSH_CONSTANT_ENABLE |
2764               GEN7_PS_ATTRIBUTE_ENABLE |
2765               GEN7_PS_16_DISPATCH_ENABLE);
2766     OUT_BATCH(batch, 
2767               (6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
2768     OUT_BATCH(batch, 0); /* kernel 1 pointer */
2769     OUT_BATCH(batch, 0); /* kernel 2 pointer */
2770     ADVANCE_BATCH(batch);
2771 }
2772
2773 static void
2774 gen7_emit_vertex_element_state(VADriverContextP ctx)
2775 {
2776     struct i965_driver_data *i965 = i965_driver_data(ctx);
2777     struct intel_batchbuffer *batch = i965->batch;
2778
2779     /* Set up our vertex elements, sourced from the single vertex buffer. */
2780     OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | (5 - 2));
2781     /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
2782     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2783               GEN6_VE0_VALID |
2784               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2785               (0 << VE0_OFFSET_SHIFT));
2786     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
2787               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2788               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2789               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2790     /* offset 8: S0, T0 -> {S0, T0, 1.0, 1.0} */
2791     OUT_BATCH(batch, (0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT) |
2792               GEN6_VE0_VALID |
2793               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
2794               (8 << VE0_OFFSET_SHIFT));
2795     OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
2796               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
2797               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
2798               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
2799 }
2800
2801 static void
2802 gen7_emit_vertices(VADriverContextP ctx)
2803 {
2804     struct i965_driver_data *i965 = i965_driver_data(ctx);
2805     struct intel_batchbuffer *batch = i965->batch;
2806     struct i965_render_state *render_state = &i965->render_state;
2807
2808     BEGIN_BATCH(batch, 5);
2809     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | (5 - 2));
2810     OUT_BATCH(batch, 
2811               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
2812               GEN6_VB0_VERTEXDATA |
2813               GEN7_VB0_ADDRESS_MODIFYENABLE |
2814               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
2815     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
2816     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
2817     OUT_BATCH(batch, 0);
2818     ADVANCE_BATCH(batch);
2819
2820     BEGIN_BATCH(batch, 7);
2821     OUT_BATCH(batch, CMD_3DPRIMITIVE | (7 - 2));
2822     OUT_BATCH(batch,
2823               _3DPRIM_RECTLIST |
2824               GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL);
2825     OUT_BATCH(batch, 3); /* vertex count per instance */
2826     OUT_BATCH(batch, 0); /* start vertex offset */
2827     OUT_BATCH(batch, 1); /* single instance */
2828     OUT_BATCH(batch, 0); /* start instance location */
2829     OUT_BATCH(batch, 0);
2830     ADVANCE_BATCH(batch);
2831 }
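/*
 * Compared with the GEN6 path, the vertex buffer state needs
 * GEN7_VB0_ADDRESS_MODIFYENABLE for its addresses to take effect.
 * Both generations program an end address of 12 * 4 bytes (three
 * 16-byte vertices), and 3DPRIMITIVE now carries its topology and
 * access type in DW1 instead of in the command dword itself.
 */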
2832
2833 static void
2834 gen7_render_emit_states(VADriverContextP ctx, int kernel)
2835 {
2836     struct i965_driver_data *i965 = i965_driver_data(ctx);
2837     struct intel_batchbuffer *batch = i965->batch;
2838
2839     intel_batchbuffer_start_atomic(batch, 0x1000);
2840     intel_batchbuffer_emit_mi_flush(batch);
2841     gen7_emit_invariant_states(ctx);
2842     gen7_emit_state_base_address(ctx);
2843     gen7_emit_viewport_state_pointers(ctx);
2844     gen7_emit_urb(ctx);
2845     gen7_emit_cc_state_pointers(ctx);
2846     gen7_emit_sampler_state_pointers(ctx);
2847     gen7_emit_bypass_state(ctx);
2848     gen7_emit_vs_state(ctx);
2849     gen7_emit_clip_state(ctx);
2850     gen7_emit_sf_state(ctx);
2851     gen7_emit_wm_state(ctx, kernel);
2852     gen7_emit_binding_table(ctx);
2853     gen7_emit_depth_buffer_state(ctx);
2854     gen7_emit_drawing_rectangle(ctx);
2855     gen7_emit_vertex_element_state(ctx);
2856     gen7_emit_vertices(ctx);
2857     intel_batchbuffer_end_atomic(batch);
2858 }
2859
2860 static void
2861 gen7_render_put_surface(
2862     VADriverContextP   ctx,
2863     VASurfaceID        surface,
2864     const VARectangle *src_rect,
2865     const VARectangle *dst_rect,
2866     unsigned int       flags
2867 )
2868 {
2869     struct i965_driver_data *i965 = i965_driver_data(ctx);
2870     struct intel_batchbuffer *batch = i965->batch;
2871
2872     gen7_render_initialize(ctx);
2873     gen7_render_setup_states(ctx, surface, src_rect, dst_rect, flags);
2874     i965_clear_dest_region(ctx);
2875     gen7_render_emit_states(ctx, PS_KERNEL);
2876     intel_batchbuffer_flush(batch);
2877 }
2878
2879 static void
2880 gen7_subpicture_render_blend_state(VADriverContextP ctx)
2881 {
2882     struct i965_driver_data *i965 = i965_driver_data(ctx);
2883     struct i965_render_state *render_state = &i965->render_state;
2884     struct gen6_blend_state *blend_state;
2885
2887     dri_bo_map(render_state->cc.blend, 1);
2888     assert(render_state->cc.blend->virtual);
2889     blend_state = render_state->cc.blend->virtual;
2890     memset(blend_state, 0, sizeof(*blend_state));
2891     blend_state->blend0.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
2892     blend_state->blend0.source_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
2893     blend_state->blend0.blend_func = I965_BLENDFUNCTION_ADD;
2894     blend_state->blend0.blend_enable = 1;
2895     blend_state->blend1.post_blend_clamp_enable = 1;
2896     blend_state->blend1.pre_blend_clamp_enable = 1;
2897     blend_state->blend1.clamp_range = 0; /* clamp range [0, 1] */
2898     dri_bo_unmap(render_state->cc.blend);
2899 }
2900
2901 static void
2902 gen7_subpicture_render_setup_states(
2903     VADriverContextP   ctx,
2904     VASurfaceID        surface,
2905     const VARectangle *src_rect,
2906     const VARectangle *dst_rect
2907 )
2908 {
2909     i965_render_dest_surface_state(ctx, 0);
2910     i965_subpic_render_src_surfaces_state(ctx, surface);
2911     i965_render_sampler(ctx);
2912     i965_render_cc_viewport(ctx);
2913     gen7_render_color_calc_state(ctx);
2914     gen7_subpicture_render_blend_state(ctx);
2915     gen7_render_depth_stencil_state(ctx);
2916     i965_subpic_render_upload_vertex(ctx, surface, dst_rect);
2917 }
2918
2919 static void
2920 gen7_render_put_subpicture(
2921     VADriverContextP   ctx,
2922     VASurfaceID        surface,
2923     const VARectangle *src_rect,
2924     const VARectangle *dst_rect
2925 )
2926 {
2927     struct i965_driver_data *i965 = i965_driver_data(ctx);
2928     struct intel_batchbuffer *batch = i965->batch;
2929     struct object_surface *obj_surface = SURFACE(surface);
2930     struct object_subpic *obj_subpic = SUBPIC(obj_surface->subpic);
2931
2932     assert(obj_subpic);
2933     gen7_render_initialize(ctx);
2934     gen7_subpicture_render_setup_states(ctx, surface, src_rect, dst_rect);
2935     gen7_render_emit_states(ctx, PS_SUBPIC_KERNEL);
2936     i965_render_upload_image_palette(ctx, obj_subpic->image, 0xff);
2937     intel_batchbuffer_flush(batch);
2938 }
2939
2940
/*
 * global functions
 */

/* Defined in i965_drv_video.c; forward-declared here so the temporary
 * surface created by i965_post_processing() can be released below. */
VAStatus
i965_DestroySurfaces(VADriverContextP ctx,
                     VASurfaceID *surface_list,
                     int num_surfaces);
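
/*
 * Top-level render entry point.  The post-processing pipeline runs first;
 * it may hand back a new (possibly already scaled) surface, in which case
 * that surface is rendered instead of the original and destroyed again
 * afterwards.  The actual blit is dispatched to the generation-specific
 * implementation.
 */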
void
intel_render_put_surface(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect,
    unsigned int       flags
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int has_done_scaling = 0;
    VASurfaceID in_surface_id = surface;
    VASurfaceID out_surface_id = i965_post_processing(ctx, surface, src_rect, dst_rect, flags, &has_done_scaling);

    assert((!has_done_scaling) || (out_surface_id != VA_INVALID_ID));

    if (out_surface_id != VA_INVALID_ID)
        in_surface_id = out_surface_id;

    /* If post-processing already scaled src_rect into dst_rect, the render
     * pass must sample the whole dst_rect-sized result rather than the
     * original src_rect. */
    if (IS_GEN7(i965->intel.device_id))
        gen7_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
    else if (IS_GEN6(i965->intel.device_id))
        gen6_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);
    else
        i965_render_put_surface(ctx, in_surface_id, has_done_scaling ? dst_rect : src_rect, dst_rect, flags);

    /* Free the temporary surface created by post-processing, if any. */
    if (in_surface_id != surface)
        i965_DestroySurfaces(ctx, &in_surface_id, 1);
}

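/*
 * Example (hypothetical caller, for illustration only; the rectangle
 * values are made up): the PutSurface entry point would invoke this
 * roughly as follows, after clipping the rectangles:
 *
 *     VARectangle src_rect = { .x = 0, .y = 0, .width = 1920, .height = 1080 };
 *     VARectangle dst_rect = { .x = 0, .y = 0, .width = 1280, .height = 720 };
 *
 *     intel_render_put_surface(ctx, surface, &src_rect, &dst_rect,
 *                              VA_FRAME_PICTURE);
 */

/*
 * Top-level subpicture entry point: dispatch to the generation-specific
 * subpicture blend implementation.
 */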
void
intel_render_put_subpicture(
    VADriverContextP   ctx,
    VASurfaceID        surface,
    const VARectangle *src_rect,
    const VARectangle *dst_rect
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);

    if (IS_GEN7(i965->intel.device_id))
        gen7_render_put_subpicture(ctx, surface, src_rect, dst_rect);
    else if (IS_GEN6(i965->intel.device_id))
        gen6_render_put_subpicture(ctx, surface, src_rect, dst_rect);
    else
        i965_render_put_subpicture(ctx, surface, src_rect, dst_rect);
}

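/*
 * One-time setup of the render engine: pick the shader kernel table that
 * matches the GPU generation, upload each kernel binary into its own
 * buffer object, and allocate the constant (CURBE) buffer.
 */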
Bool
i965_render_init(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;
    int i;

    /* kernel */
    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen5) /
                                 sizeof(render_kernels_gen5[0])));
    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) /
                                 sizeof(render_kernels_gen6[0])));

    if (IS_GEN7(i965->intel.device_id))
        memcpy(render_state->render_kernels, render_kernels_gen7, sizeof(render_state->render_kernels));
    else if (IS_GEN6(i965->intel.device_id))
        memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
    else if (IS_IRONLAKE(i965->intel.device_id))
        memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
    else
        memcpy(render_state->render_kernels, render_kernels_gen4, sizeof(render_state->render_kernels));

    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
        struct i965_kernel *kernel = &render_state->render_kernels[i];

        if (!kernel->size)
            continue;

        /* Upload the kernel binary into its own 4KB-aligned buffer object. */
        kernel->bo = dri_bo_alloc(i965->intel.bufmgr,
                                  kernel->name,
                                  kernel->size, 0x1000);
        assert(kernel->bo);
        dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
    }

    /* constant buffer */
    render_state->curbe.bo = dri_bo_alloc(i965->intel.bufmgr,
                                          "constant buffer",
                                          4096, 64);
    assert(render_state->curbe.bo);

    return True;
}

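/*
 * Tear-down counterpart of i965_render_init(): drop the references on all
 * buffer objects owned by the render state and clear the pointers so a
 * subsequent init starts from a clean slate.
 */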
Bool
i965_render_terminate(VADriverContextP ctx)
{
    int i;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_render_state *render_state = &i965->render_state;

    dri_bo_unreference(render_state->curbe.bo);
    render_state->curbe.bo = NULL;

    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
        struct i965_kernel *kernel = &render_state->render_kernels[i];

        dri_bo_unreference(kernel->bo);
        kernel->bo = NULL;
    }

    dri_bo_unreference(render_state->vb.vertex_buffer);
    render_state->vb.vertex_buffer = NULL;
    dri_bo_unreference(render_state->vs.state);
    render_state->vs.state = NULL;
    dri_bo_unreference(render_state->sf.state);
    render_state->sf.state = NULL;
    dri_bo_unreference(render_state->wm.sampler);
    render_state->wm.sampler = NULL;
    dri_bo_unreference(render_state->wm.state);
    render_state->wm.state = NULL;
    dri_bo_unreference(render_state->wm.surface_state_binding_table_bo);
    render_state->wm.surface_state_binding_table_bo = NULL;
    dri_bo_unreference(render_state->cc.viewport);
    render_state->cc.viewport = NULL;
    dri_bo_unreference(render_state->cc.state);
    render_state->cc.state = NULL;
    dri_bo_unreference(render_state->cc.blend);
    render_state->cc.blend = NULL;
    dri_bo_unreference(render_state->cc.depth_stencil);
    render_state->cc.depth_stencil = NULL;

    if (render_state->draw_region) {
        dri_bo_unreference(render_state->draw_region->bo);
        free(render_state->draw_region);
        render_state->draw_region = NULL;
    }

    return True;
}
