diff --git a/src/i965_render.c b/src/i965_render.c
index 7fa7af7..0f5859a 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -35,6 +35,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <assert.h>
+#include <math.h>
 
 #include <va/va_drmcommon.h>
 
 #include "i965_defines.h"
 #include "i965_drv_video.h"
 #include "i965_structs.h"
+#include "i965_yuv_coefs.h"
 
 #include "i965_render.h"
+#include "i965_post_processing.h"
 
 #define SF_KERNEL_NUM_GRF       16
 #define SF_MAX_THREADS          1
 
-static const uint32_t sf_kernel_static[][4] = 
-{
+static const uint32_t sf_kernel_static[][4] = {
 #include "shaders/render/exa_sf.g4b"
 };
 
-#define PS_KERNEL_NUM_GRF       32
+#define PS_KERNEL_NUM_GRF       48
 #define PS_MAX_THREADS          32
 
-#define I965_GRF_BLOCKS(nreg)  ((nreg + 15) / 16 - 1)
+#define I965_GRF_BLOCKS(nreg)   ((nreg + 15) / 16 - 1)
 
-static const uint32_t ps_kernel_static[][4] = 
-{
+static const uint32_t ps_kernel_static[][4] = {
 #include "shaders/render/exa_wm_xy.g4b"
 #include "shaders/render/exa_wm_src_affine.g4b"
 #include "shaders/render/exa_wm_src_sample_planar.g4b"
+#include "shaders/render/exa_wm_yuv_color_balance.g4b"
 #include "shaders/render/exa_wm_yuv_rgb.g4b"
 #include "shaders/render/exa_wm_write.g4b"
 };
-static const uint32_t ps_subpic_kernel_static[][4] = 
-{
+static const uint32_t ps_subpic_kernel_static[][4] = {
 #include "shaders/render/exa_wm_xy.g4b"
 #include "shaders/render/exa_wm_src_affine.g4b"
 #include "shaders/render/exa_wm_src_sample_argb.g4b"
@@ -76,21 +77,19 @@ static const uint32_t ps_subpic_kernel_static[][4] =
 };
 
 /* On IRONLAKE */
-static const uint32_t sf_kernel_static_gen5[][4] = 
-{
+static const uint32_t sf_kernel_static_gen5[][4] = {
 #include "shaders/render/exa_sf.g4b.gen5"
 };
 
-static const uint32_t ps_kernel_static_gen5[][4] = 
-{
+static const uint32_t ps_kernel_static_gen5[][4] = {
 #include "shaders/render/exa_wm_xy.g4b.gen5"
 #include "shaders/render/exa_wm_src_affine.g4b.gen5"
 #include "shaders/render/exa_wm_src_sample_planar.g4b.gen5"
+#include "shaders/render/exa_wm_yuv_color_balance.g4b.gen5"
 #include "shaders/render/exa_wm_yuv_rgb.g4b.gen5"
 #include "shaders/render/exa_wm_write.g4b.gen5"
 };
-static const uint32_t ps_subpic_kernel_static_gen5[][4] = 
-{
+static const uint32_t ps_subpic_kernel_static_gen5[][4] = {
 #include "shaders/render/exa_wm_xy.g4b.gen5"
 #include "shaders/render/exa_wm_src_affine.g4b.gen5"
 #include "shaders/render/exa_wm_src_sample_argb.g4b.gen5"
@@ -98,13 +97,13 @@ static const uint32_t ps_subpic_kernel_static_gen5[][4] =
 };
 
 /* programs for Sandybridge */
-static const uint32_t sf_kernel_static_gen6[][4] = 
-{
+static const uint32_t sf_kernel_static_gen6[][4] = {
 };
 
 static const uint32_t ps_kernel_static_gen6[][4] = {
 #include "shaders/render/exa_wm_src_affine.g6b"
 #include "shaders/render/exa_wm_src_sample_planar.g6b"
+#include "shaders/render/exa_wm_yuv_color_balance.g6b"
 #include "shaders/render/exa_wm_yuv_rgb.g6b"
 #include "shaders/render/exa_wm_write.g6b"
 };
@@ -116,13 +115,13 @@ static const uint32_t ps_subpic_kernel_static_gen6[][4] = {
 };
 
 /* programs for Ivybridge */
-static const uint32_t sf_kernel_static_gen7[][4] = 
-{
+static const uint32_t sf_kernel_static_gen7[][4] = {
 };
 
 static const uint32_t ps_kernel_static_gen7[][4] = {
 #include "shaders/render/exa_wm_src_affine.g7b"
 #include "shaders/render/exa_wm_src_sample_planar.g7b"
+#include "shaders/render/exa_wm_yuv_color_balance.g7b"
 #include "shaders/render/exa_wm_yuv_rgb.g7b"
 #include "shaders/render/exa_wm_write.g7b"
 };
@@ -137,20 +136,21 @@ static const uint32_t ps_subpic_kernel_static_gen7[][4] = {
 static const uint32_t ps_kernel_static_gen7_haswell[][4] = {
 #include "shaders/render/exa_wm_src_affine.g7b"
 #include "shaders/render/exa_wm_src_sample_planar.g7b.haswell"
+#include "shaders/render/exa_wm_yuv_color_balance.g7b.haswell"
 #include "shaders/render/exa_wm_yuv_rgb.g7b"
 #include "shaders/render/exa_wm_write.g7b"
 };
 
-#define SURFACE_STATE_PADDED_SIZE_I965  ALIGN(sizeof(struct i965_surface_state), 32)
-#define SURFACE_STATE_PADDED_SIZE_GEN7  ALIGN(sizeof(struct gen7_surface_state), 32)
-#define SURFACE_STATE_PADDED_SIZE       MAX(SURFACE_STATE_PADDED_SIZE_I965, SURFACE_STATE_PADDED_SIZE_GEN7)
+
+#define SURFACE_STATE_PADDED_SIZE       MAX(SURFACE_STATE_PADDED_SIZE_GEN6, SURFACE_STATE_PADDED_SIZE_GEN7)
+
 #define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * index)
 #define BINDING_TABLE_OFFSET            SURFACE_STATE_OFFSET(MAX_RENDER_SURFACES)
 
-static uint32_t float_to_uint (float f) 
+static uint32_t float_to_uint(float f)
 {
     union {
-        uint32_t i; 
+        uint32_t i;
         float f;
     } x;
 
@@ -158,8 +158,7 @@ static uint32_t float_to_uint (float f)
     return x.i;
 }
 
-enum 
-{
+enum {
     SF_KERNEL = 0,
     PS_KERNEL,
     PS_SUBPIC_KERNEL
@@ -290,20 +289,20 @@ static struct i965_kernel render_kernels_gen7_haswell[] = {
     }
 };
 
-#define URB_VS_ENTRIES       8
+#define URB_VS_ENTRIES        8
 #define URB_VS_ENTRY_SIZE     1
 
-#define URB_GS_ENTRIES       0
+#define URB_GS_ENTRIES        0
 #define URB_GS_ENTRY_SIZE     0
 
 #define URB_CLIP_ENTRIES      0
 #define URB_CLIP_ENTRY_SIZE   0
 
-#define URB_SF_ENTRIES       1
+#define URB_SF_ENTRIES        1
 #define URB_SF_ENTRY_SIZE     2
 
-#define URB_CS_ENTRIES       1
-#define URB_CS_ENTRY_SIZE     1
+#define URB_CS_ENTRIES        4
+#define URB_CS_ENTRY_SIZE     4
 
 static void
 i965_render_vs_unit(VADriverContextP ctx)
@@ -317,7 +316,7 @@ i965_render_vs_unit(VADriverContextP ctx)
     vs_state = render_state->vs.state->virtual;
     memset(vs_state, 0, sizeof(*vs_state));
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
     else
         vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES;
@@ -325,7 +324,7 @@ i965_render_vs_unit(VADriverContextP ctx)
     vs_state->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
     vs_state->vs6.vs_enable = 0;
     vs_state->vs6.vert_cache_disable = 1;
-    
+
     dri_bo_unmap(render_state->vs.state);
 }
 
@@ -386,14 +385,14 @@ i965_render_sf_unit(VADriverContextP ctx)
     dri_bo_unmap(render_state->sf.state);
 }
 
-static void 
+static void
 i965_render_sampler(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
     struct i965_sampler_state *sampler_state;
     int i;
-    
+
     assert(render_state->wm.sampler_count > 0);
     assert(render_state->wm.sampler_count <= MAX_SAMPLERS);
 
@@ -431,7 +430,7 @@ i965_subpic_render_wm_unit(VADriverContextP ctx)
 
     wm_state->thread1.single_program_flow = 1; /* XXX */
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
     else
         wm_state->thread1.binding_table_entry_count = 7;
@@ -439,22 +438,22 @@ i965_subpic_render_wm_unit(VADriverContextP ctx)
     wm_state->thread2.scratch_space_base_pointer = 0;
     wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */
 
-    wm_state->thread3.dispatch_grf_start_reg = 3; /* XXX */
-    wm_state->thread3.const_urb_entry_read_length = 0;
+    wm_state->thread3.dispatch_grf_start_reg = 2; /* XXX */
+    wm_state->thread3.const_urb_entry_read_length = 4;
     wm_state->thread3.const_urb_entry_read_offset = 0;
     wm_state->thread3.urb_entry_read_length = 1; /* XXX */
     wm_state->thread3.urb_entry_read_offset = 0; /* XXX */
 
     wm_state->wm4.stats_enable = 0;
-    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5; 
+    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         wm_state->wm4.sampler_count = 0;        /* hardware requirement */
     } else {
         wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
     }
 
-    wm_state->wm5.max_threads = render_state->max_wm_threads - 1;
+    wm_state->wm5.max_threads = i965->intel.device_info->max_wm_threads - 1;
     wm_state->wm5.thread_dispatch_enable = 1;
     wm_state->wm5.enable_16_pix = 1;
     wm_state->wm5.enable_8_pix = 0;
@@ -495,7 +494,7 @@ i965_render_wm_unit(VADriverContextP ctx)
 
     wm_state->thread1.single_program_flow = 1; /* XXX */
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         wm_state->thread1.binding_table_entry_count = 0;        /* hardware requirement */
     else
         wm_state->thread1.binding_table_entry_count = 7;
@@ -504,21 +503,21 @@ i965_render_wm_unit(VADriverContextP ctx)
     wm_state->thread2.per_thread_scratch_space = 0; /* 1024 bytes */
 
     wm_state->thread3.dispatch_grf_start_reg = 2; /* XXX */
-    wm_state->thread3.const_urb_entry_read_length = 1;
+    wm_state->thread3.const_urb_entry_read_length = 4;
     wm_state->thread3.const_urb_entry_read_offset = 0;
     wm_state->thread3.urb_entry_read_length = 1; /* XXX */
     wm_state->thread3.urb_entry_read_offset = 0; /* XXX */
 
     wm_state->wm4.stats_enable = 0;
-    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5; 
+    wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         wm_state->wm4.sampler_count = 0;        /* hardware requirement */
     } else {
         wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
     }
 
-    wm_state->wm5.max_threads = render_state->max_wm_threads - 1;
+    wm_state->wm5.max_threads = i965->intel.device_info->max_wm_threads - 1;
     wm_state->wm5.thread_dispatch_enable = 1;
     wm_state->wm5.enable_16_pix = 1;
     wm_state->wm5.enable_8_pix = 0;
@@ -539,7 +538,7 @@ i965_render_wm_unit(VADriverContextP ctx)
     dri_bo_unmap(render_state->wm.state);
 }
 
-static void 
+static void
 i965_render_cc_viewport(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -550,14 +549,14 @@ i965_render_cc_viewport(VADriverContextP ctx)
     assert(render_state->cc.viewport->virtual);
     cc_viewport = render_state->cc.viewport->virtual;
     memset(cc_viewport, 0, sizeof(*cc_viewport));
-    
+
     cc_viewport->min_depth = -1.e35;
     cc_viewport->max_depth = 1.e35;
 
     dri_bo_unmap(render_state->cc.viewport);
 }
 
-static void 
+static void
 i965_subpic_render_cc_unit(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -588,16 +587,16 @@ i965_subpic_render_cc_unit(VADriverContextP ctx)
     cc_state->cc5.ia_src_blend_factor = I965_BLENDFACTOR_DST_ALPHA;
     cc_state->cc5.ia_dest_blend_factor = I965_BLENDFACTOR_DST_ALPHA;
 
-    cc_state->cc6.clamp_post_alpha_blend = 0; 
-    cc_state->cc6.clamp_pre_alpha_blend  =0; 
-    
+    cc_state->cc6.clamp_post_alpha_blend = 0;
+    cc_state->cc6.clamp_pre_alpha_blend  = 0;
+
     /*final color = src_color*src_blend_factor +/- dst_color*dest_color_blend_factor*/
     cc_state->cc6.blend_function = I965_BLENDFUNCTION_ADD;
     cc_state->cc6.src_blend_factor = I965_BLENDFACTOR_SRC_ALPHA;
     cc_state->cc6.dest_blend_factor = I965_BLENDFACTOR_INV_SRC_ALPHA;
-   
+
     /*alpha test reference*/
-    cc_state->cc7.alpha_ref.f =0.0 ;
+    cc_state->cc7.alpha_ref.f = 0.0;
 
 
     dri_bo_emit_reloc(render_state->cc.state,
@@ -610,7 +609,7 @@ i965_subpic_render_cc_unit(VADriverContextP ctx)
 }
 
 
-static void 
+static void
 i965_render_cc_unit(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -684,11 +683,11 @@ i965_render_set_surface_state(
 
     memset(ss, 0, sizeof(*ss));
 
-    switch (flags & (I965_PP_FLAG_TOP_FIELD|I965_PP_FLAG_BOTTOM_FIELD)) {
-    case I965_PP_FLAG_BOTTOM_FIELD:
+    switch (flags & (VA_TOP_FIELD | VA_BOTTOM_FIELD)) {
+    case VA_BOTTOM_FIELD:
         ss->ss0.vert_line_stride_ofs = 1;
         /* fall-through */
-    case I965_PP_FLAG_TOP_FIELD:
+    case VA_TOP_FIELD:
         ss->ss0.vert_line_stride = 1;
         height /= 2;
         break;
@@ -712,20 +711,20 @@ i965_render_set_surface_state(
 static void
 gen7_render_set_surface_tiling(struct gen7_surface_state *ss, uint32_t tiling)
 {
-   switch (tiling) {
-   case I915_TILING_NONE:
-      ss->ss0.tiled_surface = 0;
-      ss->ss0.tile_walk = 0;
-      break;
-   case I915_TILING_X:
-      ss->ss0.tiled_surface = 1;
-      ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
-      break;
-   case I915_TILING_Y:
-      ss->ss0.tiled_surface = 1;
-      ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
-      break;
-   }
+    switch (tiling) {
+    case I915_TILING_NONE:
+        ss->ss0.tiled_surface = 0;
+        ss->ss0.tile_walk = 0;
+        break;
+    case I915_TILING_X:
+        ss->ss0.tiled_surface = 1;
+        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
+        break;
+    case I915_TILING_Y:
+        ss->ss0.tiled_surface = 1;
+        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
+        break;
+    }
 }
 
 /* Set "Shader Channel Select" */
@@ -755,11 +754,11 @@ gen7_render_set_surface_state(
 
     memset(ss, 0, sizeof(*ss));
 
-    switch (flags & (I965_PP_FLAG_TOP_FIELD|I965_PP_FLAG_BOTTOM_FIELD)) {
-    case I965_PP_FLAG_BOTTOM_FIELD:
+    switch (flags & (VA_TOP_FIELD | VA_BOTTOM_FIELD)) {
+    case VA_BOTTOM_FIELD:
         ss->ss0.vert_line_stride_ofs = 1;
         /* fall-through */
-    case I965_PP_FLAG_TOP_FIELD:
+    case VA_TOP_FIELD:
         ss->ss0.vert_line_stride = 1;
         height /= 2;
         break;
@@ -779,9 +778,10 @@ gen7_render_set_surface_state(
     gen7_render_set_surface_tiling(ss, tiling);
 }
 
+
 static void
 i965_render_src_surface_state(
-    VADriverContextP ctx, 
+    VADriverContextP ctx,
     int              index,
     dri_bo          *region,
     unsigned long    offset,
@@ -792,7 +792,7 @@ i965_render_src_surface_state(
     unsigned int     flags
 )
 {
-    struct i965_driver_data *i965 = i965_driver_data(ctx);  
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
     void *ss;
     dri_bo *ss_bo = render_state->wm.surface_state_binding_table_bo;
@@ -803,12 +803,12 @@ i965_render_src_surface_state(
     assert(ss_bo->virtual);
     ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);
 
-    if (IS_GEN7(i965->intel.device_id)) {
+    if (IS_GEN7(i965->intel.device_info)) {
         gen7_render_set_surface_state(ss,
                                       region, offset,
                                       w, h,
                                       pitch, format, flags);
-        if (IS_HASWELL(i965->intel.device_id))
+        if (IS_HASWELL(i965->intel.device_info))
             gen7_render_set_surface_scs(ss);
         dri_bo_emit_reloc(ss_bo,
                           I915_GEM_DOMAIN_SAMPLER, 0,
@@ -851,7 +851,10 @@ i965_render_src_surfaces_state(
     i965_render_src_surface_state(ctx, 1, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);     /* Y */
     i965_render_src_surface_state(ctx, 2, region, 0, rw, rh, region_pitch, I965_SURFACEFORMAT_R8_UNORM, flags);
 
-    if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2')) {
+    if (obj_surface->fourcc == VA_FOURCC_Y800) /* single plane for grayscale */
+        return;
+
+    if (obj_surface->fourcc == VA_FOURCC_NV12) {
         i965_render_src_surface_state(ctx, 3, region,
                                       region_pitch * obj_surface->y_cb_offset,
                                       obj_surface->cb_cr_width, obj_surface->cb_cr_height, obj_surface->cb_cr_pitch,
@@ -893,14 +896,14 @@ i965_subpic_render_src_surfaces_state(VADriverContextP ctx,
     assert(obj_surface->bo);
     subpic_region = obj_image->bo;
     /*subpicture surface*/
-    i965_render_src_surface_state(ctx, 1, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);     
-    i965_render_src_surface_state(ctx, 2, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);     
+    i965_render_src_surface_state(ctx, 1, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
+    i965_render_src_surface_state(ctx, 2, subpic_region, 0, obj_subpic->width, obj_subpic->height, obj_subpic->pitch, obj_subpic->format, 0);
 }
 
 static void
 i965_render_dest_surface_state(VADriverContextP ctx, int index)
 {
-    struct i965_driver_data *i965 = i965_driver_data(ctx);  
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
     struct intel_region *dest_region = render_state->draw_region;
     void *ss;
@@ -909,21 +912,21 @@ i965_render_dest_surface_state(VADriverContextP ctx, int index)
     assert(index < MAX_RENDER_SURFACES);
 
     if (dest_region->cpp == 2) {
-       format = I965_SURFACEFORMAT_B5G6R5_UNORM;
+        format = I965_SURFACEFORMAT_B5G6R5_UNORM;
     } else {
-       format = I965_SURFACEFORMAT_B8G8R8A8_UNORM;
+        format = I965_SURFACEFORMAT_B8G8R8A8_UNORM;
     }
 
     dri_bo_map(ss_bo, 1);
     assert(ss_bo->virtual);
     ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);
 
-    if (IS_GEN7(i965->intel.device_id)) {
+    if (IS_GEN7(i965->intel.device_info)) {
         gen7_render_set_surface_state(ss,
                                       dest_region->bo, 0,
                                       dest_region->width, dest_region->height,
                                       dest_region->pitch, format, 0);
-        if (IS_HASWELL(i965->intel.device_id))
+        if (IS_HASWELL(i965->intel.device_info))
             gen7_render_set_surface_scs(ss);
         dri_bo_emit_reloc(ss_bo,
                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
@@ -986,11 +989,11 @@ i965_fill_vertex_buffer(
     dri_bo_subdata(i965->render_state.vb.vertex_buffer, 0, sizeof(vb), vb);
 }
 
-static void 
+static void
 i965_subpic_render_upload_vertex(VADriverContextP ctx,
                                  struct object_surface *obj_surface,
                                  const VARectangle *output_rect)
-{    
+{
     unsigned int index = obj_surface->subpic_render_idx;
     struct object_subpic     *obj_subpic   = obj_surface->obj_subpic[index];
     float tex_coords[4], vid_coords[4];
@@ -1020,7 +1023,7 @@ i965_subpic_render_upload_vertex(VADriverContextP ctx,
     i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
 }
 
-static void 
+static void
 i965_render_upload_vertex(
     VADriverContextP   ctx,
     struct object_surface *obj_surface,
@@ -1050,29 +1053,59 @@ i965_render_upload_vertex(
     i965_fill_vertex_buffer(ctx, tex_coords, vid_coords);
 }
 
+#define PI  3.1415926
+
 static void
 i965_render_upload_constants(VADriverContextP ctx,
-                             struct object_surface *obj_surface)
+                             struct object_surface *obj_surface,
+                             unsigned int flags)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
     unsigned short *constant_buffer;
+    float *color_balance_base;
+    float contrast = (float)i965->contrast_attrib->value / DEFAULT_CONTRAST;
+    float brightness = (float)i965->brightness_attrib->value / 255; /* YUV is float in the shader */
+    float hue = (float)i965->hue_attrib->value / 180 * PI;
+    float saturation = (float)i965->saturation_attrib->value / DEFAULT_SATURATION;
+    float *yuv_to_rgb;
+    const float* yuv_coefs;
+    size_t coefs_length;
 
     dri_bo_map(render_state->curbe.bo, 1);
     assert(render_state->curbe.bo->virtual);
     constant_buffer = render_state->curbe.bo->virtual;
 
     if (obj_surface->subsampling == SUBSAMPLE_YUV400) {
-        assert(obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '1') ||
-               obj_surface->fourcc == VA_FOURCC('I', 'M', 'C', '3'));
-        *constant_buffer = 2;
+        assert(obj_surface->fourcc == VA_FOURCC_Y800);
+
+        constant_buffer[0] = 2;
     } else {
-        if (obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2'))
-            *constant_buffer = 1;
+        if (obj_surface->fourcc == VA_FOURCC_NV12)
+            constant_buffer[0] = 1;
         else
-            *constant_buffer = 0;
+            constant_buffer[0] = 0;
     }
 
+    if (i965->contrast_attrib->value == DEFAULT_CONTRAST &&
+        i965->brightness_attrib->value == DEFAULT_BRIGHTNESS &&
+        i965->hue_attrib->value == DEFAULT_HUE &&
+        i965->saturation_attrib->value == DEFAULT_SATURATION)
+        constant_buffer[1] = 1; /* skip color balance transformation */
+    else
+        constant_buffer[1] = 0;
+
+    color_balance_base = (float *)constant_buffer + 4;
+    *color_balance_base++ = contrast;
+    *color_balance_base++ = brightness;
+    *color_balance_base++ = cos(hue) * contrast * saturation;
+    *color_balance_base++ = sin(hue) * contrast * saturation;
+
+    yuv_to_rgb = (float *)constant_buffer + 8;
+    yuv_coefs = i915_color_standard_to_coefs(i915_filter_to_color_standard(flags & VA_SRC_COLOR_MASK),
+                                             &coefs_length);
+    memcpy(yuv_to_rgb, yuv_coefs, coefs_length);
+
     dri_bo_unmap(render_state->curbe.bo);
 }
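
Editorial note: the four floats written at color_balance_base above feed the new exa_wm_yuv_color_balance.* kernels, whose sources are not part of this diff. The sketch below only illustrates the conventional contrast/brightness/hue/saturation transform these constants are shaped for; the helper name and the normalized-YUV offsets are assumptions, not code taken from the driver.

/*
 * Sketch only: conventional procamp-style color balance in normalized YUV.
 * coeffs[] mirrors the CURBE layout written above:
 *   coeffs[0] = contrast
 *   coeffs[1] = brightness
 *   coeffs[2] = cos(hue) * contrast * saturation
 *   coeffs[3] = sin(hue) * contrast * saturation
 */
static inline void
color_balance_sketch(const float coeffs[4],
                     float y, float u, float v,          /* inputs in [0, 1] */
                     float *out_y, float *out_u, float *out_v)
{
    /* scale luma around the nominal black level, then add brightness */
    *out_y = (y - 0.0625f) * coeffs[0] + coeffs[1] + 0.0625f;
    /* rotate chroma around the 0.5 midpoint by the hue angle,
     * scaled by contrast * saturation */
    *out_u = (u - 0.5f) * coeffs[2] - (v - 0.5f) * coeffs[3] + 0.5f;
    *out_v = (u - 0.5f) * coeffs[3] + (v - 0.5f) * coeffs[2] + 0.5f;
}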
 
@@ -1086,7 +1119,7 @@ i965_subpic_render_upload_constants(VADriverContextP ctx,
     float global_alpha = 1.0;
     unsigned int index = obj_surface->subpic_render_idx;
     struct object_subpic *obj_subpic = obj_surface->obj_subpic[index];
-    
+
     if (obj_subpic->flags & VA_SUBPICTURE_GLOBAL_ALPHA) {
         global_alpha = obj_subpic->global_alpha;
     }
@@ -1099,7 +1132,7 @@ i965_subpic_render_upload_constants(VADriverContextP ctx,
 
     dri_bo_unmap(render_state->curbe.bo);
 }
+
 static void
 i965_surface_render_state_setup(
     VADriverContextP   ctx,
@@ -1118,7 +1151,7 @@ i965_surface_render_state_setup(
     i965_render_cc_viewport(ctx);
     i965_render_cc_unit(ctx);
     i965_render_upload_vertex(ctx, obj_surface, src_rect, dst_rect);
-    i965_render_upload_constants(ctx, obj_surface);
+    i965_render_upload_constants(ctx, obj_surface, flags);
 }
 
 static void
@@ -1147,7 +1180,7 @@ i965_render_pipeline_select(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct intel_batchbuffer *batch = i965->batch;
+
     BEGIN_BATCH(batch, 1);
     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_3D);
     ADVANCE_BATCH(batch);
@@ -1172,7 +1205,7 @@ i965_render_state_base_address(VADriverContextP ctx)
     struct intel_batchbuffer *batch = i965->batch;
     struct i965_render_state *render_state = &i965->render_state;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         BEGIN_BATCH(batch, 8);
         OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
@@ -1211,7 +1244,7 @@ i965_render_binding_table_pointers(VADriverContextP ctx)
     ADVANCE_BATCH(batch);
 }
 
-static void 
+static void
 i965_render_constant_color(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -1267,7 +1300,7 @@ i965_render_urb_layout(VADriverContextP ctx)
     urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;
 
     BEGIN_BATCH(batch, 3);
-    OUT_BATCH(batch, 
+    OUT_BATCH(batch,
               CMD_URB_FENCE |
               UF0_CS_REALLOC |
               UF0_SF_REALLOC |
@@ -1275,7 +1308,7 @@ i965_render_urb_layout(VADriverContextP ctx)
               UF0_GS_REALLOC |
               UF0_VS_REALLOC |
               1);
-    OUT_BATCH(batch, 
+    OUT_BATCH(batch,
               ((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
               ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
               ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
@@ -1285,7 +1318,7 @@ i965_render_urb_layout(VADriverContextP ctx)
     ADVANCE_BATCH(batch);
 }
 
-static void 
+static void
 i965_render_cs_urb_layout(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -1311,7 +1344,7 @@ i965_render_constant_buffer(VADriverContextP ctx)
     OUT_RELOC(batch, render_state->curbe.bo,
               I915_GEM_DOMAIN_INSTRUCTION, 0,
               URB_CS_ENTRY_SIZE - 1);
-    ADVANCE_BATCH(batch);    
+    ADVANCE_BATCH(batch);
 }
 
 static void
@@ -1326,7 +1359,7 @@ i965_render_drawing_rectangle(VADriverContextP ctx)
     OUT_BATCH(batch, CMD_DRAWING_RECTANGLE | 2);
     OUT_BATCH(batch, 0x00000000);
     OUT_BATCH(batch, (dest_region->width - 1) | (dest_region->height - 1) << 16);
-    OUT_BATCH(batch, 0x00000000);         
+    OUT_BATCH(batch, 0x00000000);
     ADVANCE_BATCH(batch);
 }
 
@@ -1336,7 +1369,7 @@ i965_render_vertex_elements(VADriverContextP ctx)
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct intel_batchbuffer *batch = i965->batch;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         BEGIN_BATCH(batch, 5);
         OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
         /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
@@ -1422,20 +1455,20 @@ i965_render_startup(VADriverContextP ctx)
 
     BEGIN_BATCH(batch, 11);
     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
-    OUT_BATCH(batch, 
+    OUT_BATCH(batch,
               (0 << VB0_BUFFER_INDEX_SHIFT) |
               VB0_VERTEXDATA |
               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
     else
         OUT_BATCH(batch, 3);
 
     OUT_BATCH(batch, 0);
 
-    OUT_BATCH(batch, 
+    OUT_BATCH(batch,
               CMD_3DPRIMITIVE |
               _3DPRIMITIVE_VERTEX_SEQUENTIAL |
               (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
@@ -1449,7 +1482,7 @@ i965_render_startup(VADriverContextP ctx)
     ADVANCE_BATCH(batch);
 }
 
-static void 
+static void
 i965_clear_dest_region(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -1478,8 +1511,8 @@ i965_clear_dest_region(VADriverContextP ctx)
 
     br13 |= pitch;
 
-    if (IS_GEN6(i965->intel.device_id) ||
-        IS_GEN7(i965->intel.device_id)) {
+    if (IS_GEN6(i965->intel.device_info) ||
+        IS_GEN7(i965->intel.device_info)) {
         intel_batchbuffer_start_atomic_blt(batch, 24);
         BEGIN_BLT_BATCH(batch, 6);
     } else {
@@ -1492,7 +1525,7 @@ i965_clear_dest_region(VADriverContextP ctx)
     OUT_BATCH(batch, (dest_region->y << 16) | (dest_region->x));
     OUT_BATCH(batch, ((dest_region->y + dest_region->height) << 16) |
               (dest_region->x + dest_region->width));
-    OUT_RELOC(batch, dest_region->bo, 
+    OUT_RELOC(batch, dest_region->bo,
               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
               0);
     OUT_BATCH(batch, 0x0);
@@ -1540,6 +1573,7 @@ i965_subpic_render_pipeline_setup(VADriverContextP ctx)
     i965_render_pipelined_pointers(ctx);
     i965_render_urb_layout(ctx);
     i965_render_cs_urb_layout(ctx);
+    i965_render_constant_buffer(ctx);
     i965_render_drawing_rectangle(ctx);
     i965_render_vertex_elements(ctx);
     i965_render_startup(ctx);
@@ -1547,7 +1581,7 @@ i965_subpic_render_pipeline_setup(VADriverContextP ctx)
 }
 
 
-static void 
+static void
 i965_render_initialize(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -1670,7 +1704,7 @@ i965_render_put_subpicture(
 /*
  * for GEN6+
  */
-static void 
+static void
 gen6_render_initialize(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -1747,7 +1781,7 @@ gen6_render_color_calc_state(VADriverContextP ctx)
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
     struct gen6_color_calc_state *color_calc_state;
-    
+
     dri_bo_map(render_state->cc.state, 1);
     assert(render_state->cc.state->virtual);
     color_calc_state = render_state->cc.state->virtual;
@@ -1765,7 +1799,7 @@ gen6_render_blend_state(VADriverContextP ctx)
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
     struct gen6_blend_state *blend_state;
-    
+
     dri_bo_map(render_state->cc.blend, 1);
     assert(render_state->cc.blend->virtual);
     blend_state = render_state->cc.blend->virtual;
@@ -1781,7 +1815,7 @@ gen6_render_depth_stencil_state(VADriverContextP ctx)
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
     struct gen6_depth_stencil_state *depth_stencil_state;
-    
+
     dri_bo_map(render_state->cc.depth_stencil, 1);
     assert(render_state->cc.depth_stencil->virtual);
     depth_stencil_state = render_state->cc.depth_stencil->virtual;
@@ -1805,7 +1839,7 @@ gen6_render_setup_states(
     gen6_render_color_calc_state(ctx);
     gen6_render_blend_state(ctx);
     gen6_render_depth_stencil_state(ctx);
-    i965_render_upload_constants(ctx, obj_surface);
+    i965_render_upload_constants(ctx, obj_surface, flags);
     i965_render_upload_vertex(ctx, obj_surface, src_rect, dst_rect);
 }
 
@@ -1902,7 +1936,7 @@ gen6_emit_sampler_state_pointers(VADriverContextP ctx)
               (4 - 2));
     OUT_BATCH(batch, 0); /* VS */
     OUT_BATCH(batch, 0); /* GS */
-    OUT_RELOC(batch,render_state->wm.sampler, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
+    OUT_RELOC(batch, render_state->wm.sampler, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
 }
 
 static void
@@ -1915,8 +1949,8 @@ gen6_emit_binding_table(VADriverContextP ctx)
     OUT_BATCH(batch, CMD_BINDING_TABLE_POINTERS |
               GEN6_BINDING_TABLE_MODIFY_PS |
               (4 - 2));
-    OUT_BATCH(batch, 0);               /* vs */
-    OUT_BATCH(batch, 0);               /* gs */
+    OUT_BATCH(batch, 0);        /* vs */
+    OUT_BATCH(batch, 0);        /* gs */
     /* Only the PS uses the binding table */
     OUT_BATCH(batch, BINDING_TABLE_OFFSET);
 }
@@ -1946,7 +1980,7 @@ gen6_emit_drawing_rectangle(VADriverContextP ctx)
     i965_render_drawing_rectangle(ctx);
 }
 
-static void 
+static void
 gen6_emit_vs_state(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -1958,7 +1992,7 @@ gen6_emit_vs_state(VADriverContextP ctx)
     OUT_BATCH(batch, 0);
     OUT_BATCH(batch, 0);
     OUT_BATCH(batch, 0);
-       
+
     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
     OUT_BATCH(batch, 0); /* without VS kernel */
     OUT_BATCH(batch, 0);
@@ -1967,7 +2001,7 @@ gen6_emit_vs_state(VADriverContextP ctx)
     OUT_BATCH(batch, 0); /* pass-through */
 }
 
-static void 
+static void
 gen6_emit_gs_state(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -1979,7 +2013,7 @@ gen6_emit_gs_state(VADriverContextP ctx)
     OUT_BATCH(batch, 0);
     OUT_BATCH(batch, 0);
     OUT_BATCH(batch, 0);
-       
+
     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
     OUT_BATCH(batch, 0); /* without GS kernel */
     OUT_BATCH(batch, 0);
@@ -1989,7 +2023,7 @@ gen6_emit_gs_state(VADriverContextP ctx)
     OUT_BATCH(batch, 0); /* pass-through */
 }
 
-static void 
+static void
 gen6_emit_clip_state(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -2001,7 +2035,7 @@ gen6_emit_clip_state(VADriverContextP ctx)
     OUT_BATCH(batch, 0);
 }
 
-static void 
+static void
 gen6_emit_sf_state(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -2031,7 +2065,7 @@ gen6_emit_sf_state(VADriverContextP ctx)
     OUT_BATCH(batch, 0); /* DW19 */
 }
 
-static void 
+static void
 gen6_emit_wm_state(VADriverContextP ctx, int kernel)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -2041,10 +2075,10 @@ gen6_emit_wm_state(VADriverContextP ctx, int kernel)
     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS |
               GEN6_3DSTATE_CONSTANT_BUFFER_0_ENABLE |
               (5 - 2));
-    OUT_RELOC(batch, 
+    OUT_RELOC(batch,
               render_state->curbe.bo,
               I915_GEM_DOMAIN_INSTRUCTION, 0,
-              0);
+              (URB_CS_ENTRY_SIZE - 1));
     OUT_BATCH(batch, 0);
     OUT_BATCH(batch, 0);
     OUT_BATCH(batch, 0);
@@ -2057,7 +2091,7 @@ gen6_emit_wm_state(VADriverContextP ctx, int kernel)
               (5 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT));
     OUT_BATCH(batch, 0);
     OUT_BATCH(batch, (6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT)); /* DW4 */
-    OUT_BATCH(batch, ((render_state->max_wm_threads - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
+    OUT_BATCH(batch, ((i965->intel.device_info->max_wm_threads - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT) |
               GEN6_3DSTATE_WM_DISPATCH_ENABLE |
               GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
     OUT_BATCH(batch, (1 << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT) |
@@ -2088,7 +2122,7 @@ gen6_emit_vertex_element_state(VADriverContextP ctx)
               GEN6_VE0_VALID |
               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
               (8 << VE0_OFFSET_SHIFT));
-    OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
+    OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
@@ -2103,7 +2137,7 @@ gen6_emit_vertices(VADriverContextP ctx)
 
     BEGIN_BATCH(batch, 11);
     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | 3);
-    OUT_BATCH(batch, 
+    OUT_BATCH(batch,
               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
               GEN6_VB0_VERTEXDATA |
               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
@@ -2111,7 +2145,7 @@ gen6_emit_vertices(VADriverContextP ctx)
     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
     OUT_BATCH(batch, 0);
 
-    OUT_BATCH(batch, 
+    OUT_BATCH(batch,
               CMD_3DPRIMITIVE |
               _3DPRIMITIVE_VERTEX_SEQUENTIAL |
               (_3DPRIM_RECTLIST << _3DPRIMITIVE_TOPOLOGY_SHIFT) |
@@ -2178,7 +2212,7 @@ gen6_subpicture_render_blend_state(VADriverContextP ctx)
     struct i965_render_state *render_state = &i965->render_state;
     struct gen6_blend_state *blend_state;
 
-    dri_bo_unmap(render_state->cc.state);    
+    dri_bo_unmap(render_state->cc.state);
     dri_bo_map(render_state->cc.blend, 1);
     assert(render_state->cc.blend->virtual);
     blend_state = render_state->cc.blend->virtual;
@@ -2236,7 +2270,7 @@ gen6_render_put_subpicture(
 /*
  * for GEN7
  */
-static void 
+static void
 gen7_render_initialize(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -2307,13 +2341,18 @@ gen7_render_initialize(VADriverContextP ctx)
     render_state->cc.depth_stencil = bo;
 }
 
+/*
+ * for GEN8
+ */
+#define ALIGNMENT       64
+
 static void
 gen7_render_color_calc_state(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
     struct gen6_color_calc_state *color_calc_state;
-    
+
     dri_bo_map(render_state->cc.state, 1);
     assert(render_state->cc.state->virtual);
     color_calc_state = render_state->cc.state->virtual;
@@ -2331,7 +2370,7 @@ gen7_render_blend_state(VADriverContextP ctx)
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
     struct gen6_blend_state *blend_state;
-    
+
     dri_bo_map(render_state->cc.blend, 1);
     assert(render_state->cc.blend->virtual);
     blend_state = render_state->cc.blend->virtual;
@@ -2348,7 +2387,7 @@ gen7_render_depth_stencil_state(VADriverContextP ctx)
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
     struct gen6_depth_stencil_state *depth_stencil_state;
-    
+
     dri_bo_map(render_state->cc.depth_stencil, 1);
     assert(render_state->cc.depth_stencil->virtual);
     depth_stencil_state = render_state->cc.depth_stencil->virtual;
@@ -2356,14 +2395,14 @@ gen7_render_depth_stencil_state(VADriverContextP ctx)
     dri_bo_unmap(render_state->cc.depth_stencil);
 }
 
-static void 
+static void
 gen7_render_sampler(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
     struct gen7_sampler_state *sampler_state;
     int i;
-    
+
     assert(render_state->wm.sampler_count > 0);
     assert(render_state->wm.sampler_count <= MAX_SAMPLERS);
 
@@ -2383,6 +2422,7 @@ gen7_render_sampler(VADriverContextP ctx)
     dri_bo_unmap(render_state->wm.sampler);
 }
 
+
 static void
 gen7_render_setup_states(
     VADriverContextP   ctx,
@@ -2399,10 +2439,11 @@ gen7_render_setup_states(
     gen7_render_color_calc_state(ctx);
     gen7_render_blend_state(ctx);
     gen7_render_depth_stencil_state(ctx);
-    i965_render_upload_constants(ctx, obj_surface);
+    i965_render_upload_constants(ctx, obj_surface, flags);
     i965_render_upload_vertex(ctx, obj_surface, src_rect, dst_rect);
 }
 
+
 static void
 gen7_emit_invarient_states(VADriverContextP ctx)
 {
@@ -2474,7 +2515,7 @@ gen7_emit_viewport_state_pointers(VADriverContextP ctx)
 }
 
 /*
- * URB layout on GEN7 
+ * URB layout on GEN7
  * ----------------------------------------
  * | PS Push Constants (8KB) | VS entries |
  * ----------------------------------------
@@ -2486,7 +2527,7 @@ gen7_emit_urb(VADriverContextP ctx)
     struct intel_batchbuffer *batch = i965->batch;
     unsigned int num_urb_entries = 32;
 
-    if (IS_HASWELL(i965->intel.device_id))
+    if (IS_HASWELL(i965->intel.device_info))
         num_urb_entries = 64;
 
     BEGIN_BATCH(batch, 2);
@@ -2496,32 +2537,32 @@ gen7_emit_urb(VADriverContextP ctx)
 
     BEGIN_BATCH(batch, 2);
     OUT_BATCH(batch, GEN7_3DSTATE_URB_VS | (2 - 2));
-    OUT_BATCH(batch, 
+    OUT_BATCH(batch,
               (num_urb_entries << GEN7_URB_ENTRY_NUMBER_SHIFT) |
               (2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT |
               (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
-   ADVANCE_BATCH(batch);
+    ADVANCE_BATCH(batch);
 
-   BEGIN_BATCH(batch, 2);
-   OUT_BATCH(batch, GEN7_3DSTATE_URB_GS | (2 - 2));
-   OUT_BATCH(batch,
-             (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
-             (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
-   ADVANCE_BATCH(batch);
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, GEN7_3DSTATE_URB_GS | (2 - 2));
+    OUT_BATCH(batch,
+              (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
+              (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
+    ADVANCE_BATCH(batch);
 
-   BEGIN_BATCH(batch, 2);
-   OUT_BATCH(batch, GEN7_3DSTATE_URB_HS | (2 - 2));
-   OUT_BATCH(batch,
-             (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
-             (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
-   ADVANCE_BATCH(batch);
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, GEN7_3DSTATE_URB_HS | (2 - 2));
+    OUT_BATCH(batch,
+              (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
+              (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
+    ADVANCE_BATCH(batch);
 
-   BEGIN_BATCH(batch, 2);
-   OUT_BATCH(batch, GEN7_3DSTATE_URB_DS | (2 - 2));
-   OUT_BATCH(batch,
-             (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
-             (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
-   ADVANCE_BATCH(batch);
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, GEN7_3DSTATE_URB_DS | (2 - 2));
+    OUT_BATCH(batch,
+              (0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
+              (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
+    ADVANCE_BATCH(batch);
 }
 
 static void
@@ -2551,7 +2592,7 @@ gen7_emit_cc_state_pointers(VADriverContextP ctx)
     OUT_BATCH(batch, GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS | (2 - 2));
     OUT_RELOC(batch,
               render_state->cc.depth_stencil,
-              I915_GEM_DOMAIN_INSTRUCTION, 0, 
+              I915_GEM_DOMAIN_INSTRUCTION, 0,
               1);
     ADVANCE_BATCH(batch);
 }
@@ -2615,7 +2656,7 @@ gen7_emit_drawing_rectangle(VADriverContextP ctx)
     i965_render_drawing_rectangle(ctx);
 }
 
-static void 
+static void
 gen7_emit_vs_state(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -2629,7 +2670,7 @@ gen7_emit_vs_state(VADriverContextP ctx)
     OUT_BATCH(batch, 0);
     OUT_BATCH(batch, 0);
     OUT_BATCH(batch, 0);
-       
+
     OUT_BATCH(batch, GEN6_3DSTATE_VS | (6 - 2));
     OUT_BATCH(batch, 0); /* without VS kernel */
     OUT_BATCH(batch, 0);
@@ -2638,7 +2679,7 @@ gen7_emit_vs_state(VADriverContextP ctx)
     OUT_BATCH(batch, 0); /* pass-through */
 }
 
-static void 
+static void
 gen7_emit_bypass_state(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -2655,7 +2696,7 @@ gen7_emit_bypass_state(VADriverContextP ctx)
     OUT_BATCH(batch, 0);
     ADVANCE_BATCH(batch);
 
-    BEGIN_BATCH(batch, 7);     
+    BEGIN_BATCH(batch, 7);
     OUT_BATCH(batch, GEN6_3DSTATE_GS | (7 - 2));
     OUT_BATCH(batch, 0); /* without GS kernel */
     OUT_BATCH(batch, 0);
@@ -2737,7 +2778,7 @@ gen7_emit_bypass_state(VADriverContextP ctx)
     ADVANCE_BATCH(batch);
 }
 
-static void 
+static void
 gen7_emit_clip_state(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -2749,7 +2790,7 @@ gen7_emit_clip_state(VADriverContextP ctx)
     OUT_BATCH(batch, 0);
 }
 
-static void 
+static void
 gen7_emit_sf_state(VADriverContextP ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -2786,7 +2827,7 @@ gen7_emit_sf_state(VADriverContextP ctx)
     ADVANCE_BATCH(batch);
 }
 
-static void 
+static void
 gen7_emit_wm_state(VADriverContextP ctx, int kernel)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -2795,7 +2836,7 @@ gen7_emit_wm_state(VADriverContextP ctx, int kernel)
     unsigned int max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_IVB;
     unsigned int num_samples = 0;
 
-    if (IS_HASWELL(i965->intel.device_id)) {
+    if (IS_HASWELL(i965->intel.device_info)) {
         max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_HSW;
         num_samples = 1 << GEN7_PS_SAMPLE_MASK_SHIFT_HSW;
     }
@@ -2810,9 +2851,9 @@ gen7_emit_wm_state(VADriverContextP ctx, int kernel)
 
     BEGIN_BATCH(batch, 7);
     OUT_BATCH(batch, GEN6_3DSTATE_CONSTANT_PS | (7 - 2));
-    OUT_BATCH(batch, 1);
+    OUT_BATCH(batch, URB_CS_ENTRY_SIZE);
     OUT_BATCH(batch, 0);
-    OUT_RELOC(batch, 
+    OUT_RELOC(batch,
               render_state->curbe.bo,
               I915_GEM_DOMAIN_INSTRUCTION, 0,
               0);
@@ -2823,20 +2864,20 @@ gen7_emit_wm_state(VADriverContextP ctx, int kernel)
 
     BEGIN_BATCH(batch, 8);
     OUT_BATCH(batch, GEN7_3DSTATE_PS | (8 - 2));
-    OUT_RELOC(batch, 
+    OUT_RELOC(batch,
               render_state->render_kernels[kernel].bo,
               I915_GEM_DOMAIN_INSTRUCTION, 0,
               0);
-    OUT_BATCH(batch, 
+    OUT_BATCH(batch,
               (1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
               (5 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
     OUT_BATCH(batch, 0); /* scratch space base offset */
-    OUT_BATCH(batch, 
-              ((render_state->max_wm_threads - 1) << max_threads_shift) | num_samples |
+    OUT_BATCH(batch,
+              ((i965->intel.device_info->max_wm_threads - 1) << max_threads_shift) | num_samples |
               GEN7_PS_PUSH_CONSTANT_ENABLE |
               GEN7_PS_ATTRIBUTE_ENABLE |
               GEN7_PS_16_DISPATCH_ENABLE);
-    OUT_BATCH(batch, 
+    OUT_BATCH(batch,
               (6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
     OUT_BATCH(batch, 0); /* kernel 1 pointer */
     OUT_BATCH(batch, 0); /* kernel 2 pointer */
@@ -2865,7 +2906,7 @@ gen7_emit_vertex_element_state(VADriverContextP ctx)
               GEN6_VE0_VALID |
               (I965_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT) |
               (8 << VE0_OFFSET_SHIFT));
-    OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) | 
+    OUT_BATCH(batch, (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT) |
               (I965_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT) |
               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT) |
               (I965_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT));
@@ -2880,7 +2921,7 @@ gen7_emit_vertices(VADriverContextP ctx)
 
     BEGIN_BATCH(batch, 5);
     OUT_BATCH(batch, CMD_VERTEX_BUFFERS | (5 - 2));
-    OUT_BATCH(batch, 
+    OUT_BATCH(batch,
               (0 << GEN6_VB0_BUFFER_INDEX_SHIFT) |
               GEN6_VB0_VERTEXDATA |
               GEN7_VB0_ADDRESS_MODIFYENABLE |
@@ -2930,10 +2971,11 @@ gen7_render_emit_states(VADriverContextP ctx, int kernel)
     intel_batchbuffer_end_atomic(batch);
 }
 
+
 static void
 gen7_render_put_surface(
     VADriverContextP   ctx,
-    struct object_surface *obj_surface,    
+    struct object_surface *obj_surface,
     const VARectangle *src_rect,
     const VARectangle *dst_rect,
     unsigned int       flags
@@ -2949,6 +2991,7 @@ gen7_render_put_surface(
     intel_batchbuffer_flush(batch);
 }
 
+
 static void
 gen7_subpicture_render_blend_state(VADriverContextP ctx)
 {
@@ -2956,7 +2999,7 @@ gen7_subpicture_render_blend_state(VADriverContextP ctx)
     struct i965_render_state *render_state = &i965->render_state;
     struct gen6_blend_state *blend_state;
 
-    dri_bo_unmap(render_state->cc.state);    
+    dri_bo_unmap(render_state->cc.state);
     dri_bo_map(render_state->cc.blend, 1);
     assert(render_state->cc.blend->virtual);
     blend_state = render_state->cc.blend->virtual;
@@ -3012,13 +3055,6 @@ gen7_render_put_subpicture(
 }
 
 
-/*
- * global functions
- */
-VAStatus 
-i965_DestroySurfaces(VADriverContextP ctx,
-                     VASurfaceID *surface_list,
-                     int num_surfaces);
 void
 intel_render_put_surface(
     VADriverContextP   ctx,
@@ -3029,32 +3065,30 @@ intel_render_put_surface(
 )
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct i965_render_state *render_state = &i965->render_state;
     int has_done_scaling = 0;
+    VARectangle calibrated_rect;
     VASurfaceID out_surface_id = i965_post_processing(ctx,
                                                       obj_surface,
                                                       src_rect,
                                                       dst_rect,
                                                       flags,
-                                                      &has_done_scaling);
+                                                      &has_done_scaling,
+                                                      &calibrated_rect);
 
     assert((!has_done_scaling) || (out_surface_id != VA_INVALID_ID));
 
     if (out_surface_id != VA_INVALID_ID) {
         struct object_surface *new_obj_surface = SURFACE(out_surface_id);
-        
+
         if (new_obj_surface && new_obj_surface->bo)
             obj_surface = new_obj_surface;
 
         if (has_done_scaling)
-            src_rect = dst_rect;
+            src_rect = &calibrated_rect;
     }
 
-    if (IS_GEN7(i965->intel.device_id))
-        gen7_render_put_surface(ctx, obj_surface, src_rect, dst_rect, flags);
-    else if (IS_GEN6(i965->intel.device_id))
-        gen6_render_put_surface(ctx, obj_surface, src_rect, dst_rect, flags);
-    else
-        i965_render_put_surface(ctx, obj_surface, src_rect, dst_rect, flags);
+    render_state->render_put_surface(ctx, obj_surface, src_rect, dst_rect, flags);
 
     if (out_surface_id != VA_INVALID_ID)
         i965_DestroySurfaces(ctx, &out_surface_id, 1);
@@ -3069,82 +3103,13 @@ intel_render_put_subpicture(
 )
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
-
-    if (IS_GEN7(i965->intel.device_id))
-        gen7_render_put_subpicture(ctx, obj_surface, src_rect, dst_rect);
-    else if (IS_GEN6(i965->intel.device_id))
-        gen6_render_put_subpicture(ctx, obj_surface, src_rect, dst_rect);
-    else
-        i965_render_put_subpicture(ctx, obj_surface, src_rect, dst_rect);
-}
-
-bool 
-i965_render_init(VADriverContextP ctx)
-{
-    struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
-    int i;
-
-    /* kernel */
-    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen5) / 
-                                 sizeof(render_kernels_gen5[0])));
-    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) / 
-                                 sizeof(render_kernels_gen6[0])));
-
-    if (IS_GEN7(i965->intel.device_id))
-        memcpy(render_state->render_kernels,
-               (IS_HASWELL(i965->intel.device_id) ? render_kernels_gen7_haswell : render_kernels_gen7),
-               sizeof(render_state->render_kernels));
-    else if (IS_GEN6(i965->intel.device_id))
-        memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
-    else if (IS_IRONLAKE(i965->intel.device_id))
-        memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
-    else
-        memcpy(render_state->render_kernels, render_kernels_gen4, sizeof(render_state->render_kernels));
-
-    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
-        struct i965_kernel *kernel = &render_state->render_kernels[i];
-
-        if (!kernel->size)
-            continue;
-
-        kernel->bo = dri_bo_alloc(i965->intel.bufmgr, 
-                                  kernel->name, 
-                                  kernel->size, 0x1000);
-        assert(kernel->bo);
-        dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
-    }
-
-    /* constant buffer */
-    render_state->curbe.bo = dri_bo_alloc(i965->intel.bufmgr,
-                      "constant buffer",
-                      4096, 64);
-    assert(render_state->curbe.bo);
-
-    if (IS_IVB_GT1(i965->intel.device_id) ||
-        IS_HSW_GT1(i965->intel.device_id)) {
-        render_state->max_wm_threads = 48;
-    } else if (IS_IVB_GT2(i965->intel.device_id) ||
-               IS_HSW_GT2(i965->intel.device_id)) {
-        render_state->max_wm_threads = 172;
-    } else if (IS_SNB_GT1(i965->intel.device_id)) {
-        render_state->max_wm_threads = 40;
-    } else if (IS_SNB_GT2(i965->intel.device_id)) {
-        render_state->max_wm_threads = 80;
-    } else if (IS_IRONLAKE(i965->intel.device_id)) {
-        render_state->max_wm_threads = 72; /* 12 * 6 */
-    } else if (IS_G4X(i965->intel.device_id)) {
-        render_state->max_wm_threads = 50; /* 12 * 5 */
-    } else {
-        /* should never get here !!! */
-        assert(0);
-    }
 
-    return true;
+    render_state->render_put_subpicture(ctx, obj_surface, src_rect, dst_rect);
 }
 
-void 
-i965_render_terminate(VADriverContextP ctx)
+static void
+genx_render_terminate(VADriverContextP ctx)
 {
     int i;
     struct i965_driver_data *i965 = i965_driver_data(ctx);
@@ -3155,7 +3120,7 @@ i965_render_terminate(VADriverContextP ctx)
 
     for (i = 0; i < NUM_RENDER_KERNEL; i++) {
         struct i965_kernel *kernel = &render_state->render_kernels[i];
-        
+
         dri_bo_unreference(kernel->bo);
         kernel->bo = NULL;
     }
@@ -3187,3 +3152,76 @@ i965_render_terminate(VADriverContextP ctx)
     }
 }
 
+bool
+genx_render_init(VADriverContextP ctx)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct i965_render_state *render_state = &i965->render_state;
+    int i;
+
+    /* kernel */
+    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen5) /
+                                 sizeof(render_kernels_gen5[0])));
+    assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) /
+                                 sizeof(render_kernels_gen6[0])));
+
+    if (IS_GEN7(i965->intel.device_info)) {
+        memcpy(render_state->render_kernels,
+               (IS_HASWELL(i965->intel.device_info) ? render_kernels_gen7_haswell : render_kernels_gen7),
+               sizeof(render_state->render_kernels));
+        render_state->render_put_surface = gen7_render_put_surface;
+        render_state->render_put_subpicture = gen7_render_put_subpicture;
+    } else if (IS_GEN6(i965->intel.device_info)) {
+        memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
+        render_state->render_put_surface = gen6_render_put_surface;
+        render_state->render_put_subpicture = gen6_render_put_subpicture;
+    } else if (IS_IRONLAKE(i965->intel.device_info)) {
+        memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
+        render_state->render_put_surface = i965_render_put_surface;
+        render_state->render_put_subpicture = i965_render_put_subpicture;
+    } else {
+        memcpy(render_state->render_kernels, render_kernels_gen4, sizeof(render_state->render_kernels));
+        render_state->render_put_surface = i965_render_put_surface;
+        render_state->render_put_subpicture = i965_render_put_subpicture;
+    }
+
+    render_state->render_terminate = genx_render_terminate;
+
+    for (i = 0; i < NUM_RENDER_KERNEL; i++) {
+        struct i965_kernel *kernel = &render_state->render_kernels[i];
+
+        if (!kernel->size)
+            continue;
+
+        kernel->bo = dri_bo_alloc(i965->intel.bufmgr,
+                                  kernel->name,
+                                  kernel->size, 0x1000);
+        assert(kernel->bo);
+        dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
+    }
+
+    /* constant buffer */
+    render_state->curbe.bo = dri_bo_alloc(i965->intel.bufmgr,
+                                          "constant buffer",
+                                          4096, 64);
+    assert(render_state->curbe.bo);
+
+    return true;
+}
+
+bool
+i965_render_init(VADriverContextP ctx)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+
+    return i965->codec_info->render_init(ctx);
+}
+
+void
+i965_render_terminate(VADriverContextP ctx)
+{
+    struct i965_driver_data *i965 = i965_driver_data(ctx);
+    struct i965_render_state *render_state = &i965->render_state;
+
+    render_state->render_terminate(ctx);
+}
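
Editorial note: genx_render_init() and intel_render_put_surface() above dispatch through per-generation hooks stored in struct i965_render_state, and i965_render_init() goes through codec_info->render_init. Those declarations live in i965_drv_video.h / i965_render.h and are not shown in this diff; the struct below is a minimal sketch of the assumed shape, with a hypothetical name (VADriverContextP and VARectangle come from <va/va.h>, struct object_surface from the driver headers).

/*
 * Sketch only (assumed field layout, hypothetical struct name): the hooks
 * this patch expects to find in struct i965_render_state.
 */
struct i965_render_hooks_sketch {
    void (*render_put_surface)(VADriverContextP ctx,
                               struct object_surface *obj_surface,
                               const VARectangle *src_rect,
                               const VARectangle *dst_rect,
                               unsigned int flags);
    void (*render_put_subpicture)(VADriverContextP ctx,
                                  struct object_surface *obj_surface,
                                  const VARectangle *src_rect,
                                  const VARectangle *dst_rect);
    void (*render_terminate)(VADriverContextP ctx);
};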