2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Xiang Haihao <haihao.xiang@intel.com>
26 * Zhao Yakui <yakui.zhao@intel.com>
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "i965_drv_video.h"
40 #include "i965_post_processing.h"
41 #include "i965_render.h"
42 #include "i965_yuv_coefs.h"
43 #include "intel_media.h"
45 #include "gen75_picture_process.h"
47 #define SURFACE_STATE_PADDED_SIZE SURFACE_STATE_PADDED_SIZE_GEN8
49 #define SURFACE_STATE_OFFSET(index) (SURFACE_STATE_PADDED_SIZE * index)
50 #define BINDING_TABLE_OFFSET SURFACE_STATE_OFFSET(MAX_PP_SURFACES)
52 #define GPU_ASM_BLOCK_WIDTH 16
53 #define GPU_ASM_BLOCK_HEIGHT 8
54 #define GPU_ASM_X_OFFSET_ALIGNMENT 4
56 #define VA_STATUS_SUCCESS_1 0xFFFFFFFE
58 VAStatus pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
59 const struct i965_surface *src_surface,
60 const VARectangle *src_rect,
61 struct i965_surface *dst_surface,
62 const VARectangle *dst_rect,
65 VAStatus gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
66 const struct i965_surface *src_surface,
67 const VARectangle *src_rect,
68 struct i965_surface *dst_surface,
69 const VARectangle *dst_rect,
72 /* TODO: Modify the shader and then compile it again.
73 * Currently it is derived from Haswell*/
74 static const uint32_t pp_null_gen8[][4] = {
77 static const uint32_t pp_nv12_load_save_nv12_gen8[][4] = {
78 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
81 static const uint32_t pp_nv12_load_save_pl3_gen8[][4] = {
82 #include "shaders/post_processing/gen8/pl2_to_pl3.g8b"
85 static const uint32_t pp_pl3_load_save_nv12_gen8[][4] = {
86 #include "shaders/post_processing/gen8/pl3_to_pl2.g8b"
89 static const uint32_t pp_pl3_load_save_pl3_gen8[][4] = {
90 #include "shaders/post_processing/gen8/pl3_to_pl3.g8b"
93 static const uint32_t pp_nv12_scaling_gen8[][4] = {
94 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
97 static const uint32_t pp_nv12_avs_gen8[][4] = {
98 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
101 static const uint32_t pp_nv12_dndi_gen8[][4] = {
102 // #include "shaders/post_processing/gen7/dndi.g75b"
105 static const uint32_t pp_nv12_dn_gen8[][4] = {
106 // #include "shaders/post_processing/gen7/nv12_dn_nv12.g75b"
108 static const uint32_t pp_nv12_load_save_pa_gen8[][4] = {
109 #include "shaders/post_processing/gen8/pl2_to_pa.g8b"
111 static const uint32_t pp_pl3_load_save_pa_gen8[][4] = {
112 #include "shaders/post_processing/gen8/pl3_to_pa.g8b"
114 static const uint32_t pp_pa_load_save_nv12_gen8[][4] = {
115 #include "shaders/post_processing/gen8/pa_to_pl2.g8b"
117 static const uint32_t pp_pa_load_save_pl3_gen8[][4] = {
118 #include "shaders/post_processing/gen8/pa_to_pl3.g8b"
120 static const uint32_t pp_pa_load_save_pa_gen8[][4] = {
121 #include "shaders/post_processing/gen8/pa_to_pa.g8b"
123 static const uint32_t pp_rgbx_load_save_nv12_gen8[][4] = {
124 #include "shaders/post_processing/gen8/rgbx_to_nv12.g8b"
126 static const uint32_t pp_nv12_load_save_rgbx_gen8[][4] = {
127 #include "shaders/post_processing/gen8/pl2_to_rgbx.g8b"
130 static struct pp_module pp_modules_gen8[] = {
133 "NULL module (for testing)",
136 sizeof(pp_null_gen8),
146 PP_NV12_LOAD_SAVE_N12,
147 pp_nv12_load_save_nv12_gen8,
148 sizeof(pp_nv12_load_save_nv12_gen8),
152 gen8_pp_plx_avs_initialize,
158 PP_NV12_LOAD_SAVE_PL3,
159 pp_nv12_load_save_pl3_gen8,
160 sizeof(pp_nv12_load_save_pl3_gen8),
163 gen8_pp_plx_avs_initialize,
169 PP_PL3_LOAD_SAVE_N12,
170 pp_pl3_load_save_nv12_gen8,
171 sizeof(pp_pl3_load_save_nv12_gen8),
175 gen8_pp_plx_avs_initialize,
181 PP_PL3_LOAD_SAVE_PL3,
182 pp_pl3_load_save_pl3_gen8,
183 sizeof(pp_pl3_load_save_pl3_gen8),
187 gen8_pp_plx_avs_initialize,
192 "NV12 Scaling module",
194 pp_nv12_scaling_gen8,
195 sizeof(pp_nv12_scaling_gen8),
199 gen8_pp_plx_avs_initialize,
207 sizeof(pp_nv12_avs_gen8),
211 gen8_pp_plx_avs_initialize,
219 sizeof(pp_nv12_dndi_gen8),
231 sizeof(pp_nv12_dn_gen8),
240 PP_NV12_LOAD_SAVE_PA,
241 pp_nv12_load_save_pa_gen8,
242 sizeof(pp_nv12_load_save_pa_gen8),
246 gen8_pp_plx_avs_initialize,
253 pp_pl3_load_save_pa_gen8,
254 sizeof(pp_pl3_load_save_pa_gen8),
258 gen8_pp_plx_avs_initialize,
264 PP_PA_LOAD_SAVE_NV12,
265 pp_pa_load_save_nv12_gen8,
266 sizeof(pp_pa_load_save_nv12_gen8),
270 gen8_pp_plx_avs_initialize,
277 pp_pa_load_save_pl3_gen8,
278 sizeof(pp_pa_load_save_pl3_gen8),
282 gen8_pp_plx_avs_initialize,
289 pp_pa_load_save_pa_gen8,
290 sizeof(pp_pa_load_save_pa_gen8),
294 gen8_pp_plx_avs_initialize,
300 PP_RGBX_LOAD_SAVE_NV12,
301 pp_rgbx_load_save_nv12_gen8,
302 sizeof(pp_rgbx_load_save_nv12_gen8),
306 gen8_pp_plx_avs_initialize,
312 PP_NV12_LOAD_SAVE_RGBX,
313 pp_nv12_load_save_rgbx_gen8,
314 sizeof(pp_nv12_load_save_rgbx_gen8),
318 gen8_pp_plx_avs_initialize,
/* Return the VA fourcc code of a post-processing surface, handling both
 * VAImage-backed (I965_SURFACE_TYPE_IMAGE) and VASurface-backed wrappers.
 * NOTE(review): the return type, the `fourcc` local declaration and the
 * else/return lines are elided in this extract. */
323 pp_get_surface_fourcc(VADriverContextP ctx, const struct i965_surface *surface)
327 if (surface->type == I965_SURFACE_TYPE_IMAGE) {
/* Image-backed: fourcc comes from the VAImage format descriptor. */
328 struct object_image *obj_image = (struct object_image *)surface->base;
329 fourcc = obj_image->image.format.fourcc;
/* Surface-backed (else branch): fourcc is cached on the object_surface. */
331 struct object_surface *obj_surface = (struct object_surface *)surface->base;
332 fourcc = obj_surface->fourcc;
/* Program the tiling bits of a Gen8 SURFACE_STATE (ss0) from an i915
 * tiling mode.  The switch statement, the X/Y case labels and the break
 * lines are elided in this extract. */
339 gen8_pp_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
342 case I915_TILING_NONE:
343 ss->ss0.tiled_surface = 0;
344 ss->ss0.tile_walk = 0;
/* presumably the I915_TILING_X case — confirm against full source */
347 ss->ss0.tiled_surface = 1;
348 ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
/* presumably the I915_TILING_Y case — confirm against full source */
351 ss->ss0.tiled_surface = 1;
352 ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
/* Same as gen8_pp_set_surface_tiling, but for the SURFACE_STATE variant
 * used by the sampler_8x8 path (gen8_surface_state2, tiling bits in ss2).
 * Switch/case/break lines are elided in this extract. */
358 gen8_pp_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
361 case I915_TILING_NONE:
362 ss->ss2.tiled_surface = 0;
363 ss->ss2.tile_walk = 0;
/* presumably the I915_TILING_X case — confirm against full source */
366 ss->ss2.tiled_surface = 1;
367 ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
/* presumably the I915_TILING_Y case — confirm against full source */
370 ss->ss2.tiled_surface = 1;
371 ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
/* Fill in one Gen8 SURFACE_STATE entry (slot `index`) in the shared
 * surface-state/binding-table buffer for a 2D R/W media surface, and
 * write the matching binding-table entry.
 *
 * surf_bo/surf_bo_offset: backing buffer and byte offset of the plane.
 * width/height/pitch:     plane dimensions (stored as value-1 per HW format).
 * format:                 I965_SURFACEFORMAT_* enum value.
 * is_target:              nonzero => GPU may write (render-domain reloc).
 *
 * NOTE(review): some locals (tiling, ss_bo) and the reloc tail/unmap lines
 * are elided in this extract. */
378 gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
379 dri_bo *surf_bo, unsigned long surf_bo_offset,
380 int width, int height, int pitch, int format,
381 int index, int is_target)
383 struct gen8_surface_state *ss;
386 unsigned int swizzle;
388 dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
389 ss_bo = pp_context->surface_state_binding_table.bo;
392 dri_bo_map(ss_bo, True);
393 assert(ss_bo->virtual);
/* Surface states are packed at fixed SURFACE_STATE_PADDED_SIZE strides. */
394 ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
395 memset(ss, 0, sizeof(*ss));
396 ss->ss0.surface_type = I965_SURFACE_2D;
397 ss->ss0.surface_format = format;
/* ss8 holds the 64-bit base address on Gen8; patched again by the reloc. */
398 ss->ss8.base_addr = surf_bo->offset + surf_bo_offset;
/* Hardware encodes dimensions as (value - 1). */
399 ss->ss2.width = width - 1;
400 ss->ss2.height = height - 1;
401 ss->ss3.pitch = pitch - 1;
403 /* Always set 1(align 4 mode) per B-spec */
404 ss->ss0.vertical_alignment = 1;
405 ss->ss0.horizontal_alignment = 1;
407 gen8_pp_set_surface_tiling(ss, tiling);
408 gen8_render_set_surface_scs(ss);
/* Relocation so the kernel patches base_addr when the bo moves; targets
 * additionally get the render write domain. */
409 dri_bo_emit_reloc(ss_bo,
410 I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
412 SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state, ss8),
/* Binding table entry points at this surface state's offset. */
414 ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
/* Fill in one Gen8 SURFACE_STATE (sampler_8x8 "surface2" layout) entry for
 * an AVS/sampler input surface, plus its binding-table entry.
 *
 * wpitch:             pitch in bytes (stored as value-1).
 * xoffset/yoffset:    Cb/Cr plane offsets within the surface.
 * interleave_chroma:  nonzero for NV12-style interleaved CbCr.
 *
 * NOTE(review): locals (tiling, ss2_bo, the `index` parameter line) and the
 * reloc tail are elided in this extract. */
420 gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
421 dri_bo *surf_bo, unsigned long surf_bo_offset,
422 int width, int height, int wpitch,
423 int xoffset, int yoffset,
424 int format, int interleave_chroma,
427 struct gen8_surface_state2 *ss2;
430 unsigned int swizzle;
432 dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
433 ss2_bo = pp_context->surface_state_binding_table.bo;
436 dri_bo_map(ss2_bo, True);
437 assert(ss2_bo->virtual);
438 ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
439 memset(ss2, 0, sizeof(*ss2));
/* ss6 carries the base address in this layout (vs ss8 in surface_state). */
440 ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
441 ss2->ss1.cbcr_pixel_offset_v_direction = 0;
/* Hardware encodes dimensions as (value - 1). */
442 ss2->ss1.width = width - 1;
443 ss2->ss1.height = height - 1;
444 ss2->ss2.pitch = wpitch - 1;
445 ss2->ss2.interleave_chroma = interleave_chroma;
446 ss2->ss2.surface_format = format;
447 ss2->ss3.x_offset_for_cb = xoffset;
448 ss2->ss3.y_offset_for_cb = yoffset;
449 gen8_pp_set_surface2_tiling(ss2, tiling);
/* Read-only reloc (sampler inputs are never written by the GPU here). */
450 dri_bo_emit_reloc(ss2_bo,
451 I915_GEM_DOMAIN_RENDER, 0,
453 SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state2, ss6),
455 ((unsigned int *)((char *)ss2_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
456 dri_bo_unmap(ss2_bo);
/* Compute the per-plane width/height/pitch/offset of `surface` clipped to
 * `rect`, then emit the surface states the media read/write kernels use,
 * starting at binding-table slot `base_index`.
 *
 * width/height/pitch/offset: caller-provided int[3] arrays, one entry per
 * plane (Y/UV or Y/U/V); single-plane formats only fill entry 0.
 * is_target: nonzero when `surface` is the destination of the pp op.
 *
 * NOTE(review): many lines are elided in this extract — the return
 * statements, several closing braces, the packed-format fourcc switch
 * (only two `format0 =` assignments remain visible) and some call
 * arguments. Comments below describe only what is visible. */
460 gen8_pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
461 const struct i965_surface *surface,
462 int base_index, int is_target,
463 const VARectangle *rect,
464 int *width, int *height, int *pitch, int *offset)
466 struct object_surface *obj_surface;
467 struct object_image *obj_image;
469 int fourcc = pp_get_surface_fourcc(ctx, surface);
470 const i965_fourcc_info *fourcc_info = get_fourcc_info(fourcc);
/* Unknown fourcc: bail out (error return elided in this extract). */
472 if (fourcc_info == NULL)
/* --- VASurface-backed: geometry comes from the object_surface. --- */
475 if (surface->type == I965_SURFACE_TYPE_SURFACE) {
476 obj_surface = (struct object_surface *)surface->base;
477 bo = obj_surface->bo;
/* Clip the request rectangle against the allocated surface size. */
478 width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
479 height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
480 pitch[0] = obj_surface->width;
483 if (fourcc_info->num_planes == 1 && is_target)
484 width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
/* Chroma plane(s): scale by the format's horizontal/vertical factors. */
486 width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
487 height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
488 pitch[1] = obj_surface->cb_cr_pitch;
489 offset[1] = obj_surface->y_cb_offset * obj_surface->width;
491 width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
492 height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
493 pitch[2] = obj_surface->cb_cr_pitch;
494 offset[2] = obj_surface->y_cr_offset * obj_surface->width;
/* --- VAImage-backed: geometry comes from the VAImage descriptor. --- */
498 /* FIXME: add support for ARGB/ABGR image */
499 obj_image = (struct object_image *)surface->base;
501 width[0] = MIN(rect->x + rect->width, obj_image->image.width)
502 height[0] = MIN(rect->y + rect->height, obj_image->image.height);
503 pitch[0] = obj_image->image.pitches[0];
504 offset[0] = obj_image->image.offsets[0];
506 if (fourcc_info->num_planes == 1) {
508 width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
509 } else if (fourcc_info->num_planes == 2) {
/* Three-plane path: look up which image plane holds U and which holds V,
 * since planar orders differ (e.g. YV12 vs I420). */
512 assert(fourcc_info->num_components == 3);
514 U = fourcc_info->components[1].plane;
515 V = fourcc_info->components[2].plane;
516 assert((U == 1 && V == 2) ||
520 /* Always set width/height although they aren't used for fourcc_info->num_planes == 1 */
521 width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
522 height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
523 pitch[1] = obj_image->image.pitches[U];
524 offset[1] = obj_image->image.offsets[U];
526 width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
527 height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
528 pitch[2] = obj_image->image.pitches[V];
529 offset[2] = obj_image->image.offsets[V];
/* --- R/W message surfaces: Y plane packed as R8 dwords (width/4). --- */
533 gen8_pp_set_surface_state(ctx, pp_context,
535 ALIGN(width[0], 4) / 4, height[0], pitch[0],
536 I965_SURFACEFORMAT_R8_UINT,
539 if (fourcc_info->num_planes == 2) {
/* Interleaved CbCr plane as R8G8 (width/2 texels). */
540 gen8_pp_set_surface_state(ctx, pp_context,
542 ALIGN(width[1], 2) / 2, height[1], pitch[1],
543 I965_SURFACEFORMAT_R8G8_SINT,
545 } else if (fourcc_info->num_planes == 3) {
546 gen8_pp_set_surface_state(ctx, pp_context,
548 ALIGN(width[1], 4) / 4, height[1], pitch[1],
549 I965_SURFACEFORMAT_R8_SINT,
551 gen8_pp_set_surface_state(ctx, pp_context,
553 ALIGN(width[2], 4) / 4, height[2], pitch[2],
554 I965_SURFACEFORMAT_R8_SINT,
/* RGB destination: tell the save kernel whether to swap R<->B. */
558 if (fourcc_info->format == I965_COLOR_RGB) {
559 struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
560 /* the format is MSB: X-B-G-R */
561 pp_static_parameter->grf2.save_avs_rgb_swap = 0;
562 if ((fourcc == VA_FOURCC_BGRA) ||
563 (fourcc == VA_FOURCC_BGRX)) {
564 /* It is stored as MSB: X-R-G-B */
565 pp_static_parameter->grf2.save_avs_rgb_swap = 1;
/* --- Sampler (surface2) input states: pick the AVS surface format. --- */
569 int format0 = SURFACE_FORMAT_Y8_UNORM;
573 format0 = SURFACE_FORMAT_YCRCB_NORMAL;
577 format0 = SURFACE_FORMAT_YCRCB_SWAPY;
584 if (fourcc_info->format == I965_COLOR_RGB) {
585 struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
586 /* Only R8G8B8A8_UNORM is supported for BGRX or RGBX */
587 format0 = SURFACE_FORMAT_R8G8B8A8_UNORM;
588 pp_static_parameter->grf2.src_avs_rgb_swap = 0;
589 if ((fourcc == VA_FOURCC_BGRA) ||
590 (fourcc == VA_FOURCC_BGRX)) {
591 pp_static_parameter->grf2.src_avs_rgb_swap = 1;
595 gen8_pp_set_surface2_state(ctx, pp_context,
597 width[0], height[0], pitch[0],
602 if (fourcc_info->num_planes == 2) {
603 gen8_pp_set_surface2_state(ctx, pp_context,
605 width[1], height[1], pitch[1],
607 SURFACE_FORMAT_R8B8_UNORM, 0,
609 } else if (fourcc_info->num_planes == 3) {
610 gen8_pp_set_surface2_state(ctx, pp_context,
612 width[1], height[1], pitch[1],
614 SURFACE_FORMAT_R8_UNORM, 0,
616 gen8_pp_set_surface2_state(ctx, pp_context,
618 width[2], height[2], pitch[2],
620 SURFACE_FORMAT_R8_UNORM, 0,
627 pp_null_x_steps(void *private_context)
633 pp_null_y_steps(void *private_context)
639 pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
/* No-op post-processing module initializer: installs the null step/block
 * callbacks so the pipeline runs without doing any work (used for testing).
 * NOTE(review): the trailing `void *filter_param` parameter and the opening
 * brace are elided in this extract. */
645 pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
646 const struct i965_surface *src_surface,
647 const VARectangle *src_rect,
648 struct i965_surface *dst_surface,
649 const VARectangle *dst_rect,
652 /* private function & data */
653 pp_context->pp_x_steps = pp_null_x_steps;
654 pp_context->pp_y_steps = pp_null_y_steps;
655 pp_context->private_context = NULL;
656 pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
/* Propagate color-standard/interlace flags to the destination unchanged. */
658 dst_surface->flags = src_surface->flags;
660 return VA_STATUS_SUCCESS;
/* Compute per-edge pixel masks for the destination rectangle so the GPU
 * kernels can skip pixels outside dst_rect when block boundaries don't
 * line up with the 16x8 GPU block grid or the dword x-alignment. */
663 static void calculate_boundary_block_mask(struct i965_post_processing_context *pp_context, const VARectangle *dst_rect)
665 int i, dst_width_adjust;
666 /* x offset of dest surface must be dword aligned.
667 * so we have to extend dst surface on left edge, and mask out pixels not interested
669 if (dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT) {
/* Left edge is extended: enable only the columns from the misalignment
 * onward inside the 16-wide block. */
670 pp_context->block_horizontal_mask_left = 0;
671 for (i=dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT; i<GPU_ASM_BLOCK_WIDTH; i++)
673 pp_context->block_horizontal_mask_left |= 1<<i;
/* Aligned left edge: all 16 columns valid. */
677 pp_context->block_horizontal_mask_left = 0xffff;
/* Right edge: mask off columns past the (left-extended) width. */
680 dst_width_adjust = dst_rect->width + dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT;
681 if (dst_width_adjust%GPU_ASM_BLOCK_WIDTH){
682 pp_context->block_horizontal_mask_right = (1 << (dst_width_adjust%GPU_ASM_BLOCK_WIDTH)) - 1;
685 pp_context->block_horizontal_mask_right = 0xffff;
/* Bottom edge: mask off rows past the height within the 8-high block. */
688 if (dst_rect->height%GPU_ASM_BLOCK_HEIGHT){
689 pp_context->block_vertical_mask_bottom = (1 << (dst_rect->height%GPU_ASM_BLOCK_HEIGHT)) - 1;
692 pp_context->block_vertical_mask_bottom = 0xff;
/* Number of horizontal kernel invocations: one per 16-pixel-wide block of
 * the (16-aligned) destination width. */
698 gen7_pp_avs_x_steps(void *private_context)
700 struct pp_avs_context *pp_avs_context = private_context;
702 return pp_avs_context->dest_w / 16;
/* Number of vertical kernel invocations: one per 16-line block of the
 * (16-aligned) destination height. */
706 gen7_pp_avs_y_steps(void *private_context)
708 struct pp_avs_context *pp_avs_context = private_context;
710 return pp_avs_context->dest_h / 16;
/* Fill the inline (per-block) parameters for AVS block (x, y): the block's
 * destination origin and the horizontal sampling step.
 * NOTE(review): the return statement is elided in this extract. */
714 gen7_pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
716 struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)pp_context->private_context;
717 struct gen7_pp_inline_parameter *pp_inline_parameter = pp_context->pp_inline_parameter;
/* Blocks are 16x16 for the AVS kernel; origin is offset by dest_x/dest_y. */
719 pp_inline_parameter->grf9.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
720 pp_inline_parameter->grf9.destination_block_vertical_origin = y * 16 + pp_avs_context->dest_y;
721 pp_inline_parameter->grf9.constant_0 = 0xffffffff;
/* Normalized horizontal step = source range covered / source width. */
722 pp_inline_parameter->grf9.sampler_load_main_video_x_scaling_step = pp_avs_context->horiz_range / pp_avs_context->src_w;
/* For packed-YUV destinations, tell the kernel where the Y/U/V components
 * sit inside each 4-byte group (YUY2 = Y0 U Y1 V, UYVY = U Y0 V Y1).
 * NOTE(review): despite the name, the visible callers pass the destination
 * surface, and the fields written are di_destination_* offsets. */
727 static void gen7_update_src_surface_uv_offset(VADriverContextP ctx,
728 struct i965_post_processing_context *pp_context,
729 const struct i965_surface *surface)
731 struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
732 int fourcc = pp_get_surface_fourcc(ctx, surface);
734 if (fourcc == VA_FOURCC_YUY2) {
735 pp_static_parameter->grf2.di_destination_packed_y_component_offset = 0;
736 pp_static_parameter->grf2.di_destination_packed_u_component_offset = 1;
737 pp_static_parameter->grf2.di_destination_packed_v_component_offset = 3;
738 } else if (fourcc == VA_FOURCC_UYVY) {
739 pp_static_parameter->grf2.di_destination_packed_y_component_offset = 1;
740 pp_static_parameter->grf2.di_destination_packed_u_component_offset = 0;
741 pp_static_parameter->grf2.di_destination_packed_v_component_offset = 2;
/* Gen8 adaptive-video-scaler (AVS) filter configuration: 6 fractional
 * coefficient bits, 8-tap luma / 4-tap chroma filters, and the min/max
 * representable coefficient values used when generating the phase tables.
 * NOTE(review): num_phases and the coeff_min/coeff_max member names are
 * elided in this extract. */
745 static const AVSConfig gen8_avs_config = {
746 .coeff_frac_bits = 6,
/* Smallest representable coefficient step: 1/64. */
747 .coeff_epsilon = 1.0f / (1U << 6),
749 .num_luma_coeffs = 8,
750 .num_chroma_coeffs = 4,
/* Minimum coefficient bounds (signed 1.6 fixed point range). */
754 .y_k_h = { -2, -2, -2, -2, -2, -2, -2, -2 },
755 .y_k_v = { -2, -2, -2, -2, -2, -2, -2, -2 },
756 .uv_k_h = { -1, -2, -2, -1 },
757 .uv_k_v = { -1, -2, -2, -1 },
/* Maximum coefficient bounds. */
760 .y_k_h = { 2, 2, 2, 2, 2, 2, 2, 2 },
761 .y_k_v = { 2, 2, 2, 2, 2, 2, 2, 2 },
762 .uv_k_h = { 1, 2, 2, 1 },
763 .uv_k_v = { 1, 2, 2, 1 },
/* Decide whether the sampler's 8-tap adaptive filter may be enabled for
 * this source: packed-YUV sources (YUY2/UYVY) take one branch, everything
 * else the other.  The return statements are elided in this extract. */
769 gen8_pp_get_8tap_filter_mode(VADriverContextP ctx,
770 const struct i965_surface *surface)
772 int fourcc = pp_get_surface_fourcc(ctx, surface);
774 if (fourcc == VA_FOURCC_YUY2 ||
775 fourcc == VA_FOURCC_UYVY)
/* Initialize the Gen8 AVS (adaptive video scaling) post-processing path:
 * set up source/destination surface states, program the sampler_8x8 AVS
 * state (IEF tuning + per-phase filter coefficient tables), and fill the
 * static CURBE parameters (scaling steps, frame origins, YUV->RGB
 * coefficients).
 *
 * NOTE(review): this extract elides many lines (loop variables, closing
 * braces, several locals such as sx/sy/i, and some call arguments); the
 * comments below annotate only what is visible. */
782 gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
783 const struct i965_surface *src_surface,
784 const VARectangle *src_rect,
785 struct i965_surface *dst_surface,
786 const VARectangle *dst_rect,
789 /* TODO: Add the sampler_8x8 state */
790 struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->pp_avs_context;
791 struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
792 struct gen8_sampler_8x8_avs *sampler_8x8;
794 int width[3], height[3], pitch[3], offset[3];
795 int src_width, src_height;
796 unsigned char *cc_ptr;
797 AVSState * const avs = &pp_avs_context->state;
799 const float * yuv_to_rgb_coefs;
800 size_t yuv_to_rgb_coefs_size;
802 memset(pp_static_parameter, 0, sizeof(struct gen7_pp_static_parameter));
/* Source surface states start at binding-table slot 0 (read-only). */
805 gen8_pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 0, 0,
807 width, height, pitch, offset);
808 src_height = height[0];
809 src_width = width[0];
811 /* destination surface */
/* Destination states start at slot 24 and are marked as render targets. */
812 gen8_pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 24, 1,
814 width, height, pitch, offset);
816 /* sampler 8x8 state */
817 dri_bo_map(pp_context->dynamic_state.bo, True);
818 assert(pp_context->dynamic_state.bo->virtual);
820 cc_ptr = (unsigned char *) pp_context->dynamic_state.bo->virtual +
821 pp_context->sampler_offset;
822 /* Currently only one gen8 sampler_8x8 is initialized */
823 sampler_8x8 = (struct gen8_sampler_8x8_avs *) cc_ptr;
824 memset(sampler_8x8, 0, sizeof(*sampler_8x8));
/* IEF (image enhancement filter) tuning constants; several are carried
 * over from the Ivybridge driver rather than hardware defaults. */
826 sampler_8x8->dw0.gain_factor = 44;
827 sampler_8x8->dw0.weak_edge_threshold = 1;
828 sampler_8x8->dw0.strong_edge_threshold = 8;
829 /* Use the value like that on Ivy instead of default
830 * sampler_8x8->dw0.r3x_coefficient = 5;
832 sampler_8x8->dw0.r3x_coefficient = 27;
833 sampler_8x8->dw0.r3c_coefficient = 5;
835 sampler_8x8->dw2.global_noise_estimation = 255;
836 sampler_8x8->dw2.non_edge_weight = 1;
837 sampler_8x8->dw2.regular_weight = 2;
838 sampler_8x8->dw2.strong_edge_weight = 7;
839 /* Use the value like that on Ivy instead of default
840 * sampler_8x8->dw2.r5x_coefficient = 7;
841 * sampler_8x8->dw2.r5cx_coefficient = 7;
842 * sampler_8x8->dw2.r5c_coefficient = 7;
844 sampler_8x8->dw2.r5x_coefficient = 9;
845 sampler_8x8->dw2.r5cx_coefficient = 8;
846 sampler_8x8->dw2.r5c_coefficient = 3;
848 sampler_8x8->dw3.sin_alpha = 101; /* sin_alpha = 0 */
849 sampler_8x8->dw3.cos_alpha = 79; /* cos_alpha = 0 */
850 sampler_8x8->dw3.sat_max = 0x1f;
851 sampler_8x8->dw3.hue_max = 14;
852 /* The 8tap filter will determine whether the adaptive Filter is
853 * applied for all channels(dw153).
854 * If the 8tap filter is disabled, the adaptive filter should be disabled.
855 * Only when 8tap filter is enabled, it can be enabled or not.
857 sampler_8x8->dw3.enable_8tap_filter = gen8_pp_get_8tap_filter_mode(ctx, src_surface);
858 sampler_8x8->dw3.ief4_smooth_enable = 0;
860 sampler_8x8->dw4.s3u = 0;
861 sampler_8x8->dw4.diamond_margin = 4;
862 sampler_8x8->dw4.vy_std_enable = 0;
863 sampler_8x8->dw4.umid = 110;
864 sampler_8x8->dw4.vmid = 154;
866 sampler_8x8->dw5.diamond_dv = 0;
867 sampler_8x8->dw5.diamond_th = 35;
868 sampler_8x8->dw5.diamond_alpha = 100; /* diamond_alpha = 0 */
869 sampler_8x8->dw5.hs_margin = 3;
870 sampler_8x8->dw5.diamond_du = 2;
/* Skin-tone detection piecewise-linear curve points/slopes (dw6..dw15). */
872 sampler_8x8->dw6.y_point1 = 46;
873 sampler_8x8->dw6.y_point2 = 47;
874 sampler_8x8->dw6.y_point3 = 254;
875 sampler_8x8->dw6.y_point4 = 255;
877 sampler_8x8->dw7.inv_margin_vyl = 3300; /* inv_margin_vyl = 0 */
879 sampler_8x8->dw8.inv_margin_vyu = 1600; /* inv_margin_vyu = 0 */
880 sampler_8x8->dw8.p0l = 46;
881 sampler_8x8->dw8.p1l = 216;
883 sampler_8x8->dw9.p2l = 236;
884 sampler_8x8->dw9.p3l = 236;
885 sampler_8x8->dw9.b0l = 133;
886 sampler_8x8->dw9.b1l = 130;
888 sampler_8x8->dw10.b2l = 130;
889 sampler_8x8->dw10.b3l = 130;
890 /* s0l = -5 / 256. s2.8 */
891 sampler_8x8->dw10.s0l = 1029; /* s0l = 0 */
892 sampler_8x8->dw10.y_slope2 = 31; /* y_slop2 = 0 */
894 sampler_8x8->dw11.s1l = 0;
895 sampler_8x8->dw11.s2l = 0;
897 sampler_8x8->dw12.s3l = 0;
898 sampler_8x8->dw12.p0u = 46;
899 sampler_8x8->dw12.p1u = 66;
900 sampler_8x8->dw12.y_slope1 = 31; /* y_slope1 = 0 */
902 sampler_8x8->dw13.p2u = 130;
903 sampler_8x8->dw13.p3u = 236;
904 sampler_8x8->dw13.b0u = 143;
905 sampler_8x8->dw13.b1u = 163;
907 sampler_8x8->dw14.b2u = 200;
908 sampler_8x8->dw14.b3u = 140;
909 sampler_8x8->dw14.s0u = 256; /* s0u = 0 */
911 sampler_8x8->dw15.s1u = 113; /* s1u = 0 */
912 sampler_8x8->dw15.s2u = 1203; /* s2u = 0 */
/* Recompute the AVS coefficient tables for the requested scale factors. */
914 sx = (float)dst_rect->width / src_rect->width;
915 sy = (float)dst_rect->height / src_rect->height;
916 avs_update_coefficients(avs, sx, sy, pp_context->filter_flags);
918 assert(avs->config->num_phases >= 16);
/* First bank: phases 0..16 go into sampler_8x8->coefficients[].
 * Coefficients are packed as signed 1.6 fixed point (sign, 1 int bit,
 * 6 fractional bits) via intel_format_convert(). */
919 for (i = 0; i <= 16; i++) {
920 struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
921 &sampler_8x8->coefficients[i];
922 const AVSCoeffs * const coeffs = &avs->coeffs[i];
924 sampler_8x8_state->dw0.table_0x_filter_c0 =
925 intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
926 sampler_8x8_state->dw0.table_0y_filter_c0 =
927 intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
928 sampler_8x8_state->dw0.table_0x_filter_c1 =
929 intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
930 sampler_8x8_state->dw0.table_0y_filter_c1 =
931 intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
933 sampler_8x8_state->dw1.table_0x_filter_c2 =
934 intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
935 sampler_8x8_state->dw1.table_0y_filter_c2 =
936 intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
937 sampler_8x8_state->dw1.table_0x_filter_c3 =
938 intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
939 sampler_8x8_state->dw1.table_0y_filter_c3 =
940 intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
942 sampler_8x8_state->dw2.table_0x_filter_c4 =
943 intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
944 sampler_8x8_state->dw2.table_0y_filter_c4 =
945 intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
946 sampler_8x8_state->dw2.table_0x_filter_c5 =
947 intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
948 sampler_8x8_state->dw2.table_0y_filter_c5 =
949 intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
951 sampler_8x8_state->dw3.table_0x_filter_c6 =
952 intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
953 sampler_8x8_state->dw3.table_0y_filter_c6 =
954 intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
955 sampler_8x8_state->dw3.table_0x_filter_c7 =
956 intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
957 sampler_8x8_state->dw3.table_0y_filter_c7 =
958 intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
/* Chroma (table 1) uses only 4 taps: c2..c5. */
960 sampler_8x8_state->dw4.pad0 = 0;
961 sampler_8x8_state->dw5.pad0 = 0;
962 sampler_8x8_state->dw4.table_1x_filter_c2 =
963 intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
964 sampler_8x8_state->dw4.table_1x_filter_c3 =
965 intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
966 sampler_8x8_state->dw5.table_1x_filter_c4 =
967 intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
968 sampler_8x8_state->dw5.table_1x_filter_c5 =
969 intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
971 sampler_8x8_state->dw6.pad0 =
972 sampler_8x8_state->dw7.pad0 =
973 sampler_8x8_state->dw6.table_1y_filter_c2 =
974 intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
975 sampler_8x8_state->dw6.table_1y_filter_c3 =
976 intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
977 sampler_8x8_state->dw7.table_1y_filter_c4 =
978 intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
979 sampler_8x8_state->dw7.table_1y_filter_c5 =
980 intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
/* Sharpness defaults: all-ones sharpness level when AVS is active
 * (avs_is_needed() returns 0/1; negation yields 0x00 or all bits set). */
983 sampler_8x8->dw152.default_sharpness_level =
984 -avs_is_needed(pp_context->filter_flags);
985 sampler_8x8->dw153.adaptive_filter_for_all_channel = 1;
986 sampler_8x8->dw153.bypass_y_adaptive_filtering = 1;
987 sampler_8x8->dw153.bypass_x_adaptive_filtering = 1;
/* Second bank: remaining phases (17..num_phases) live in coefficients1[]. */
989 for ( ; i <= avs->config->num_phases; i++) {
990 struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
991 &sampler_8x8->coefficients1[i - 17];
992 const AVSCoeffs * const coeffs = &avs->coeffs[i];
994 sampler_8x8_state->dw0.table_0x_filter_c0 =
995 intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
996 sampler_8x8_state->dw0.table_0y_filter_c0 =
997 intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
998 sampler_8x8_state->dw0.table_0x_filter_c1 =
999 intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
1000 sampler_8x8_state->dw0.table_0y_filter_c1 =
1001 intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
1003 sampler_8x8_state->dw1.table_0x_filter_c2 =
1004 intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
1005 sampler_8x8_state->dw1.table_0y_filter_c2 =
1006 intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
1007 sampler_8x8_state->dw1.table_0x_filter_c3 =
1008 intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
1009 sampler_8x8_state->dw1.table_0y_filter_c3 =
1010 intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
1012 sampler_8x8_state->dw2.table_0x_filter_c4 =
1013 intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
1014 sampler_8x8_state->dw2.table_0y_filter_c4 =
1015 intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
1016 sampler_8x8_state->dw2.table_0x_filter_c5 =
1017 intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
1018 sampler_8x8_state->dw2.table_0y_filter_c5 =
1019 intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
1021 sampler_8x8_state->dw3.table_0x_filter_c6 =
1022 intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
1023 sampler_8x8_state->dw3.table_0y_filter_c6 =
1024 intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
1025 sampler_8x8_state->dw3.table_0x_filter_c7 =
1026 intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
1027 sampler_8x8_state->dw3.table_0y_filter_c7 =
1028 intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
1030 sampler_8x8_state->dw4.pad0 = 0;
1031 sampler_8x8_state->dw5.pad0 = 0;
1032 sampler_8x8_state->dw4.table_1x_filter_c2 =
1033 intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
1034 sampler_8x8_state->dw4.table_1x_filter_c3 =
1035 intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
1036 sampler_8x8_state->dw5.table_1x_filter_c4 =
1037 intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
1038 sampler_8x8_state->dw5.table_1x_filter_c5 =
1039 intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
1041 sampler_8x8_state->dw6.pad0 =
1042 sampler_8x8_state->dw7.pad0 =
1043 sampler_8x8_state->dw6.table_1y_filter_c2 =
1044 intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
1045 sampler_8x8_state->dw6.table_1y_filter_c3 =
1046 intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
1047 sampler_8x8_state->dw7.table_1y_filter_c4 =
1048 intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
1049 sampler_8x8_state->dw7.table_1y_filter_c5 =
1050 intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
1053 dri_bo_unmap(pp_context->dynamic_state.bo);
1056 /* private function & data */
1057 pp_context->pp_x_steps = gen7_pp_avs_x_steps;
1058 pp_context->pp_y_steps = gen7_pp_avs_y_steps;
1059 pp_context->private_context = &pp_context->pp_avs_context;
1060 pp_context->pp_set_block_parameter = gen7_pp_avs_set_block_parameter;
/* Extend the destination leftward to the next dword boundary; the boundary
 * masks computed elsewhere hide the extra pixels. */
1062 int dst_left_edge_extend = dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT;
1063 pp_avs_context->dest_x = dst_rect->x - dst_left_edge_extend;
1064 pp_avs_context->dest_y = dst_rect->y;
1065 pp_avs_context->dest_w = ALIGN(dst_rect->width + dst_left_edge_extend, 16);
1066 pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
1067 pp_avs_context->src_w = src_rect->width;
1068 pp_avs_context->src_h = src_rect->height;
/* Fraction of the source surface width covered by src_rect. */
1069 pp_avs_context->horiz_range = (float)src_rect->width / src_width;
1071 int dw = (pp_avs_context->src_w - 1) / 16 + 1;
1072 dw = MAX(dw, dst_rect->width + dst_left_edge_extend);
/* CURBE static parameters: scaling steps and normalized frame origins. */
1074 pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
1075 pp_static_parameter->grf2.avs_wa_enable = 0; /* It is not required on GEN8+ */
1076 pp_static_parameter->grf2.alpha = 255;
1078 pp_static_parameter->grf3.sampler_load_horizontal_scaling_step_ratio = (float) pp_avs_context->src_w / dw;
1079 pp_static_parameter->grf4.sampler_load_vertical_scaling_step = (float) src_rect->height / src_height / dst_rect->height;
1080 pp_static_parameter->grf5.sampler_load_vertical_frame_origin = (float) src_rect->y / src_height -
1081 (float) pp_avs_context->dest_y * pp_static_parameter->grf4.sampler_load_vertical_scaling_step;
1082 pp_static_parameter->grf6.sampler_load_horizontal_frame_origin = (float) src_rect->x / src_width -
1083 (float) pp_avs_context->dest_x * pp_avs_context->horiz_range / dw;
/* Packed-YUV destination component offsets (YUY2/UYVY). */
1085 gen7_update_src_surface_uv_offset(ctx, pp_context, dst_surface);
/* Select BT.601/BT.709 CSC coefficients from the source color standard. */
1087 yuv_to_rgb_coefs = i915_color_standard_to_coefs (i915_filter_to_color_standard (src_surface->flags &
1089 &yuv_to_rgb_coefs_size);
1090 memcpy(&pp_static_parameter->grf7, yuv_to_rgb_coefs, yuv_to_rgb_coefs_size);
1092 dst_surface->flags = src_surface->flags;
1094 return VA_STATUS_SUCCESS;
/*
 * gen8_pp_initialize (excerpt): per-run setup for one post-processing pass.
 * Re-allocates the surface-state/binding-table buffer, lays out the dynamic
 * state buffer (CURBE, interface descriptors, sampler state), clears the
 * static/inline kernel parameters, then dispatches to the selected module's
 * initialize() hook.
 * NOTE(review): this listing is a sampled excerpt — the embedded original
 * numbering jumps (1104 -> 1110, 1120 -> 1123, 1133 -> 1139, ...), so the
 * function signature, several dri_bo_alloc arguments, and other statements
 * are missing here; verify against the complete source before editing.
 */
1099 VADriverContextP ctx,
1100 struct i965_post_processing_context *pp_context,
1101 const struct i965_surface *src_surface,
1102 const VARectangle *src_rect,
1103 struct i965_surface *dst_surface,
1104 const VARectangle *dst_rect,
1110 struct i965_driver_data *i965 = i965_driver_data(ctx);
1113 unsigned int end_offset;
1114 struct pp_module *pp_module;
1115 int static_param_size, inline_param_size;
/* Drop the previous run's surface state & binding table and allocate a
 * fresh buffer: one padded surface-state slot plus one binding-table entry
 * per possible PP surface. */
1117 dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1118 bo = dri_bo_alloc(i965->intel.bufmgr,
1119 "surface state & binding table",
1120 (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_PP_SURFACES,
1123 pp_context->surface_state_binding_table.bo = bo;
/* Descriptor table is rebuilt from scratch each run. */
1125 pp_context->idrt.num_interface_descriptors = 0;
1127 pp_context->sampler_size = 4 * 4096;
/* Dynamic state buffer holds (in order, each 64-byte aligned below):
 * 4KB scratch/padding, CURBE, interface descriptors, sampler state. */
1129 bo_size = 4096 + pp_context->curbe_size + pp_context->sampler_size
1130 + pp_context->idrt_size;
1132 dri_bo_unreference(pp_context->dynamic_state.bo);
1133 bo = dri_bo_alloc(i965->intel.bufmgr,
1139 pp_context->dynamic_state.bo = bo;
1140 pp_context->dynamic_state.bo_size = bo_size;
/* NOTE(review): the local `end_offset` is read by the first ALIGN() below
 * but no initialization of it is visible in this excerpt — presumably an
 * `end_offset = 0;` statement sits on one of the missing lines; confirm. */
1143 pp_context->dynamic_state.end_offset = 0;
1145 /* Constant buffer offset */
1146 pp_context->curbe_offset = ALIGN(end_offset, 64);
1147 end_offset = pp_context->curbe_offset + pp_context->curbe_size;
1149 /* Interface descriptor offset */
1150 pp_context->idrt_offset = ALIGN(end_offset, 64);
1151 end_offset = pp_context->idrt_offset + pp_context->idrt_size;
1153 /* Sampler state offset */
1154 pp_context->sampler_offset = ALIGN(end_offset, 64);
1155 end_offset = pp_context->sampler_offset + pp_context->sampler_size;
1157 /* update the end offset of dynamic_state */
1158 pp_context->dynamic_state.end_offset = ALIGN(end_offset, 64);
/* Zero the CPU-side kernel parameter blocks before the module fills them. */
1160 static_param_size = sizeof(struct gen7_pp_static_parameter);
1161 inline_param_size = sizeof(struct gen7_pp_inline_parameter);
1163 memset(pp_context->pp_static_parameter, 0, static_param_size);
1164 memset(pp_context->pp_inline_parameter, 0, inline_param_size);
/* Select the PP module and run its per-pass initialize hook, if any. */
1166 assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
1167 pp_context->current_pp = pp_index;
1168 pp_module = &pp_context->pp_modules[pp_index];
1170 if (pp_module->initialize)
1171 va_status = pp_module->initialize(ctx, pp_context,
1178 va_status = VA_STATUS_ERROR_UNIMPLEMENTED;
1180 calculate_boundary_block_mask(pp_context, dst_rect);
/*
 * Append one gen8 interface descriptor for the current PP kernel into the
 * dynamic-state buffer at idrt_offset.  The descriptor records the kernel
 * start offset, sampler-state and binding-table pointers, and the CURBE
 * read length for the kernel's thread payload.
 * NOTE(review): sampled excerpt — the dri_bo_map/unmap bracketing this CPU
 * write (and the relocation emit, per the "reloc" comment below) are on
 * lines missing from this listing; verify against the full source.
 */
1186 gen8_pp_interface_descriptor_table(VADriverContextP ctx,
1187 struct i965_post_processing_context *pp_context)
1189 struct gen8_interface_descriptor_data *desc;
1191 int pp_index = pp_context->current_pp;
1192 unsigned char *cc_ptr;
1194 bo = pp_context->dynamic_state.bo;
1197 assert(bo->virtual);
1198 cc_ptr = (unsigned char *)bo->virtual + pp_context->idrt_offset;
/* Next free descriptor slot within the IDRT region. */
1200 desc = (struct gen8_interface_descriptor_data *) cc_ptr +
1201 pp_context->idrt.num_interface_descriptors;
1203 memset(desc, 0, sizeof(*desc));
/* Kernel start pointer is in 64-byte units (>> 6). */
1204 desc->desc0.kernel_start_pointer =
1205 pp_context->pp_modules[pp_index].kernel.kernel_offset >> 6; /* reloc */
1206 desc->desc2.single_program_flow = 1;
1207 desc->desc2.floating_point_mode = FLOATING_POINT_IEEE_754;
1208 desc->desc3.sampler_count = 0; /* 1 - 4 samplers used */
/* Sampler-state and binding-table pointers are in 32-byte units (>> 5). */
1209 desc->desc3.sampler_state_pointer = pp_context->sampler_offset >> 5;
1210 desc->desc4.binding_table_entry_count = 0;
1211 desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
1212 desc->desc5.constant_urb_entry_read_offset = 0;
1214 desc->desc5.constant_urb_entry_read_length = 8; /* grf 1-8 */
1217 pp_context->idrt.num_interface_descriptors++;
/*
 * Copy the 256-byte gen7 static parameter block (the kernel's CURBE data)
 * into the dynamic-state buffer at curbe_offset.
 * NOTE(review): sampled excerpt — declarations such as `param_size` and the
 * function's opening brace are on lines missing from this listing.
 */
1222 gen8_pp_upload_constants(VADriverContextP ctx,
1223 struct i965_post_processing_context *pp_context)
1225 unsigned char *constant_buffer;
/* The CURBE layout below assumes exactly 256 bytes of static parameters. */
1228 assert(sizeof(struct gen7_pp_static_parameter) == 256);
1230 param_size = sizeof(struct gen7_pp_static_parameter);
/* Map writable (second arg 1 = write), copy, unmap. */
1232 dri_bo_map(pp_context->dynamic_state.bo, 1);
1233 assert(pp_context->dynamic_state.bo->virtual);
1234 constant_buffer = (unsigned char *) pp_context->dynamic_state.bo->virtual +
1235 pp_context->curbe_offset;
1237 memcpy(constant_buffer, pp_context->pp_static_parameter, param_size);
1238 dri_bo_unmap(pp_context->dynamic_state.bo);
/*
 * Prepare all CPU-side GPU state for a gen8 PP run: write the interface
 * descriptor table, then upload the CURBE constants.
 */
1243 gen8_pp_states_setup(VADriverContextP ctx,
1244 struct i965_post_processing_context *pp_context)
1246 gen8_pp_interface_descriptor_table(ctx, pp_context);
1247 gen8_pp_upload_constants(ctx, pp_context);
/*
 * Emit PIPELINE_SELECT to switch the GPU command streamer to the media
 * pipeline before issuing media state/object commands.
 */
1251 gen6_pp_pipeline_select(VADriverContextP ctx,
1252 struct i965_post_processing_context *pp_context)
1254 struct intel_batchbuffer *batch = pp_context->batch;
1256 BEGIN_BATCH(batch, 1);
1257 OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
1258 ADVANCE_BATCH(batch);
/*
 * Emit the 16-dword gen8 STATE_BASE_ADDRESS command: surface state points
 * at the binding-table BO, dynamic state at the CURBE/IDRT/sampler BO, and
 * instruction state at the kernel BO.  64-bit addresses are emitted as
 * relocation pairs (OUT_RELOC64).
 * NOTE(review): sampled excerpt — a few dwords of this fixed-length command
 * (e.g. around embedded lines 1273 and 1284) are missing from this listing;
 * the BEGIN_BATCH(16) count is the authoritative size.
 */
1262 gen8_pp_state_base_address(VADriverContextP ctx,
1263 struct i965_post_processing_context *pp_context)
1265 struct intel_batchbuffer *batch = pp_context->batch;
1267 BEGIN_BATCH(batch, 16);
1268 OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));
1269 /* DW1 Generate state address */
1270 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1271 OUT_BATCH(batch, 0);
1272 OUT_BATCH(batch, 0);
1274 /* DW4-5. Surface state address */
1275 OUT_RELOC64(batch, pp_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1277 /* DW6-7. Dynamic state address */
1278 OUT_RELOC64(batch, pp_context->dynamic_state.bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
1279 0, 0 | BASE_ADDRESS_MODIFY);
1281 /* DW8. Indirect object address */
1282 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1283 OUT_BATCH(batch, 0);
1285 /* DW10-11. Instruction base address */
1286 OUT_RELOC64(batch, pp_context->instruction_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
/* Buffer-size upper bounds: 0xFFFF0000 = effectively unbounded. */
1288 OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1289 OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1290 OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1291 OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1292 ADVANCE_BATCH(batch);
/*
 * Emit the 9-dword MEDIA_VFE_STATE command configuring the video front end:
 * max thread count, URB entry count/size, and CURBE allocation size, all
 * taken from pp_context->vfe_gpu_state.
 * NOTE(review): sampled excerpt — the OUT_BATCH( lines that open the
 * multi-field dwords (embedded lines 1305 and 1309) are missing here.
 */
1296 gen8_pp_vfe_state(VADriverContextP ctx,
1297 struct i965_post_processing_context *pp_context)
1299 struct intel_batchbuffer *batch = pp_context->batch;
1301 BEGIN_BATCH(batch, 9);
1302 OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
1303 OUT_BATCH(batch, 0);
1304 OUT_BATCH(batch, 0);
/* Thread/URB dword: (max threads - 1) in bits 31:16, URB entries in 15:8. */
1306 (pp_context->vfe_gpu_state.max_num_threads - 1) << 16 |
1307 pp_context->vfe_gpu_state.num_urb_entries << 8);
1308 OUT_BATCH(batch, 0);
1310 (pp_context->vfe_gpu_state.urb_entry_size) << 16 |
1311 /* URB Entry Allocation Size, in 256 bits unit */
1312 (pp_context->vfe_gpu_state.curbe_allocation_size));
1313 /* CURBE Allocation Size, in 256 bits unit */
1314 OUT_BATCH(batch, 0);
1315 OUT_BATCH(batch, 0);
1316 OUT_BATCH(batch, 0);
1317 ADVANCE_BATCH(batch);
/*
 * Emit MEDIA_STATE_FLUSH followed by MEDIA_INTERFACE_DESCRIPTOR_LOAD,
 * pointing the hardware at the descriptor table previously written at
 * idrt_offset inside the dynamic-state buffer.
 * NOTE(review): sampled excerpt — the OUT_BATCH( opening the descriptor
 * total-length dword (embedded line 1333) is missing here.
 */
1321 gen8_interface_descriptor_load(VADriverContextP ctx,
1322 struct i965_post_processing_context *pp_context)
1324 struct intel_batchbuffer *batch = pp_context->batch;
1326 BEGIN_BATCH(batch, 6);
1328 OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
1329 OUT_BATCH(batch, 0);
1331 OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
1332 OUT_BATCH(batch, 0);
/* Total length in bytes of all descriptors, then their offset within the
 * dynamic-state base. */
1334 pp_context->idrt.num_interface_descriptors * sizeof(struct gen8_interface_descriptor_data));
1335 OUT_BATCH(batch, pp_context->idrt_offset);
1336 ADVANCE_BATCH(batch);
/*
 * Emit MEDIA_CURBE_LOAD so the hardware fetches the static-parameter CURBE
 * uploaded at curbe_offset in the dynamic-state buffer.
 * NOTE(review): the `= 64` initializer on param_size is dead — it is
 * unconditionally overwritten with sizeof(struct gen7_pp_static_parameter)
 * before use.  Sampled excerpt: the OUT_BATCH( emitting param_size
 * (embedded lines 1351-1352) is missing from this listing.
 */
1340 gen8_pp_curbe_load(VADriverContextP ctx,
1341 struct i965_post_processing_context *pp_context)
1343 struct intel_batchbuffer *batch = pp_context->batch;
1344 int param_size = 64;
1346 param_size = sizeof(struct gen7_pp_static_parameter);
1348 BEGIN_BATCH(batch, 4);
1349 OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
1350 OUT_BATCH(batch, 0);
1353 OUT_BATCH(batch, pp_context->curbe_offset);
1354 ADVANCE_BATCH(batch);
/*
 * Build a second-level batch buffer containing one MEDIA_OBJECT command
 * (plus a MEDIA_STATE_FLUSH) per x/y walker block, terminate it with
 * MI_BATCH_BUFFER_END, and chain to it from the main batch with
 * MI_BATCH_BUFFER_START.  The per-block inline parameters are produced by
 * the module's pp_set_block_parameter callback.
 * NOTE(review): sampled excerpt — the dri_bo_alloc alignment argument, the
 * MEDIA_OBJECT header dwords (embedded lines 1387-1391), the skip path when
 * pp_set_block_parameter returns non-zero, and the QWORD padding branch
 * body (around embedded lines 1396-1403) are missing from this listing.
 */
1358 gen8_pp_object_walker(VADriverContextP ctx,
1359 struct i965_post_processing_context *pp_context)
1361 struct i965_driver_data *i965 = i965_driver_data(ctx);
1362 struct intel_batchbuffer *batch = pp_context->batch;
1363 int x, x_steps, y, y_steps;
1364 int param_size, command_length_in_dws, extra_cmd_in_dws;
1365 dri_bo *command_buffer;
1366 unsigned int *command_ptr;
1368 param_size = sizeof(struct gen7_pp_inline_parameter);
/* Walker dimensions come from the active module's step callbacks. */
1370 x_steps = pp_context->pp_x_steps(pp_context->private_context);
1371 y_steps = pp_context->pp_y_steps(pp_context->private_context);
/* 6 header dwords + inline parameters per MEDIA_OBJECT; 2 extra dwords per
 * block for the trailing MEDIA_STATE_FLUSH; +64 bytes of slack for the
 * terminator and padding. */
1372 command_length_in_dws = 6 + (param_size >> 2);
1373 extra_cmd_in_dws = 2;
1374 command_buffer = dri_bo_alloc(i965->intel.bufmgr,
1375 "command objects buffer",
1376 (command_length_in_dws + extra_cmd_in_dws) * 4 * x_steps * y_steps + 64,
1379 dri_bo_map(command_buffer, 1);
1380 command_ptr = command_buffer->virtual;
1382 for (y = 0; y < y_steps; y++) {
1383 for (x = 0; x < x_steps; x++) {
/* Callback fills pp_inline_parameter for block (x, y); 0 means emit it. */
1384 if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {
1386 *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
1392 memcpy(command_ptr, pp_context->pp_inline_parameter, param_size);
1393 command_ptr += (param_size >> 2);
1395 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
/* Pad so the buffer ends on a QWORD boundary before the terminator. */
1401 if ((command_length_in_dws + extra_cmd_in_dws) * x_steps * y_steps % 2 == 0)
1404 *command_ptr++ = MI_BATCH_BUFFER_END;
1407 dri_bo_unmap(command_buffer);
/* Chain from the main batch into the command-object buffer. */
1409 BEGIN_BATCH(batch, 3);
1410 OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
1411 OUT_RELOC(batch, command_buffer,
1412 I915_GEM_DOMAIN_COMMAND, 0, 0);
1413 OUT_BATCH(batch, 0);
1414 ADVANCE_BATCH(batch);
1416 dri_bo_unreference(command_buffer);
1418 /* Have to execute the batch buffer here because MI_BATCH_BUFFER_END
1419 * will cause control to pass back to ring buffer
1421 intel_batchbuffer_end_atomic(batch);
1422 intel_batchbuffer_flush(batch);
1423 intel_batchbuffer_start_atomic(batch, 0x1000);
/*
 * Emit the full gen8 PP command sequence into the batch: pipeline select,
 * state base addresses, VFE state, CURBE load, interface descriptor load,
 * then the media-object walker.
 * NOTE(review): gen8_pp_vfe_state is emitted twice below (embedded lines
 * 1436 and 1439) — confirm against the full source whether the second call
 * is intentional (e.g. a workaround) or a stray duplicate.
 */
1427 gen8_pp_pipeline_setup(VADriverContextP ctx,
1428 struct i965_post_processing_context *pp_context)
1430 struct intel_batchbuffer *batch = pp_context->batch;
1432 intel_batchbuffer_start_atomic(batch, 0x1000);
1433 intel_batchbuffer_emit_mi_flush(batch);
1434 gen6_pp_pipeline_select(ctx, pp_context);
1435 gen8_pp_state_base_address(ctx, pp_context);
1436 gen8_pp_vfe_state(ctx, pp_context);
1437 gen8_pp_curbe_load(ctx, pp_context);
1438 gen8_interface_descriptor_load(ctx, pp_context);
1439 gen8_pp_vfe_state(ctx, pp_context);
1440 gen8_pp_object_walker(ctx, pp_context);
1441 intel_batchbuffer_end_atomic(batch);
/*
 * Top-level gen8 post-processing entry point (installed as
 * pp_context->intel_post_processing in common_init below): initialize the
 * pass, and on success set up GPU states and emit the pipeline.
 * NOTE(review): sampled excerpt — the return type, the pp_index/filter
 * parameters, the arguments forwarded to gen8_pp_initialize (embedded
 * lines 1459-1465), and the return statement are missing from this listing.
 */
1445 gen8_post_processing(
1446 VADriverContextP ctx,
1447 struct i965_post_processing_context *pp_context,
1448 const struct i965_surface *src_surface,
1449 const VARectangle *src_rect,
1450 struct i965_surface *dst_surface,
1451 const VARectangle *dst_rect,
1458 va_status = gen8_pp_initialize(ctx, pp_context,
1466 if (va_status == VA_STATUS_SUCCESS) {
1467 gen8_pp_states_setup(ctx, pp_context);
1468 gen8_pp_pipeline_setup(ctx, pp_context);
/*
 * Tear down a gen8 PP context: destroy the VEBOX sub-context, release every
 * GPU buffer object (surface-state table, denoise STMM, instruction,
 * indirect and dynamic state), and free the CPU-side parameter blocks.
 * NOTE(review): the `if (bo)` guards around dri_bo_unreference below are
 * redundant if dri_bo_unreference(NULL) is a no-op (as with free()); they
 * are harmless and left as-is in this comment-only pass.
 */
1475 gen8_post_processing_context_finalize(VADriverContextP ctx,
1476 struct i965_post_processing_context *pp_context)
1478 if(pp_context->vebox_proc_ctx){
1479 gen75_vebox_context_destroy(ctx,pp_context->vebox_proc_ctx);
1480 pp_context->vebox_proc_ctx = NULL;
1483 dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1484 pp_context->surface_state_binding_table.bo = NULL;
1486 dri_bo_unreference(pp_context->pp_dn_context.stmm_bo);
1487 pp_context->pp_dn_context.stmm_bo = NULL;
1489 if (pp_context->instruction_state.bo) {
1490 dri_bo_unreference(pp_context->instruction_state.bo);
1491 pp_context->instruction_state.bo = NULL;
1494 if (pp_context->indirect_state.bo) {
1495 dri_bo_unreference(pp_context->indirect_state.bo);
1496 pp_context->indirect_state.bo = NULL;
1499 if (pp_context->dynamic_state.bo) {
1500 dri_bo_unreference(pp_context->dynamic_state.bo);
1501 pp_context->dynamic_state.bo = NULL;
/* free(NULL) is safe; pointers are nulled to guard against reuse. */
1504 free(pp_context->pp_static_parameter);
1505 free(pp_context->pp_inline_parameter);
1506 pp_context->pp_static_parameter = NULL;
1507 pp_context->pp_inline_parameter = NULL;
1510 #define VPP_CURBE_ALLOCATION_SIZE 32
/*
 * One-time gen8 PP context setup shared by the gen8+ init paths: program
 * the VFE defaults, install the gen8 entry points, copy the PP module
 * table, allocate the instruction buffer and upload every kernel binary at
 * a 64-byte-aligned offset, allocate the CPU-side parameter blocks, and
 * record the IDRT/CURBE sizes used by gen8_pp_initialize.
 * NOTE(review): sampled excerpt — the `data`/`num_pp_modules` parameter
 * lines, the dri_bo_alloc arguments for the instruction buffer (embedded
 * lines 1550-1552), the error return after the WARN_ONCE, and the visible
 * initialization of the local `end_offset` (first read by ALIGN below) are
 * on missing lines; verify against the full source.
 */
1513 gen8_post_processing_context_common_init(VADriverContextP ctx,
1515 struct pp_module *pp_modules,
1517 struct intel_batchbuffer *batch)
1519 struct i965_driver_data *i965 = i965_driver_data(ctx);
1521 unsigned int kernel_offset, end_offset;
1522 unsigned char *kernel_ptr;
1523 struct pp_module *pp_module;
1524 struct i965_post_processing_context *pp_context = data;
/* VFE defaults for the media pipeline; urb_entry_size is encoded as
 * (size - 1) per the hardware field convention used here. */
1526 pp_context->vfe_gpu_state.max_num_threads = 60;
1527 pp_context->vfe_gpu_state.num_urb_entries = 59;
1528 pp_context->vfe_gpu_state.gpgpu_mode = 0;
1529 pp_context->vfe_gpu_state.urb_entry_size = 16 - 1;
1530 pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
1532 pp_context->intel_post_processing = gen8_post_processing;
1533 pp_context->finalize = gen8_post_processing_context_finalize;
1535 assert(ARRAY_ELEMS(pp_context->pp_modules) == num_pp_modules);
1537 memcpy(pp_context->pp_modules, pp_modules, sizeof(pp_context->pp_modules));
/* Total instruction-buffer size: 4KB of slack plus every kernel binary
 * (alignment padding is absorbed by the slack). */
1539 kernel_size = 4096 ;
1541 for (i = 0; i < NUM_PP_MODULES; i++) {
1542 pp_module = &pp_context->pp_modules[i];
1544 if (pp_module->kernel.bin && pp_module->kernel.size) {
1545 kernel_size += pp_module->kernel.size;
1549 pp_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
1553 if (pp_context->instruction_state.bo == NULL) {
1554 WARN_ONCE("failure to allocate the buffer space for kernel shader in VPP\n");
1558 assert(pp_context->instruction_state.bo);
1561 pp_context->instruction_state.bo_size = kernel_size;
1562 pp_context->instruction_state.end_offset = 0;
/* Upload each kernel at the next 64-byte-aligned offset and record it in
 * the module so the interface descriptor can reference it later. */
1565 dri_bo_map(pp_context->instruction_state.bo, 1);
1566 kernel_ptr = (unsigned char *)(pp_context->instruction_state.bo->virtual);
1568 for (i = 0; i < NUM_PP_MODULES; i++) {
1569 pp_module = &pp_context->pp_modules[i];
1571 kernel_offset = ALIGN(end_offset, 64);
1572 pp_module->kernel.kernel_offset = kernel_offset;
1574 if (pp_module->kernel.bin && pp_module->kernel.size) {
1576 memcpy(kernel_ptr + kernel_offset, pp_module->kernel.bin, pp_module->kernel.size);
1577 end_offset = kernel_offset + pp_module->kernel.size;
1581 pp_context->instruction_state.end_offset = ALIGN(end_offset, 64);
1583 dri_bo_unmap(pp_context->instruction_state.bo);
1585 /* static & inline parameters */
1586 pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
1587 pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
1589 pp_context->batch = batch;
/* Room for 5 interface descriptors; 256-byte CURBE (matches the assert in
 * gen8_pp_upload_constants). */
1591 pp_context->idrt_size = 5 * sizeof(struct gen8_interface_descriptor_data);
1592 pp_context->curbe_size = 256;
1597 gen8_post_processing_context_init(VADriverContextP ctx,
1599 struct intel_batchbuffer *batch)
1601 struct i965_post_processing_context *pp_context = data;
1603 gen8_post_processing_context_common_init(ctx, data, pp_modules_gen8, ARRAY_ELEMS(pp_modules_gen8), batch);
1604 avs_init_state(&pp_context->pp_avs_context.state, &gen8_avs_config);