2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Xiang Haihao <haihao.xiang@intel.com>
26 * Zhao Yakui <yakui.zhao@intel.com>
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "i965_drv_video.h"
40 #include "i965_post_processing.h"
41 #include "i965_render.h"
42 #include "i965_yuv_coefs.h"
43 #include "intel_media.h"
45 #include "gen75_picture_process.h"
46 #include "intel_common_vpp_internal.h"
/* Per-surface padded surface-state size for gen8+ (selects the gen8 layout). */
48 #define SURFACE_STATE_PADDED_SIZE SURFACE_STATE_PADDED_SIZE_GEN8
/* Byte offset of the index-th surface state inside the combined
 * surface-state/binding-table buffer.
 * NOTE(review): the macro argument is not parenthesized; safe only while
 * callers pass a simple expression — consider (index). */
50 #define SURFACE_STATE_OFFSET(index) (SURFACE_STATE_PADDED_SIZE * index)
/* The binding table is placed immediately after MAX_PP_SURFACES states. */
51 #define BINDING_TABLE_OFFSET SURFACE_STATE_OFFSET(MAX_PP_SURFACES)
/* Media-walker block geometry used by the PP GPU kernels. */
53 #define GPU_ASM_BLOCK_WIDTH 16
54 #define GPU_ASM_BLOCK_HEIGHT 8
/* Destination X offsets handed to the kernels must be dword aligned. */
55 #define GPU_ASM_X_OFFSET_ALIGNMENT 4
/* NOTE(review): misleading name — 0xFFFFFFFE is not a VA success code
 * (VA_STATUS_SUCCESS is 0) and this constant is unused in this chunk.
 * Confirm against callers and consider removing. */
57 #define VA_STATUS_SUCCESS_1 0xFFFFFFFE
/* Forward declaration: no-op post-processing path (definition below). */
59 VAStatus pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
60 const struct i965_surface *src_surface,
61 const VARectangle *src_rect,
62 struct i965_surface *dst_surface,
63 const VARectangle *dst_rect,
/* Forward declaration: common AVS (adaptive video scaler) initializer used
 * by every load/save module in pp_modules_gen8 (definition below). */
66 VAStatus gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
67 const struct i965_surface *src_surface,
68 const VARectangle *src_rect,
69 struct i965_surface *dst_surface,
70 const VARectangle *dst_rect,
/* Precompiled GPU kernel binaries (.g8b), one array per source->destination
 * plane-layout conversion: PL2 = two-plane (e.g. NV12), PL3 = three-plane
 * (e.g. I420), PA = packed (e.g. YUY2), RGBX = 32-bit RGB. */
73 /* TODO: Modify the shader and then compile it again.
74 * Currently it is derived from Haswell*/
75 static const uint32_t pp_null_gen8[][4] = {
78 static const uint32_t pp_nv12_load_save_nv12_gen8[][4] = {
79 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
82 static const uint32_t pp_nv12_load_save_pl3_gen8[][4] = {
83 #include "shaders/post_processing/gen8/pl2_to_pl3.g8b"
86 static const uint32_t pp_pl3_load_save_nv12_gen8[][4] = {
87 #include "shaders/post_processing/gen8/pl3_to_pl2.g8b"
90 static const uint32_t pp_pl3_load_save_pl3_gen8[][4] = {
91 #include "shaders/post_processing/gen8/pl3_to_pl3.g8b"
/* Scaling and AVS reuse the generic PL2->PL2 kernel. */
94 static const uint32_t pp_nv12_scaling_gen8[][4] = {
95 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
98 static const uint32_t pp_nv12_avs_gen8[][4] = {
99 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
/* DN/DI kernels are not wired up on gen8 — their includes stay commented
 * out, leaving the arrays empty. */
102 static const uint32_t pp_nv12_dndi_gen8[][4] = {
103 // #include "shaders/post_processing/gen7/dndi.g75b"
106 static const uint32_t pp_nv12_dn_gen8[][4] = {
107 // #include "shaders/post_processing/gen7/nv12_dn_nv12.g75b"
109 static const uint32_t pp_nv12_load_save_pa_gen8[][4] = {
110 #include "shaders/post_processing/gen8/pl2_to_pa.g8b"
112 static const uint32_t pp_pl3_load_save_pa_gen8[][4] = {
113 #include "shaders/post_processing/gen8/pl3_to_pa.g8b"
115 static const uint32_t pp_pa_load_save_nv12_gen8[][4] = {
116 #include "shaders/post_processing/gen8/pa_to_pl2.g8b"
118 static const uint32_t pp_pa_load_save_pl3_gen8[][4] = {
119 #include "shaders/post_processing/gen8/pa_to_pl3.g8b"
121 static const uint32_t pp_pa_load_save_pa_gen8[][4] = {
122 #include "shaders/post_processing/gen8/pa_to_pa.g8b"
124 static const uint32_t pp_rgbx_load_save_nv12_gen8[][4] = {
125 #include "shaders/post_processing/gen8/rgbx_to_nv12.g8b"
127 static const uint32_t pp_nv12_load_save_rgbx_gen8[][4] = {
128 #include "shaders/post_processing/gen8/pl2_to_rgbx.g8b"
/* Table of gen8 post-processing modules. Each entry pairs a kernel binary
 * (name, id, bin, size) with its initializer; every real conversion module
 * uses the shared gen8_pp_plx_avs_initialize entry point. */
131 static struct pp_module pp_modules_gen8[] = {
134 "NULL module (for testing)",
137 sizeof(pp_null_gen8),
147 PP_NV12_LOAD_SAVE_N12,
148 pp_nv12_load_save_nv12_gen8,
149 sizeof(pp_nv12_load_save_nv12_gen8),
153 gen8_pp_plx_avs_initialize,
159 PP_NV12_LOAD_SAVE_PL3,
160 pp_nv12_load_save_pl3_gen8,
161 sizeof(pp_nv12_load_save_pl3_gen8),
164 gen8_pp_plx_avs_initialize,
170 PP_PL3_LOAD_SAVE_N12,
171 pp_pl3_load_save_nv12_gen8,
172 sizeof(pp_pl3_load_save_nv12_gen8),
176 gen8_pp_plx_avs_initialize,
182 PP_PL3_LOAD_SAVE_PL3,
183 pp_pl3_load_save_pl3_gen8,
184 sizeof(pp_pl3_load_save_pl3_gen8),
188 gen8_pp_plx_avs_initialize,
193 "NV12 Scaling module",
195 pp_nv12_scaling_gen8,
196 sizeof(pp_nv12_scaling_gen8),
200 gen8_pp_plx_avs_initialize,
208 sizeof(pp_nv12_avs_gen8),
212 gen8_pp_plx_avs_initialize,
/* DN/DI entries reference the empty kernel arrays above; no initializer
 * lines are visible for them in this excerpt. */
220 sizeof(pp_nv12_dndi_gen8),
232 sizeof(pp_nv12_dn_gen8),
241 PP_NV12_LOAD_SAVE_PA,
242 pp_nv12_load_save_pa_gen8,
243 sizeof(pp_nv12_load_save_pa_gen8),
247 gen8_pp_plx_avs_initialize,
254 pp_pl3_load_save_pa_gen8,
255 sizeof(pp_pl3_load_save_pa_gen8),
259 gen8_pp_plx_avs_initialize,
265 PP_PA_LOAD_SAVE_NV12,
266 pp_pa_load_save_nv12_gen8,
267 sizeof(pp_pa_load_save_nv12_gen8),
271 gen8_pp_plx_avs_initialize,
278 pp_pa_load_save_pl3_gen8,
279 sizeof(pp_pa_load_save_pl3_gen8),
283 gen8_pp_plx_avs_initialize,
290 pp_pa_load_save_pa_gen8,
291 sizeof(pp_pa_load_save_pa_gen8),
295 gen8_pp_plx_avs_initialize,
301 PP_RGBX_LOAD_SAVE_NV12,
302 pp_rgbx_load_save_nv12_gen8,
303 sizeof(pp_rgbx_load_save_nv12_gen8),
307 gen8_pp_plx_avs_initialize,
313 PP_NV12_LOAD_SAVE_RGBX,
314 pp_nv12_load_save_rgbx_gen8,
315 sizeof(pp_nv12_load_save_rgbx_gen8),
319 gen8_pp_plx_avs_initialize,
/* Limits/defaults for the common scaling path. */
323 #define MAX_SCALING_SURFACES 16
/* Default memory object control state (MOCS) value for scaling surfaces. */
325 #define DEFAULT_MOCS 0
/* Kernels for the common scaling path: YUV420 8-bit -> NV12 conversion and
 * 8-bit 4:2:0 -> 32-bit RGB conversion. */
327 static const uint32_t pp_yuv420p8_scaling_gen8[][4] = {
328 #include "shaders/post_processing/gen8/conv_nv12.g8b"
331 static const uint32_t pp_8bit_420_rgb32_scaling_gen8[][4] = {
332 #include "shaders/post_processing/gen8/conv_8bit_420_rgb32.g8b"
/* Kernel descriptor table consumed by the common scaling code. */
335 struct i965_kernel pp_common_scaling_gen8[] = {
339 pp_yuv420p8_scaling_gen8,
340 sizeof(pp_yuv420p8_scaling_gen8),
347 pp_8bit_420_rgb32_scaling_gen8,
348 sizeof(pp_8bit_420_rgb32_scaling_gen8),
/* Translate a DRM tiling mode into the tiled_surface/tile_walk fields of a
 * gen8 SURFACE_STATE. Linear clears both fields; X/Y tiling set the
 * corresponding walk order. */
354 gen8_pp_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
357 case I915_TILING_NONE:
358 ss->ss0.tiled_surface = 0;
359 ss->ss0.tile_walk = 0;
362 ss->ss0.tiled_surface = 1;
363 ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
366 ss->ss0.tiled_surface = 1;
367 ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
/* Same tiling translation as gen8_pp_set_surface_tiling, but for the
 * SURFACE_STATE2 (media/adv) layout, whose tiling bits live in ss2. */
373 gen8_pp_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
376 case I915_TILING_NONE:
377 ss->ss2.tiled_surface = 0;
378 ss->ss2.tile_walk = 0;
381 ss->ss2.tiled_surface = 1;
382 ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
385 ss->ss2.tiled_surface = 1;
386 ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
/* Fill one gen8 SURFACE_STATE slot (index) in the shared surface-state/
 * binding-table buffer for a 2D surface backed by surf_bo at
 * surf_bo_offset. is_target selects a render-write relocation domain so
 * the kernel output surface is correctly tracked by the GEM relocation. */
393 gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
394 dri_bo *surf_bo, unsigned long surf_bo_offset,
395 int width, int height, int pitch, int format,
396 int index, int is_target)
398 struct i965_driver_data *i965 = i965_driver_data(ctx);
399 struct gen8_surface_state *ss;
402 unsigned int swizzle;
404 dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
405 ss_bo = pp_context->surface_state_binding_table.bo;
/* Map the state buffer and locate this index's state slot. */
408 dri_bo_map(ss_bo, True);
409 assert(ss_bo->virtual);
410 ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
411 memset(ss, 0, sizeof(*ss));
/* Gen9/Gen10 take MOCS from the PTE; older gens leave the default. */
413 if (IS_GEN9(i965->intel.device_info) ||
414 IS_GEN10(i965->intel.device_info))
415 ss->ss1.surface_mocs = GEN9_CACHE_PTE;
417 ss->ss0.surface_type = I965_SURFACE_2D;
418 ss->ss0.surface_format = format;
419 ss->ss8.base_addr = surf_bo->offset + surf_bo_offset;
/* Hardware encodes dimensions as value-minus-one. */
420 ss->ss2.width = width - 1;
421 ss->ss2.height = height - 1;
422 ss->ss3.pitch = pitch - 1;
424 /* Always set 1(align 4 mode) per B-spec */
425 ss->ss0.vertical_alignment = 1;
426 ss->ss0.horizontal_alignment = 1;
428 gen8_pp_set_surface_tiling(ss, tiling);
429 gen8_render_set_surface_scs(ss);
/* Relocate the 48-bit base address (ss8) and write the binding-table
 * entry pointing at this surface state. */
430 dri_bo_emit_reloc(ss_bo,
431 I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
433 SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state, ss8),
435 ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
/* Fill one gen8 SURFACE_STATE2 (media sampler) slot for a planar/packed
 * video surface: base address, Y/UV geometry, chroma interleave flag and
 * the Cb/Cr plane offsets. Read-only from the sampler, so the reloc write
 * domain is 0. */
441 gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
442 dri_bo *surf_bo, unsigned long surf_bo_offset,
443 int width, int height, int wpitch,
444 int xoffset, int yoffset,
445 int format, int interleave_chroma,
448 struct i965_driver_data *i965 = i965_driver_data(ctx);
449 struct gen8_surface_state2 *ss2;
452 unsigned int swizzle;
454 dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
455 ss2_bo = pp_context->surface_state_binding_table.bo;
458 dri_bo_map(ss2_bo, True);
459 assert(ss2_bo->virtual);
460 ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
461 memset(ss2, 0, sizeof(*ss2));
463 if (IS_GEN9(i965->intel.device_info) ||
464 IS_GEN10(i965->intel.device_info))
465 ss2->ss5.surface_object_mocs = GEN9_CACHE_PTE;
467 ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
468 ss2->ss1.cbcr_pixel_offset_v_direction = 0;
/* Dimensions/pitch are encoded minus one, as for SURFACE_STATE. */
469 ss2->ss1.width = width - 1;
470 ss2->ss1.height = height - 1;
471 ss2->ss2.pitch = wpitch - 1;
472 ss2->ss2.interleave_chroma = interleave_chroma;
473 ss2->ss2.surface_format = format;
/* X/Y offsets of the chroma (Cb) plane relative to the base address. */
474 ss2->ss3.x_offset_for_cb = xoffset;
475 ss2->ss3.y_offset_for_cb = yoffset;
476 gen8_pp_set_surface2_tiling(ss2, tiling);
477 dri_bo_emit_reloc(ss2_bo,
478 I915_GEM_DOMAIN_RENDER, 0,
480 SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state2, ss6),
482 ((unsigned int *)((char *)ss2_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
483 dri_bo_unmap(ss2_bo);
/* Compute per-plane width/height/pitch/offset for a VA surface or VAImage
 * and program the surface states starting at base_index. Output surfaces
 * (is_target) always use media R/W message states; input surfaces use
 * sampler SURFACE_STATE2 states unless the source is RGB/packed. The
 * width/height/pitch/offset arrays are also returned to the caller (used
 * by gen8_pp_plx_avs_initialize for scaling math). */
487 gen8_pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
488 const struct i965_surface *surface,
489 int base_index, int is_target,
490 const VARectangle *rect,
491 int *width, int *height, int *pitch, int *offset)
493 struct object_surface *obj_surface;
494 struct object_image *obj_image;
496 int fourcc = pp_get_surface_fourcc(ctx, surface);
497 const i965_fourcc_info *fourcc_info = get_fourcc_info(fourcc);
/* Unknown fourcc: bail out early. */
499 if (fourcc_info == NULL)
/* Case 1: a driver object_surface. Plane 0 geometry is clamped to the
 * surface's original dimensions. */
502 if (surface->type == I965_SURFACE_TYPE_SURFACE) {
503 obj_surface = (struct object_surface *)surface->base;
504 bo = obj_surface->bo;
505 width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
506 height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
507 pitch[0] = obj_surface->width;
510 if (fourcc_info->num_planes == 1 && is_target)
511 width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
/* Chroma plane(s): scale the rect by the subsampling factors and clamp
 * to the allocated Cb/Cr dimensions. */
513 width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
514 height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
515 pitch[1] = obj_surface->cb_cr_pitch;
516 offset[1] = obj_surface->y_cb_offset * obj_surface->width;
518 width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
519 height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
520 pitch[2] = obj_surface->cb_cr_pitch;
521 offset[2] = obj_surface->y_cr_offset * obj_surface->width;
/* Case 2: a VAImage. Pitches/offsets come from the image descriptor; the
 * U/V plane indices come from the fourcc component table. */
525 /* FIXME: add support for ARGB/ABGR image */
526 obj_image = (struct object_image *)surface->base;
528 width[0] = MIN(rect->x + rect->width, obj_image->image.width);
529 height[0] = MIN(rect->y + rect->height, obj_image->image.height);
530 pitch[0] = obj_image->image.pitches[0];
531 offset[0] = obj_image->image.offsets[0];
533 if (fourcc_info->num_planes == 1) {
535 width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
536 } else if (fourcc_info->num_planes == 2) {
539 assert(fourcc_info->num_components == 3);
541 U = fourcc_info->components[1].plane;
542 V = fourcc_info->components[2].plane;
543 assert((U == 1 && V == 2) ||
547 /* Always set width/height although they aren't used for fourcc_info->num_planes == 1 */
548 width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
549 height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
550 pitch[1] = obj_image->image.pitches[U];
551 offset[1] = obj_image->image.offsets[U];
553 width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
554 height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
555 pitch[2] = obj_image->image.pitches[V];
556 offset[2] = obj_image->image.offsets[V];
/* Target path: program media R/W message surface states. Plane 0 is
 * viewed as R8_UINT dwords, hence the width/4 conversion. */
560 gen8_pp_set_surface_state(ctx, pp_context,
562 ALIGN(width[0], 4) / 4, height[0], pitch[0],
563 I965_SURFACEFORMAT_R8_UINT,
566 if (fourcc_info->num_planes == 2) {
567 gen8_pp_set_surface_state(ctx, pp_context,
569 ALIGN(width[1], 2) / 2, height[1], pitch[1],
570 I965_SURFACEFORMAT_R8G8_SINT,
572 } else if (fourcc_info->num_planes == 3) {
573 gen8_pp_set_surface_state(ctx, pp_context,
575 ALIGN(width[1], 4) / 4, height[1], pitch[1],
576 I965_SURFACEFORMAT_R8_SINT,
578 gen8_pp_set_surface_state(ctx, pp_context,
580 ALIGN(width[2], 4) / 4, height[2], pitch[2],
581 I965_SURFACEFORMAT_R8_SINT,
/* RGB destination: tell the kernel whether to swap R/B on save. */
585 if (fourcc_info->format == I965_COLOR_RGB) {
586 struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
587 /* the format is MSB: X-B-G-R */
588 pp_static_parameter->grf2.save_avs_rgb_swap = 0;
589 if ((fourcc == VA_FOURCC_BGRA) ||
590 (fourcc == VA_FOURCC_BGRX)) {
591 /* It is stored as MSB: X-R-G-B */
592 pp_static_parameter->grf2.save_avs_rgb_swap = 1;
/* Source path: choose the sampler surface format for plane 0; packed
 * YUV variants use the YCRCB formats. */
596 int format0 = SURFACE_FORMAT_Y8_UNORM;
600 format0 = SURFACE_FORMAT_YCRCB_NORMAL;
604 format0 = SURFACE_FORMAT_YCRCB_SWAPY;
611 if (fourcc_info->format == I965_COLOR_RGB) {
612 struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
613 /* Only R8G8B8A8_UNORM is supported for BGRX or RGBX */
614 format0 = SURFACE_FORMAT_R8G8B8A8_UNORM;
615 pp_static_parameter->grf2.src_avs_rgb_swap = 0;
616 if ((fourcc == VA_FOURCC_BGRA) ||
617 (fourcc == VA_FOURCC_BGRX)) {
618 pp_static_parameter->grf2.src_avs_rgb_swap = 1;
/* Sampler-read source: program SURFACE_STATE2 slots per plane. */
622 gen8_pp_set_surface2_state(ctx, pp_context,
624 width[0], height[0], pitch[0],
629 if (fourcc_info->num_planes == 2) {
630 gen8_pp_set_surface2_state(ctx, pp_context,
632 width[1], height[1], pitch[1],
634 SURFACE_FORMAT_R8B8_UNORM, 0,
636 } else if (fourcc_info->num_planes == 3) {
637 gen8_pp_set_surface2_state(ctx, pp_context,
639 width[1], height[1], pitch[1],
641 SURFACE_FORMAT_R8_UNORM, 0,
643 gen8_pp_set_surface2_state(ctx, pp_context,
645 width[2], height[2], pitch[2],
647 SURFACE_FORMAT_R8_UNORM, 0,
/* Fallback: media-read source states (mirrors the target layout). */
651 gen8_pp_set_surface_state(ctx, pp_context,
653 ALIGN(width[0], 4) / 4, height[0], pitch[0],
654 I965_SURFACEFORMAT_R8_UINT,
657 if (fourcc_info->num_planes == 2) {
658 gen8_pp_set_surface_state(ctx, pp_context,
660 ALIGN(width[1], 2) / 2, height[1], pitch[1],
661 I965_SURFACEFORMAT_R8G8_SINT,
663 } else if (fourcc_info->num_planes == 3) {
664 gen8_pp_set_surface_state(ctx, pp_context,
666 ALIGN(width[1], 4) / 4, height[1], pitch[1],
667 I965_SURFACEFORMAT_R8_SINT,
669 gen8_pp_set_surface_state(ctx, pp_context,
671 ALIGN(width[2], 4) / 4, height[2], pitch[2],
672 I965_SURFACEFORMAT_R8_SINT,
/* Walker callbacks for the NULL (no-op) module; bodies elided in this
 * excerpt, but they are trivial step/parameter stubs. */
679 pp_null_x_steps(void *private_context)
685 pp_null_y_steps(void *private_context)
691 pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
/* Initialize the NULL post-processing module: install the no-op walker
 * callbacks, propagate the source flags to the destination, and succeed
 * without touching any pixel data. */
697 pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
698 const struct i965_surface *src_surface,
699 const VARectangle *src_rect,
700 struct i965_surface *dst_surface,
701 const VARectangle *dst_rect,
704 /* private function & data */
705 pp_context->pp_x_steps = pp_null_x_steps;
706 pp_context->pp_y_steps = pp_null_y_steps;
707 pp_context->private_context = NULL;
708 pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
/* Keep interlacing/color-standard flags consistent across the pipeline. */
710 dst_surface->flags = src_surface->flags;
712 return VA_STATUS_SUCCESS;
/* Compute the per-edge pixel masks for partially-covered 16x8 walker
 * blocks: a left mask when dst_rect->x is not dword aligned, a right mask
 * for the last partial column, and a bottom mask for the last partial row.
 * Full coverage uses 0xffff (16-bit horizontal) / 0xff (8-bit vertical). */
715 static void calculate_boundary_block_mask(struct i965_post_processing_context *pp_context, const VARectangle *dst_rect)
717 int i, dst_width_adjust;
718 /* x offset of dest surface must be dword aligned.
719 * so we have to extend dst surface on left edge, and mask out pixels not interested
721 if (dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT) {
722 pp_context->block_horizontal_mask_left = 0;
/* Enable only the pixels at/after the real left edge inside the block. */
723 for (i = dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT; i < GPU_ASM_BLOCK_WIDTH; i++) {
724 pp_context->block_horizontal_mask_left |= 1 << i;
727 pp_context->block_horizontal_mask_left = 0xffff;
/* Account for the left-edge extension when sizing the right mask. */
730 dst_width_adjust = dst_rect->width + dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT;
731 if (dst_width_adjust % GPU_ASM_BLOCK_WIDTH) {
732 pp_context->block_horizontal_mask_right = (1 << (dst_width_adjust % GPU_ASM_BLOCK_WIDTH)) - 1;
734 pp_context->block_horizontal_mask_right = 0xffff;
737 if (dst_rect->height % GPU_ASM_BLOCK_HEIGHT) {
738 pp_context->block_vertical_mask_bottom = (1 << (dst_rect->height % GPU_ASM_BLOCK_HEIGHT)) - 1;
740 pp_context->block_vertical_mask_bottom = 0xff;
/* Number of horizontal walker steps: destination width in 16-pixel blocks
 * (dest_w is already aligned to 16 by the initializer). */
746 gen7_pp_avs_x_steps(void *private_context)
748 struct pp_avs_context *pp_avs_context = private_context;
750 return pp_avs_context->dest_w / 16;
/* Number of vertical walker steps: destination height in 16-line blocks. */
754 gen7_pp_avs_y_steps(void *private_context)
756 struct pp_avs_context *pp_avs_context = private_context;
758 return pp_avs_context->dest_h / 16;
/* Fill the per-block inline parameters for walker step (x, y): the block's
 * destination origin and the horizontal sampler scaling step. */
762 gen7_pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
764 struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)pp_context->private_context;
765 struct gen7_pp_inline_parameter *pp_inline_parameter = pp_context->pp_inline_parameter;
767 pp_inline_parameter->grf9.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
768 pp_inline_parameter->grf9.destination_block_vertical_origin = y * 16 + pp_avs_context->dest_y;
769 pp_inline_parameter->grf9.constant_0 = 0xffffffff;
/* Normalized horizontal step: source range covered per destination pixel. */
770 pp_inline_parameter->grf9.sampler_load_main_video_x_scaling_step = pp_avs_context->horiz_range / pp_avs_context->src_w;
/* For packed 4:2:2 destinations, tell the kernel where the Y/U/V bytes sit
 * within each 4-byte pair: YUY2 is Y0 U Y1 V, UYVY is U Y0 V Y1. Other
 * fourccs leave the offsets untouched. */
775 static void gen7_update_src_surface_uv_offset(VADriverContextP ctx,
776 struct i965_post_processing_context *pp_context,
777 const struct i965_surface *surface)
779 struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
780 int fourcc = pp_get_surface_fourcc(ctx, surface);
782 if (fourcc == VA_FOURCC_YUY2) {
783 pp_static_parameter->grf2.di_destination_packed_y_component_offset = 0;
784 pp_static_parameter->grf2.di_destination_packed_u_component_offset = 1;
785 pp_static_parameter->grf2.di_destination_packed_v_component_offset = 3;
786 } else if (fourcc == VA_FOURCC_UYVY) {
787 pp_static_parameter->grf2.di_destination_packed_y_component_offset = 1;
788 pp_static_parameter->grf2.di_destination_packed_u_component_offset = 0;
789 pp_static_parameter->grf2.di_destination_packed_v_component_offset = 2;
/* AVS scaler configuration for gen8: 6 fractional coefficient bits,
 * 8 luma / 4 chroma taps, and the min/max coefficient bounds used when
 * generating the polyphase filter tables. */
793 static const AVSConfig gen8_avs_config = {
794 .coeff_frac_bits = 6,
795 .coeff_epsilon = 1.0f / (1U << 6),
797 .num_luma_coeffs = 8,
798 .num_chroma_coeffs = 4,
/* Lower bounds per tap (signed, in coefficient units). */
802 .y_k_h = { -2, -2, -2, -2, -2, -2, -2, -2 },
803 .y_k_v = { -2, -2, -2, -2, -2, -2, -2, -2 },
804 .uv_k_h = { -1, -2, -2, -1 },
805 .uv_k_v = { -1, -2, -2, -1 },
/* Upper bounds per tap. */
808 .y_k_h = { 2, 2, 2, 2, 2, 2, 2, 2 },
809 .y_k_v = { 2, 2, 2, 2, 2, 2, 2, 2 },
810 .uv_k_h = { 1, 2, 2, 1 },
811 .uv_k_v = { 1, 2, 2, 1 },
/* Decide whether the sampler's 8-tap adaptive filter can be enabled for
 * this source: packed 4:2:2 sources (YUY2/UYVY) are the special case;
 * return values are elided in this excerpt. */
817 gen8_pp_get_8tap_filter_mode(VADriverContextP ctx,
818 const struct i965_surface *surface)
820 int fourcc = pp_get_surface_fourcc(ctx, surface);
822 if (fourcc == VA_FOURCC_YUY2 ||
823 fourcc == VA_FOURCC_UYVY)
/* Workaround detector: returns whether the kernel should read the source
 * with media block messages instead of the sampler. Requires both source
 * and destination to be 4:2:0 and the copy to be 1:1 (identical rects). */
830 gen8_pp_kernel_use_media_read_msg(VADriverContextP ctx,
831 const struct i965_surface *src_surface,
832 const VARectangle *src_rect,
833 const struct i965_surface *dst_surface,
834 const VARectangle *dst_rect)
836 int src_fourcc = pp_get_surface_fourcc(ctx, src_surface);
837 int dst_fourcc = pp_get_surface_fourcc(ctx, dst_surface);
838 const i965_fourcc_info *src_fourcc_info = get_fourcc_info(src_fourcc);
839 const i965_fourcc_info *dst_fourcc_info = get_fourcc_info(dst_fourcc);
/* Both ends must be known YUV420 formats. */
841 if (!src_fourcc_info ||
842 src_fourcc_info->subsampling != SUBSAMPLE_YUV420 ||
844 dst_fourcc_info->subsampling != SUBSAMPLE_YUV420)
/* No scaling and no cropping offset: rects must match exactly. */
847 if (src_rect->x == dst_rect->x &&
848 src_rect->y == dst_rect->y &&
849 src_rect->width == dst_rect->width &&
850 src_rect->height == dst_rect->height)
/* Common initializer for all gen8 PLx load/save (AVS) modules.
 * Steps: program source/destination surface states, build the sampler_8x8
 * AVS state (IEF settings plus the polyphase coefficient tables), install
 * the walker callbacks, and fill the static GRF parameters that drive the
 * scaling kernel. Returns VA_STATUS_SUCCESS. */
857 gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
858 const struct i965_surface *src_surface,
859 const VARectangle *src_rect,
860 struct i965_surface *dst_surface,
861 const VARectangle *dst_rect,
864 /* TODO: Add the sampler_8x8 state */
865 struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->pp_avs_context;
866 struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
867 struct gen8_sampler_8x8_avs *sampler_8x8;
869 int width[3], height[3], pitch[3], offset[3];
870 int src_width, src_height;
871 unsigned char *cc_ptr;
872 AVSState * const avs = &pp_avs_context->state;
874 const float * yuv_to_rgb_coefs;
875 size_t yuv_to_rgb_coefs_size;
877 memset(pp_static_parameter, 0, sizeof(struct gen7_pp_static_parameter));
/* Source surface states start at binding index 0. */
880 gen8_pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 0, 0,
882 width, height, pitch, offset);
883 src_height = height[0];
884 src_width = width[0];
886 /* destination surface */
/* Destination surface states start at binding index 24 and are targets. */
887 gen8_pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 24, 1,
889 width, height, pitch, offset);
891 /* sampler 8x8 state */
892 dri_bo_map(pp_context->dynamic_state.bo, True);
893 assert(pp_context->dynamic_state.bo->virtual);
895 cc_ptr = (unsigned char *) pp_context->dynamic_state.bo->virtual +
896 pp_context->sampler_offset;
897 /* Currently only one gen8 sampler_8x8 is initialized */
898 sampler_8x8 = (struct gen8_sampler_8x8_avs *) cc_ptr;
899 memset(sampler_8x8, 0, sizeof(*sampler_8x8));
/* IEF (image enhancement filter) tuning — values carried over from the
 * Ivy Bridge driver where noted. */
901 sampler_8x8->dw0.gain_factor = 44;
902 sampler_8x8->dw0.weak_edge_threshold = 1;
903 sampler_8x8->dw0.strong_edge_threshold = 8;
904 /* Use the value like that on Ivy instead of default
905 * sampler_8x8->dw0.r3x_coefficient = 5;
907 sampler_8x8->dw0.r3x_coefficient = 27;
908 sampler_8x8->dw0.r3c_coefficient = 5;
910 sampler_8x8->dw2.global_noise_estimation = 255;
911 sampler_8x8->dw2.non_edge_weight = 1;
912 sampler_8x8->dw2.regular_weight = 2;
913 sampler_8x8->dw2.strong_edge_weight = 7;
914 /* Use the value like that on Ivy instead of default
915 * sampler_8x8->dw2.r5x_coefficient = 7;
916 * sampler_8x8->dw2.r5cx_coefficient = 7;
917 * sampler_8x8->dw2.r5c_coefficient = 7;
919 sampler_8x8->dw2.r5x_coefficient = 9;
920 sampler_8x8->dw2.r5cx_coefficient = 8;
921 sampler_8x8->dw2.r5c_coefficient = 3;
923 sampler_8x8->dw3.sin_alpha = 101; /* sin_alpha = 0 */
924 sampler_8x8->dw3.cos_alpha = 79; /* cos_alpha = 0 */
925 sampler_8x8->dw3.sat_max = 0x1f;
926 sampler_8x8->dw3.hue_max = 14;
927 /* The 8tap filter will determine whether the adaptive Filter is
928 * applied for all channels(dw153).
929 * If the 8tap filter is disabled, the adaptive filter should be disabled.
930 * Only when 8tap filter is enabled, it can be enabled or not.
932 sampler_8x8->dw3.enable_8tap_filter = gen8_pp_get_8tap_filter_mode(ctx, src_surface);
933 sampler_8x8->dw3.ief4_smooth_enable = 0;
935 sampler_8x8->dw4.s3u = 0;
936 sampler_8x8->dw4.diamond_margin = 4;
937 sampler_8x8->dw4.vy_std_enable = 0;
938 sampler_8x8->dw4.umid = 110;
939 sampler_8x8->dw4.vmid = 154;
941 sampler_8x8->dw5.diamond_dv = 0;
942 sampler_8x8->dw5.diamond_th = 35;
943 sampler_8x8->dw5.diamond_alpha = 100; /* diamond_alpha = 0 */
944 sampler_8x8->dw5.hs_margin = 3;
945 sampler_8x8->dw5.diamond_du = 2;
/* Skin-tone detection Y breakpoints. */
947 sampler_8x8->dw6.y_point1 = 46;
948 sampler_8x8->dw6.y_point2 = 47;
949 sampler_8x8->dw6.y_point3 = 254;
950 sampler_8x8->dw6.y_point4 = 255;
952 sampler_8x8->dw7.inv_margin_vyl = 3300; /* inv_margin_vyl = 0 */
954 sampler_8x8->dw8.inv_margin_vyu = 1600; /* inv_margin_vyu = 0 */
955 sampler_8x8->dw8.p0l = 46;
956 sampler_8x8->dw8.p1l = 216;
958 sampler_8x8->dw9.p2l = 236;
959 sampler_8x8->dw9.p3l = 236;
960 sampler_8x8->dw9.b0l = 133;
961 sampler_8x8->dw9.b1l = 130;
963 sampler_8x8->dw10.b2l = 130;
964 sampler_8x8->dw10.b3l = 130;
965 /* s0l = -5 / 256. s2.8 */
966 sampler_8x8->dw10.s0l = 1029; /* s0l = 0 */
967 sampler_8x8->dw10.y_slope2 = 31; /* y_slop2 = 0 */
969 sampler_8x8->dw11.s1l = 0;
970 sampler_8x8->dw11.s2l = 0;
972 sampler_8x8->dw12.s3l = 0;
973 sampler_8x8->dw12.p0u = 46;
974 sampler_8x8->dw12.p1u = 66;
975 sampler_8x8->dw12.y_slope1 = 31; /* y_slope1 = 0 */
977 sampler_8x8->dw13.p2u = 130;
978 sampler_8x8->dw13.p3u = 236;
979 sampler_8x8->dw13.b0u = 143;
980 sampler_8x8->dw13.b1u = 163;
982 sampler_8x8->dw14.b2u = 200;
983 sampler_8x8->dw14.b3u = 140;
984 sampler_8x8->dw14.s0u = 256; /* s0u = 0 */
986 sampler_8x8->dw15.s1u = 113; /* s1u = 0 */
987 sampler_8x8->dw15.s2u = 1203; /* s2u = 0 */
/* Recompute the polyphase AVS coefficients for the current scale factors
 * and sharpness flags. */
989 sx = (float)dst_rect->width / src_rect->width;
990 sy = (float)dst_rect->height / src_rect->height;
991 avs_update_coefficients(avs, sx, sy, pp_context->filter_flags);
/* First coefficient bank: phases 0..16 (17 entries). Each coefficient is
 * packed as a signed 1.6 fixed-point value by intel_format_convert. */
993 assert(avs->config->num_phases >= 16);
994 for (i = 0; i <= 16; i++) {
995 struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
996 &sampler_8x8->coefficients[i];
997 const AVSCoeffs * const coeffs = &avs->coeffs[i];
999 sampler_8x8_state->dw0.table_0x_filter_c0 =
1000 intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
1001 sampler_8x8_state->dw0.table_0y_filter_c0 =
1002 intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
1003 sampler_8x8_state->dw0.table_0x_filter_c1 =
1004 intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
1005 sampler_8x8_state->dw0.table_0y_filter_c1 =
1006 intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
1008 sampler_8x8_state->dw1.table_0x_filter_c2 =
1009 intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
1010 sampler_8x8_state->dw1.table_0y_filter_c2 =
1011 intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
1012 sampler_8x8_state->dw1.table_0x_filter_c3 =
1013 intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
1014 sampler_8x8_state->dw1.table_0y_filter_c3 =
1015 intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
1017 sampler_8x8_state->dw2.table_0x_filter_c4 =
1018 intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
1019 sampler_8x8_state->dw2.table_0y_filter_c4 =
1020 intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
1021 sampler_8x8_state->dw2.table_0x_filter_c5 =
1022 intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
1023 sampler_8x8_state->dw2.table_0y_filter_c5 =
1024 intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
1026 sampler_8x8_state->dw3.table_0x_filter_c6 =
1027 intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
1028 sampler_8x8_state->dw3.table_0y_filter_c6 =
1029 intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
1030 sampler_8x8_state->dw3.table_0x_filter_c7 =
1031 intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
1032 sampler_8x8_state->dw3.table_0y_filter_c7 =
1033 intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
/* Chroma (table 1) uses the 4-tap uv coefficients. */
1035 sampler_8x8_state->dw4.pad0 = 0;
1036 sampler_8x8_state->dw5.pad0 = 0;
1037 sampler_8x8_state->dw4.table_1x_filter_c2 =
1038 intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
1039 sampler_8x8_state->dw4.table_1x_filter_c3 =
1040 intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
1041 sampler_8x8_state->dw5.table_1x_filter_c4 =
1042 intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
1043 sampler_8x8_state->dw5.table_1x_filter_c5 =
1044 intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
1046 sampler_8x8_state->dw6.pad0 =
1047 sampler_8x8_state->dw7.pad0 =
1048 sampler_8x8_state->dw6.table_1y_filter_c2 =
1049 intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
1050 sampler_8x8_state->dw6.table_1y_filter_c3 =
1051 intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
1052 sampler_8x8_state->dw7.table_1y_filter_c4 =
1053 intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
1054 sampler_8x8_state->dw7.table_1y_filter_c5 =
1055 intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
/* Global sharpness/adaptive-filter control dwords. */
1058 sampler_8x8->dw152.default_sharpness_level =
1059 -avs_is_needed(pp_context->filter_flags);
1060 sampler_8x8->dw153.adaptive_filter_for_all_channel = 1;
1061 sampler_8x8->dw153.bypass_y_adaptive_filtering = 1;
1062 sampler_8x8->dw153.bypass_x_adaptive_filtering = 1;
/* Second coefficient bank: remaining phases (17..num_phases) land in
 * coefficients1[], offset by 17. */
1064 for (; i <= avs->config->num_phases; i++) {
1065 struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
1066 &sampler_8x8->coefficients1[i - 17];
1067 const AVSCoeffs * const coeffs = &avs->coeffs[i];
1069 sampler_8x8_state->dw0.table_0x_filter_c0 =
1070 intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
1071 sampler_8x8_state->dw0.table_0y_filter_c0 =
1072 intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
1073 sampler_8x8_state->dw0.table_0x_filter_c1 =
1074 intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
1075 sampler_8x8_state->dw0.table_0y_filter_c1 =
1076 intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
1078 sampler_8x8_state->dw1.table_0x_filter_c2 =
1079 intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
1080 sampler_8x8_state->dw1.table_0y_filter_c2 =
1081 intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
1082 sampler_8x8_state->dw1.table_0x_filter_c3 =
1083 intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
1084 sampler_8x8_state->dw1.table_0y_filter_c3 =
1085 intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
1087 sampler_8x8_state->dw2.table_0x_filter_c4 =
1088 intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
1089 sampler_8x8_state->dw2.table_0y_filter_c4 =
1090 intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
1091 sampler_8x8_state->dw2.table_0x_filter_c5 =
1092 intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
1093 sampler_8x8_state->dw2.table_0y_filter_c5 =
1094 intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
1096 sampler_8x8_state->dw3.table_0x_filter_c6 =
1097 intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
1098 sampler_8x8_state->dw3.table_0y_filter_c6 =
1099 intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
1100 sampler_8x8_state->dw3.table_0x_filter_c7 =
1101 intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
1102 sampler_8x8_state->dw3.table_0y_filter_c7 =
1103 intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
1105 sampler_8x8_state->dw4.pad0 = 0;
1106 sampler_8x8_state->dw5.pad0 = 0;
1107 sampler_8x8_state->dw4.table_1x_filter_c2 =
1108 intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
1109 sampler_8x8_state->dw4.table_1x_filter_c3 =
1110 intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
1111 sampler_8x8_state->dw5.table_1x_filter_c4 =
1112 intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
1113 sampler_8x8_state->dw5.table_1x_filter_c5 =
1114 intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
1116 sampler_8x8_state->dw6.pad0 =
1117 sampler_8x8_state->dw7.pad0 =
1118 sampler_8x8_state->dw6.table_1y_filter_c2 =
1119 intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
1120 sampler_8x8_state->dw6.table_1y_filter_c3 =
1121 intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
1122 sampler_8x8_state->dw7.table_1y_filter_c4 =
1123 intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
1124 sampler_8x8_state->dw7.table_1y_filter_c5 =
1125 intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
1128 dri_bo_unmap(pp_context->dynamic_state.bo);
1131 /* private function & data */
1132 pp_context->pp_x_steps = gen7_pp_avs_x_steps;
1133 pp_context->pp_y_steps = gen7_pp_avs_y_steps;
1134 pp_context->private_context = &pp_context->pp_avs_context;
1135 pp_context->pp_set_block_parameter = gen7_pp_avs_set_block_parameter;
/* Extend the destination left edge so its X origin is dword aligned; the
 * boundary block masks hide the extra pixels. */
1137 int dst_left_edge_extend = dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT;
1138 pp_avs_context->dest_x = dst_rect->x - dst_left_edge_extend;
1139 pp_avs_context->dest_y = dst_rect->y;
1140 pp_avs_context->dest_w = ALIGN(dst_rect->width + dst_left_edge_extend, 16);
1141 pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
1142 pp_avs_context->src_w = src_rect->width;
1143 pp_avs_context->src_h = src_rect->height;
1144 pp_avs_context->horiz_range = (float)src_rect->width / src_width;
1146 int dw = (pp_avs_context->src_w - 1) / 16 + 1;
1147 dw = MAX(dw, dst_rect->width + dst_left_edge_extend);
/* Static GRF parameters consumed by the scaling kernel. */
1149 pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
1150 pp_static_parameter->grf2.avs_wa_enable = gen8_pp_kernel_use_media_read_msg(ctx,
1151 src_surface, src_rect,
1152 dst_surface, dst_rect); /* reuse this flag for media block reading on gen8+ */
1153 pp_static_parameter->grf2.alpha = 255;
1155 pp_static_parameter->grf3.sampler_load_horizontal_scaling_step_ratio = (float) pp_avs_context->src_w / dw;
1156 pp_static_parameter->grf4.sampler_load_vertical_scaling_step = (float) src_rect->height / src_height / dst_rect->height;
1157 pp_static_parameter->grf5.sampler_load_vertical_frame_origin = (float) src_rect->y / src_height -
1158 (float) pp_avs_context->dest_y * pp_static_parameter->grf4.sampler_load_vertical_scaling_step;
1159 pp_static_parameter->grf6.sampler_load_horizontal_frame_origin = (float) src_rect->x / src_width -
1160 (float) pp_avs_context->dest_x * pp_avs_context->horiz_range / dw;
/* Packed-destination Y/U/V byte offsets (YUY2/UYVY only). */
1162 gen7_update_src_surface_uv_offset(ctx, pp_context, dst_surface);
/* CSC coefficients for the source color standard go into grf7+. */
1164 yuv_to_rgb_coefs = i915_color_standard_to_coefs(i915_filter_to_color_standard(src_surface->flags &
1166 &yuv_to_rgb_coefs_size);
1167 memcpy(&pp_static_parameter->grf7, yuv_to_rgb_coefs, yuv_to_rgb_coefs_size);
1169 dst_surface->flags = src_surface->flags;
1171 return VA_STATUS_SUCCESS;
/*
 * gen8_pp_initialize: (re)allocate the per-run GPU state buffers
 * (surface-state/binding-table BO and the dynamic-state BO holding
 * CURBE + interface descriptors + sampler state), lay out the offsets
 * inside the dynamic-state BO, and run the selected module's
 * initialize() hook.
 * NOTE(review): the function's name/return-type line and several body
 * lines are not visible in this extraction; parameter list below.
 */
1176 VADriverContextP ctx,
1177 struct i965_post_processing_context *pp_context,
1178 const struct i965_surface *src_surface,
1179 const VARectangle *src_rect,
1180 struct i965_surface *dst_surface,
1181 const VARectangle *dst_rect,
1187 struct i965_driver_data *i965 = i965_driver_data(ctx);
1190 unsigned int end_offset;
1191 struct pp_module *pp_module;
1192 int static_param_size, inline_param_size;
/* Drop any previous surface-state/binding-table BO and allocate a fresh one:
 * one padded surface state plus one binding-table entry per PP surface. */
1194 dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1195 bo = dri_bo_alloc(i965->intel.bufmgr,
1196 "surface state & binding table",
1197 (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_PP_SURFACES,
1200 pp_context->surface_state_binding_table.bo = bo;
1202 pp_context->idrt.num_interface_descriptors = 0;
1204 pp_context->sampler_size = 4 * 4096;
/* Dynamic-state BO: 4KB head + CURBE + sampler + interface-descriptor areas. */
1206 bo_size = 4096 + pp_context->curbe_size + pp_context->sampler_size
1207 + pp_context->idrt_size;
1209 dri_bo_unreference(pp_context->dynamic_state.bo);
1210 bo = dri_bo_alloc(i965->intel.bufmgr,
1216 pp_context->dynamic_state.bo = bo;
1217 pp_context->dynamic_state.bo_size = bo_size;
1220 pp_context->dynamic_state.end_offset = 0;
/* Carve up the dynamic-state BO; each region is 64-byte aligned. */
1222 /* Constant buffer offset */
1223 pp_context->curbe_offset = ALIGN(end_offset, 64);
1224 end_offset = pp_context->curbe_offset + pp_context->curbe_size;
1226 /* Interface descriptor offset */
1227 pp_context->idrt_offset = ALIGN(end_offset, 64);
1228 end_offset = pp_context->idrt_offset + pp_context->idrt_size;
1230 /* Sampler state offset */
1231 pp_context->sampler_offset = ALIGN(end_offset, 64);
1232 end_offset = pp_context->sampler_offset + pp_context->sampler_size;
1234 /* update the end offset of dynamic_state */
1235 pp_context->dynamic_state.end_offset = ALIGN(end_offset, 64);
/* Clear the CPU-side parameter blocks before the module fills them in. */
1237 static_param_size = sizeof(struct gen7_pp_static_parameter);
1238 inline_param_size = sizeof(struct gen7_pp_inline_parameter);
1240 memset(pp_context->pp_static_parameter, 0, static_param_size);
1241 memset(pp_context->pp_inline_parameter, 0, inline_param_size);
1243 assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
1244 pp_context->current_pp = pp_index;
1245 pp_module = &pp_context->pp_modules[pp_index];
/* Delegate per-module setup; modules without a hook are unimplemented. */
1247 if (pp_module->initialize)
1248 va_status = pp_module->initialize(ctx, pp_context,
1255 va_status = VA_STATUS_ERROR_UNIMPLEMENTED;
1257 calculate_boundary_block_mask(pp_context, dst_rect);
/*
 * Append one GEN8 interface descriptor for the currently selected PP
 * kernel into the dynamic-state BO at idrt_offset.
 * NOTE(review): the BO appears to be mapped/unmapped in lines not
 * visible in this extraction — confirm against the full file.
 */
1263 gen8_pp_interface_descriptor_table(VADriverContextP ctx,
1264 struct i965_post_processing_context *pp_context)
1266 struct gen8_interface_descriptor_data *desc;
1268 int pp_index = pp_context->current_pp;
1269 unsigned char *cc_ptr;
1271 bo = pp_context->dynamic_state.bo;
1274 assert(bo->virtual);
1275 cc_ptr = (unsigned char *)bo->virtual + pp_context->idrt_offset;
/* Slot for the next descriptor (indexed by how many are already written). */
1277 desc = (struct gen8_interface_descriptor_data *) cc_ptr +
1278 pp_context->idrt.num_interface_descriptors;
1280 memset(desc, 0, sizeof(*desc));
/* Kernel start pointer is expressed in 64-byte units (>> 6). */
1281 desc->desc0.kernel_start_pointer =
1282 pp_context->pp_modules[pp_index].kernel.kernel_offset >> 6; /* reloc */
1283 desc->desc2.single_program_flow = 1;
1284 desc->desc2.floating_point_mode = FLOATING_POINT_IEEE_754;
1285 desc->desc3.sampler_count = 0; /* 1 - 4 samplers used */
/* Sampler-state and binding-table pointers are in 32-byte units (>> 5). */
1286 desc->desc3.sampler_state_pointer = pp_context->sampler_offset >> 5;
1287 desc->desc4.binding_table_entry_count = 0;
1288 desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
1289 desc->desc5.constant_urb_entry_read_offset = 0;
1291 desc->desc5.constant_urb_entry_read_length = 8; /* grf 1-8 */
1294 pp_context->idrt.num_interface_descriptors++;
/*
 * Copy the CPU-side static (CURBE) parameters into the dynamic-state BO
 * at curbe_offset so the kernel can read them as constants.
 */
1299 gen8_pp_upload_constants(VADriverContextP ctx,
1300 struct i965_post_processing_context *pp_context)
1302 unsigned char *constant_buffer;
/* The GEN7-layout static parameter block is expected to be exactly 256B. */
1305 assert(sizeof(struct gen7_pp_static_parameter) == 256);
1307 param_size = sizeof(struct gen7_pp_static_parameter);
/* Map writable (second arg 1 = write), copy, then unmap. */
1309 dri_bo_map(pp_context->dynamic_state.bo, 1);
1310 assert(pp_context->dynamic_state.bo->virtual);
1311 constant_buffer = (unsigned char *) pp_context->dynamic_state.bo->virtual +
1312 pp_context->curbe_offset;
1314 memcpy(constant_buffer, pp_context->pp_static_parameter, param_size);
1315 dri_bo_unmap(pp_context->dynamic_state.bo);
/* Prepare all indirect GPU state for a PP run: interface descriptor
 * table first, then the CURBE constants. */
1320 gen8_pp_states_setup(VADriverContextP ctx,
1321 struct i965_post_processing_context *pp_context)
1323 gen8_pp_interface_descriptor_table(ctx, pp_context);
1324 gen8_pp_upload_constants(ctx, pp_context);
/* Emit PIPELINE_SELECT to put the GPU in the media pipeline mode
 * (shared with gen6+ — hence the gen6_ prefix). */
1328 gen6_pp_pipeline_select(VADriverContextP ctx,
1329 struct i965_post_processing_context *pp_context)
1331 struct intel_batchbuffer *batch = pp_context->batch;
1333 BEGIN_BATCH(batch, 1);
1334 OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
1335 ADVANCE_BATCH(batch);
/*
 * Emit the 16-dword GEN8 STATE_BASE_ADDRESS: surface state base points
 * at the binding-table BO, dynamic state base at the dynamic-state BO,
 * instruction base at the kernel BO; remaining bases are zero/modify.
 */
1339 gen8_pp_state_base_address(VADriverContextP ctx,
1340 struct i965_post_processing_context *pp_context)
1342 struct intel_batchbuffer *batch = pp_context->batch;
1344 BEGIN_BATCH(batch, 16);
1345 OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));
1346 /* DW1 Generate state address */
1347 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1348 OUT_BATCH(batch, 0);
1349 OUT_BATCH(batch, 0);
1351 /* DW4-5. Surface state address */
1352 OUT_RELOC64(batch, pp_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1354 /* DW6-7. Dynamic state address */
1355 OUT_RELOC64(batch, pp_context->dynamic_state.bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
1356 0, 0 | BASE_ADDRESS_MODIFY);
1358 /* DW8. Indirect object address */
1359 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1360 OUT_BATCH(batch, 0);
1362 /* DW10-11. Instruction base address */
1363 OUT_RELOC64(batch, pp_context->instruction_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
/* Buffer-size limit dwords: 0xFFFF0000 = maximum size, with modify-enable. */
1365 OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1366 OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1367 OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1368 OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1369 ADVANCE_BATCH(batch);
/*
 * Emit MEDIA_VFE_STATE (9 dwords) from the cached vfe_gpu_state:
 * thread count, URB entry count/size and CURBE allocation size.
 */
1373 gen8_pp_vfe_state(VADriverContextP ctx,
1374 struct i965_post_processing_context *pp_context)
1376 struct intel_batchbuffer *batch = pp_context->batch;
1378 BEGIN_BATCH(batch, 9);
1379 OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
1380 OUT_BATCH(batch, 0);
1381 OUT_BATCH(batch, 0);
/* Max threads is encoded as (n - 1) in bits 31:16; URB entries in 15:8. */
1383 (pp_context->vfe_gpu_state.max_num_threads - 1) << 16 |
1384 pp_context->vfe_gpu_state.num_urb_entries << 8);
1385 OUT_BATCH(batch, 0);
1387 (pp_context->vfe_gpu_state.urb_entry_size) << 16 |
1388 /* URB Entry Allocation Size, in 256 bits unit */
1389 (pp_context->vfe_gpu_state.curbe_allocation_size));
1390 /* CURBE Allocation Size, in 256 bits unit */
1391 OUT_BATCH(batch, 0);
1392 OUT_BATCH(batch, 0);
1393 OUT_BATCH(batch, 0);
1394 ADVANCE_BATCH(batch);
/*
 * Emit MEDIA_STATE_FLUSH followed by MEDIA_INTERFACE_DESCRIPTOR_LOAD
 * pointing the hardware at the descriptors written at idrt_offset
 * (relative to the dynamic state base set in STATE_BASE_ADDRESS).
 */
1398 gen8_interface_descriptor_load(VADriverContextP ctx,
1399 struct i965_post_processing_context *pp_context)
1401 struct intel_batchbuffer *batch = pp_context->batch;
1403 BEGIN_BATCH(batch, 6);
/* Flush outstanding media state before reloading the descriptor table. */
1405 OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
1406 OUT_BATCH(batch, 0);
1408 OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
1409 OUT_BATCH(batch, 0);
/* Total size of the descriptor table in bytes. */
1411 pp_context->idrt.num_interface_descriptors * sizeof(struct gen8_interface_descriptor_data));
1412 OUT_BATCH(batch, pp_context->idrt_offset);
1413 ADVANCE_BATCH(batch);
/*
 * Emit MEDIA_CURBE_LOAD pointing at the constants uploaded at
 * curbe_offset in the dynamic-state BO.
 */
1417 gen8_pp_curbe_load(VADriverContextP ctx,
1418 struct i965_post_processing_context *pp_context)
1420 struct intel_batchbuffer *batch = pp_context->batch;
/* NOTE(review): the 64 initializer is immediately overwritten below —
 * dead store, kept as-is since surrounding lines are not fully visible. */
1421 int param_size = 64;
1423 param_size = sizeof(struct gen7_pp_static_parameter);
1425 BEGIN_BATCH(batch, 4);
1426 OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
1427 OUT_BATCH(batch, 0);
1430 OUT_BATCH(batch, pp_context->curbe_offset);
1431 ADVANCE_BATCH(batch);
/*
 * Build a secondary batch of MEDIA_OBJECT commands, one per block of
 * the x_steps * y_steps walk, each followed by MEDIA_STATE_FLUSH, then
 * chain to it with MI_BATCH_BUFFER_START and flush the main batch.
 */
1435 gen8_pp_object_walker(VADriverContextP ctx,
1436 struct i965_post_processing_context *pp_context)
1438 struct i965_driver_data *i965 = i965_driver_data(ctx);
1439 struct intel_batchbuffer *batch = pp_context->batch;
1440 int x, x_steps, y, y_steps;
1441 int param_size, command_length_in_dws, extra_cmd_in_dws;
1442 dri_bo *command_buffer;
1443 unsigned int *command_ptr;
1445 param_size = sizeof(struct gen7_pp_inline_parameter);
1447 x_steps = pp_context->pp_x_steps(pp_context->private_context);
1448 y_steps = pp_context->pp_y_steps(pp_context->private_context);
/* 6 fixed dwords per MEDIA_OBJECT plus the inline parameter payload;
 * 2 extra dwords per block for the trailing MEDIA_STATE_FLUSH. */
1449 command_length_in_dws = 6 + (param_size >> 2);
1450 extra_cmd_in_dws = 2;
1451 command_buffer = dri_bo_alloc(i965->intel.bufmgr,
1452 "command objects buffer",
1453 (command_length_in_dws + extra_cmd_in_dws) * 4 * x_steps * y_steps + 64,
1456 dri_bo_map(command_buffer, 1);
1457 command_ptr = command_buffer->virtual;
1459 for (y = 0; y < y_steps; y++) {
1460 for (x = 0; x < x_steps; x++) {
/* pp_set_block_parameter returns 0 when this (x, y) block should be
 * emitted; the inline parameters are copied right after the header. */
1461 if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {
1463 *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
1469 memcpy(command_ptr, pp_context->pp_inline_parameter, param_size);
1470 command_ptr += (param_size >> 2);
1472 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
/* Keep the batch-end qword-aligned (pad with a NOOP when even sized —
 * the padding lines themselves are not visible in this extraction). */
1478 if ((command_length_in_dws + extra_cmd_in_dws) * x_steps * y_steps % 2 == 0)
1481 *command_ptr++ = MI_BATCH_BUFFER_END;
1484 dri_bo_unmap(command_buffer);
/* Chain into the secondary buffer from the main batch. */
1486 BEGIN_BATCH(batch, 3);
1487 OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
1488 OUT_RELOC64(batch, command_buffer,
1489 I915_GEM_DOMAIN_COMMAND, 0, 0);
1490 ADVANCE_BATCH(batch);
1492 dri_bo_unreference(command_buffer);
1494 /* Have to execute the batch buffer here becuase MI_BATCH_BUFFER_END
1495 * will cause control to pass back to ring buffer
1497 intel_batchbuffer_end_atomic(batch);
1498 intel_batchbuffer_flush(batch);
1499 intel_batchbuffer_start_atomic(batch, 0x1000);
/*
 * Emit the full media pipeline for one PP run: pipeline select, base
 * addresses, VFE state, CURBE load, interface descriptor load, then
 * walk the MEDIA_OBJECTs.
 */
1503 gen8_pp_pipeline_setup(VADriverContextP ctx,
1504 struct i965_post_processing_context *pp_context)
1506 struct intel_batchbuffer *batch = pp_context->batch;
1508 intel_batchbuffer_start_atomic(batch, 0x1000);
1509 intel_batchbuffer_emit_mi_flush(batch);
1510 gen6_pp_pipeline_select(ctx, pp_context);
1511 gen8_pp_state_base_address(ctx, pp_context);
1512 gen8_pp_vfe_state(ctx, pp_context);
1513 gen8_pp_curbe_load(ctx, pp_context);
1514 gen8_interface_descriptor_load(ctx, pp_context);
/* NOTE(review): gen8_pp_vfe_state is emitted a second time here —
 * possibly a deliberate reprogram after the descriptor load, but it
 * looks redundant; verify against the hardware programming sequence. */
1515 gen8_pp_vfe_state(ctx, pp_context);
1516 gen8_pp_object_walker(ctx, pp_context);
1517 intel_batchbuffer_end_atomic(batch);
/*
 * Top-level gen8 PP entry point: initialize state for the requested
 * module and, on success, upload states and submit the pipeline.
 */
1521 gen8_post_processing(
1522 VADriverContextP ctx,
1523 struct i965_post_processing_context *pp_context,
1524 const struct i965_surface *src_surface,
1525 const VARectangle *src_rect,
1526 struct i965_surface *dst_surface,
1527 const VARectangle *dst_rect,
1534 va_status = gen8_pp_initialize(ctx, pp_context,
/* Only touch the hardware when per-module initialization succeeded. */
1542 if (va_status == VA_STATUS_SUCCESS) {
1543 gen8_pp_states_setup(ctx, pp_context);
1544 gen8_pp_pipeline_setup(ctx, pp_context);
/*
 * Tear down a gen8 PP context: destroy the scaling GPE context and the
 * vebox context if present, drop all BOs, and free the CPU-side
 * parameter blocks. Pointers are nulled so a double finalize is safe.
 */
1551 gen8_post_processing_context_finalize(VADriverContextP ctx,
1552 struct i965_post_processing_context *pp_context)
1554 if (pp_context->scaling_gpe_context_initialized) {
1555 gen8_gpe_context_destroy(&pp_context->scaling_gpe_context);
1556 pp_context->scaling_gpe_context_initialized = 0;
1559 if (pp_context->vebox_proc_ctx) {
1560 gen75_vebox_context_destroy(ctx, pp_context->vebox_proc_ctx);
1561 pp_context->vebox_proc_ctx = NULL;
/* dri_bo_unreference tolerates NULL, so these need no guard. */
1564 dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1565 pp_context->surface_state_binding_table.bo = NULL;
1567 dri_bo_unreference(pp_context->pp_dn_context.stmm_bo);
1568 pp_context->pp_dn_context.stmm_bo = NULL;
1570 if (pp_context->instruction_state.bo) {
1571 dri_bo_unreference(pp_context->instruction_state.bo);
1572 pp_context->instruction_state.bo = NULL;
1575 if (pp_context->indirect_state.bo) {
1576 dri_bo_unreference(pp_context->indirect_state.bo);
1577 pp_context->indirect_state.bo = NULL;
1580 if (pp_context->dynamic_state.bo) {
1581 dri_bo_unreference(pp_context->dynamic_state.bo);
1582 pp_context->dynamic_state.bo = NULL;
/* free(NULL) is a no-op; parameters may already be NULL on alloc failure. */
1585 free(pp_context->pp_static_parameter);
1586 free(pp_context->pp_inline_parameter);
1587 pp_context->pp_static_parameter = NULL;
1588 pp_context->pp_inline_parameter = NULL;
1591 #define VPP_CURBE_ALLOCATION_SIZE 32
/*
 * Common gen8+ PP context setup: program the VFE defaults, install the
 * run/finalize vtable entries, copy the kernel module table, upload all
 * kernel binaries into one instruction BO (64-byte aligned each), and
 * allocate the CPU-side static/inline parameter blocks.
 */
1594 gen8_post_processing_context_common_init(VADriverContextP ctx,
1596 struct pp_module *pp_modules,
1598 struct intel_batchbuffer *batch)
1600 struct i965_driver_data *i965 = i965_driver_data(ctx);
1602 unsigned int kernel_offset, end_offset;
1603 unsigned char *kernel_ptr;
1604 struct pp_module *pp_module;
1605 struct i965_post_processing_context *pp_context = data;
/* Scale thread count with the EU count when known; else a safe default. */
1607 if (i965->intel.eu_total > 0)
1608 pp_context->vfe_gpu_state.max_num_threads = 6 * i965->intel.eu_total;
1610 pp_context->vfe_gpu_state.max_num_threads = 60;
1611 pp_context->vfe_gpu_state.num_urb_entries = 59;
1612 pp_context->vfe_gpu_state.gpgpu_mode = 0;
1613 pp_context->vfe_gpu_state.urb_entry_size = 16 - 1;
1614 pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
1616 pp_context->intel_post_processing = gen8_post_processing;
1617 pp_context->finalize = gen8_post_processing_context_finalize;
1619 assert(ARRAY_ELEMS(pp_context->pp_modules) == num_pp_modules);
1621 memcpy(pp_context->pp_modules, pp_modules, sizeof(pp_context->pp_modules));
/* Total kernel-BO size: 4KB headroom plus every module's binary. */
1623 kernel_size = 4096 ;
1625 for (i = 0; i < NUM_PP_MODULES; i++) {
1626 pp_module = &pp_context->pp_modules[i];
1628 if (pp_module->kernel.bin && pp_module->kernel.size) {
1629 kernel_size += pp_module->kernel.size;
1633 pp_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
1637 if (pp_context->instruction_state.bo == NULL) {
1638 WARN_ONCE("failure to allocate the buffer space for kernel shader in VPP\n");
1642 assert(pp_context->instruction_state.bo);
1645 pp_context->instruction_state.bo_size = kernel_size;
1646 pp_context->instruction_state.end_offset = 0;
/* Copy each kernel binary into the BO at a 64-byte-aligned offset and
 * record the offset for interface-descriptor programming later. */
1649 dri_bo_map(pp_context->instruction_state.bo, 1);
1650 kernel_ptr = (unsigned char *)(pp_context->instruction_state.bo->virtual);
1652 for (i = 0; i < NUM_PP_MODULES; i++) {
1653 pp_module = &pp_context->pp_modules[i];
1655 kernel_offset = ALIGN(end_offset, 64);
1656 pp_module->kernel.kernel_offset = kernel_offset;
1658 if (pp_module->kernel.bin && pp_module->kernel.size) {
1660 memcpy(kernel_ptr + kernel_offset, pp_module->kernel.bin, pp_module->kernel.size);
1661 end_offset = kernel_offset + pp_module->kernel.size;
1665 pp_context->instruction_state.end_offset = ALIGN(end_offset, 64);
1667 dri_bo_unmap(pp_context->instruction_state.bo);
1669 /* static & inline parameters */
/* NOTE(review): calloc results are not checked here — later code
 * memsets/memcpys into these; confirm error handling in the full file. */
1670 pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
1671 pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
1673 pp_context->batch = batch;
/* Room for 5 interface descriptors; 256B CURBE matches the static params. */
1675 pp_context->idrt_size = 5 * sizeof(struct gen8_interface_descriptor_data);
1676 pp_context->curbe_size = 256;
/*
 * Gen8-specific PP init: run the common init with the gen8 module and
 * AVS tables, then build the separate GPE context used by the scaling
 * kernels (binding table layout, sampler, CURBE, VFE limits).
 */
1681 gen8_post_processing_context_init(VADriverContextP ctx,
1683 struct intel_batchbuffer *batch)
1685 struct i965_driver_data *i965 = i965_driver_data(ctx);
1686 struct i965_post_processing_context *pp_context = data;
1687 struct i965_gpe_context *gpe_context;
1689 gen8_post_processing_context_common_init(ctx, data, pp_modules_gen8, ARRAY_ELEMS(pp_modules_gen8), batch);
1690 avs_init_state(&pp_context->pp_avs_context.state, &gen8_avs_config);
1692 /* initialize the YUV420 8-Bit scaling context. The below is supported.
1698 gpe_context = &pp_context->scaling_gpe_context;
1699 gen8_gpe_load_kernels(ctx, gpe_context, pp_common_scaling_gen8, ARRAY_ELEMS(pp_common_scaling_gen8));
1700 gpe_context->idrt.entry_size = ALIGN(sizeof(struct gen8_interface_descriptor_data), 64);
1701 gpe_context->idrt.max_entries = ALIGN(ARRAY_ELEMS(pp_common_scaling_gen8), 2);
1702 gpe_context->sampler.entry_size = ALIGN(sizeof(struct gen8_sampler_state), 64);
1703 gpe_context->sampler.max_entries = 1;
1704 gpe_context->curbe.length = ALIGN(sizeof(struct scaling_input_parameter), 32);
/* Binding table (4B entries) first, then padded surface states. */
1706 gpe_context->surface_state_binding_table.max_entries = MAX_SCALING_SURFACES;
1707 gpe_context->surface_state_binding_table.binding_table_offset = 0;
1708 gpe_context->surface_state_binding_table.surface_state_offset = ALIGN(MAX_SCALING_SURFACES * 4, 64);
1709 gpe_context->surface_state_binding_table.length = ALIGN(MAX_SCALING_SURFACES * 4, 64) + ALIGN(MAX_SCALING_SURFACES * SURFACE_STATE_PADDED_SIZE_GEN8, 64);
/* Thread/URB limits keyed off EU count, falling back by SKU capability. */
1711 if (i965->intel.eu_total > 0) {
1712 gpe_context->vfe_state.max_num_threads = i965->intel.eu_total * 6;
1714 if (i965->intel.has_bsd2)
1715 gpe_context->vfe_state.max_num_threads = 300;
1717 gpe_context->vfe_state.max_num_threads = 60;
1720 gpe_context->vfe_state.curbe_allocation_size = 37;
1721 gpe_context->vfe_state.urb_entry_size = 16;
1722 if (i965->intel.has_bsd2)
1723 gpe_context->vfe_state.num_urb_entries = 127;
1725 gpe_context->vfe_state.num_urb_entries = 64;
1727 gpe_context->vfe_state.gpgpu_mode = 0;
1729 gen8_gpe_context_init(ctx, gpe_context);
/* Advertise which scaling paths this context supports. */
1730 pp_context->scaling_gpe_context_initialized |= (VPPGPE_8BIT_8BIT | VPPGPE_8BIT_420_RGB32);
/*
 * Submit one media-object-walker dispatch for a GPE context: set up the
 * pipeline, emit the walker command, flush media state, and flush the
 * batch. All pointer arguments are validated first.
 */
1736 gen8_run_kernel_media_object_walker(VADriverContextP ctx,
1737 struct intel_batchbuffer *batch,
1738 struct i965_gpe_context *gpe_context,
1739 struct gpe_media_object_walker_parameter *param)
1741 if (!batch || !gpe_context || !param)
1744 intel_batchbuffer_start_atomic(batch, 0x1000);
1746 intel_batchbuffer_emit_mi_flush(batch);
1748 gen8_gpe_pipeline_setup(ctx, gpe_context, batch);
1749 gen8_gpe_media_object_walker(ctx, gpe_context, batch, param);
1750 gen8_gpe_media_state_flush(ctx, gpe_context, batch);
1753 intel_batchbuffer_end_atomic(batch);
1755 intel_batchbuffer_flush(batch);
/*
 * Wrap a dri_bo as a 2D GPE resource and register it in the GPE
 * context's binding table at the given index, with an explicit byte
 * offset into the BO (used for per-plane views of planar surfaces).
 */
1760 gen8_add_dri_buffer_2d_gpe_surface(VADriverContextP ctx,
1761 struct i965_gpe_context *gpe_context,
1763 unsigned int bo_offset,
1765 unsigned int height,
1767 int is_media_block_rw,
1768 unsigned int format,
1772 struct i965_gpe_resource gpe_resource;
1773 struct i965_gpe_surface gpe_surface;
1775 i965_dri_object_to_2d_gpe_resource(&gpe_resource, bo, width, height, pitch);
1776 memset(&gpe_surface, 0, sizeof(gpe_surface));
1777 gpe_surface.gpe_resource = &gpe_resource;
1778 gpe_surface.is_2d_surface = 1;
1779 gpe_surface.is_media_block_rw = !!is_media_block_rw;
1780 gpe_surface.cacheability_control = DEFAULT_MOCS;
1781 gpe_surface.format = format;
/* Override the surface base with bo_offset so one BO can back
 * multiple plane surfaces. */
1782 gpe_surface.is_override_offset = 1;
1783 gpe_surface.offset = bo_offset;
1784 gpe_surface.is_16bpp = is_10bit;
1786 gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
/* The local resource wrapper is no longer needed once registered. */
1788 i965_free_gpe_resource(&gpe_resource);
/*
 * Program the single sampler used by the scaling kernels: nearest
 * filtering for a 1:1 copy, bilinear otherwise, with clamped wrap
 * modes on all axes.
 */
1792 gen8_vpp_scaling_sample_state(VADriverContextP ctx,
1793 struct i965_gpe_context *gpe_context,
1794 VARectangle *src_rect,
1795 VARectangle *dst_rect)
1797 struct gen8_sampler_state *sampler_state;
1799 if (gpe_context == NULL || !src_rect || !dst_rect)
1801 dri_bo_map(gpe_context->sampler.bo, 1);
/* NOTE(review): if this early-out on a NULL mapping is a bare return,
 * the BO stays mapped — the intervening lines are not visible here;
 * verify the unmap path in the full file. */
1803 if (gpe_context->sampler.bo->virtual == NULL)
1806 assert(gpe_context->sampler.bo->virtual);
1808 sampler_state = (struct gen8_sampler_state *)
1809 (gpe_context->sampler.bo->virtual + gpe_context->sampler.offset);
1811 memset(sampler_state, 0, sizeof(*sampler_state));
/* Same size in and out → no interpolation needed; otherwise bilinear. */
1813 if ((src_rect->width == dst_rect->width) &&
1814 (src_rect->height == dst_rect->height)) {
1815 sampler_state->ss0.min_filter = I965_MAPFILTER_NEAREST;
1816 sampler_state->ss0.mag_filter = I965_MAPFILTER_NEAREST;
1818 sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
1819 sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
1822 sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1823 sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1824 sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1826 dri_bo_unmap(gpe_context->sampler.bo);
/*
 * Fill the scaling kernel's CURBE for the YUV420 8-bit → 8-bit path:
 * destination origin, normalized source origin/step factors, and
 * packed-vs-planar flags derived from the surface fourccs.
 */
1830 gen8_gpe_context_yuv420p8_scaling_curbe(VADriverContextP ctx,
1831 struct i965_gpe_context *gpe_context,
1832 VARectangle *src_rect,
1833 struct i965_surface *src_surface,
1834 VARectangle *dst_rect,
1835 struct i965_surface *dst_surface)
1837 struct scaling_input_parameter *scaling_curbe;
1838 float src_width, src_height;
1840 unsigned int fourcc;
1842 if ((gpe_context == NULL) ||
1843 (src_rect == NULL) || (src_surface == NULL) ||
1844 (dst_rect == NULL) || (dst_surface == NULL))
1847 scaling_curbe = i965_gpe_context_map_curbe(gpe_context);
1852 memset(scaling_curbe, 0, sizeof(struct scaling_input_parameter));
1854 scaling_curbe->bti_input = BTI_SCALING_INPUT_Y;
1855 scaling_curbe->bti_output = BTI_SCALING_OUTPUT_Y;
1857 /* As the src_rect/dst_rect is already checked, it is skipped.*/
1858 scaling_curbe->x_dst = dst_rect->x;
1859 scaling_curbe->y_dst = dst_rect->y;
/* Extent of the source region measured from the surface origin;
 * used to normalize coordinates into [0, 1] texture space. */
1861 src_width = src_rect->x + src_rect->width;
1862 src_height = src_rect->y + src_rect->height;
1864 scaling_curbe->inv_width = 1 / src_width;
1865 scaling_curbe->inv_height = 1 / src_height;
/* Per-destination-pixel step in normalized source coordinates. */
1867 coeff = (float)(src_rect->width) / dst_rect->width;
1868 scaling_curbe->x_factor = coeff / src_width;
1869 scaling_curbe->x_orig = (float)(src_rect->x) / src_width;
1871 coeff = (float)(src_rect->height) / dst_rect->height;
1872 scaling_curbe->y_factor = coeff / src_height;
1873 scaling_curbe->y_orig = (float)(src_rect->y) / src_height;
/* NV12 has interleaved UV → flag as packed chroma for the kernel. */
1875 fourcc = pp_get_surface_fourcc(ctx, src_surface);
1876 if (fourcc == VA_FOURCC_NV12) {
1877 scaling_curbe->dw2.src_packed = 1;
1880 fourcc = pp_get_surface_fourcc(ctx, dst_surface);
1882 if (fourcc == VA_FOURCC_NV12) {
1883 scaling_curbe->dw2.dst_packed = 1;
1886 i965_gpe_context_unmap_curbe(gpe_context);
/*
 * Fill per-plane width/height/pitch/bo_offset arrays (up to 3 planes)
 * for either an object_surface or an object_image, based on fourcc:
 * RGB fourccs use only plane 0, NV12/P010 use 2 planes, three-plane
 * YUV uses all 3 (with U/V order swapped for YV12/IMC1).
 */
1890 gen8_pp_context_get_surface_conf(VADriverContextP ctx,
1891 struct i965_surface *surface,
1898 unsigned int fourcc;
1899 if (!rect || !surface || !width || !height || !pitch || !bo_offset)
1902 if (surface->base == NULL)
1905 fourcc = pp_get_surface_fourcc(ctx, surface);
1906 if (surface->type == I965_SURFACE_TYPE_SURFACE) {
1907 struct object_surface *obj_surface;
1909 obj_surface = (struct object_surface *)surface->base;
/* Clamp the requested region to the surface's allocated extent. */
1910 width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
1911 height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
1912 pitch[0] = obj_surface->width;
1915 if (fourcc == VA_FOURCC_RGBX ||
1916 fourcc == VA_FOURCC_RGBA ||
1917 fourcc == VA_FOURCC_BGRX ||
1918 fourcc == VA_FOURCC_BGRA) {
1919 /* nothing to do here */
1920 } else if (fourcc == VA_FOURCC_P010 || fourcc == VA_FOURCC_NV12) {
/* Two-plane formats: single interleaved chroma plane at half size. */
1921 width[1] = width[0] / 2;
1922 height[1] = height[0] / 2;
1923 pitch[1] = obj_surface->cb_cr_pitch;
1924 bo_offset[1] = obj_surface->width * obj_surface->y_cb_offset;
/* Three-plane formats: separate Cb and Cr planes. */
1926 width[1] = width[0] / 2;
1927 height[1] = height[0] / 2;
1928 pitch[1] = obj_surface->cb_cr_pitch;
1929 bo_offset[1] = obj_surface->width * obj_surface->y_cb_offset;
1930 width[2] = width[0] / 2;
1931 height[2] = height[0] / 2;
1932 pitch[2] = obj_surface->cb_cr_pitch;
1933 bo_offset[2] = obj_surface->width * obj_surface->y_cr_offset;
1937 struct object_image *obj_image;
1939 obj_image = (struct object_image *)surface->base;
1941 width[0] = MIN(rect->x + rect->width, obj_image->image.width);
1942 height[0] = MIN(rect->y + rect->height, obj_image->image.height);
1943 pitch[0] = obj_image->image.pitches[0];
1944 bo_offset[0] = obj_image->image.offsets[0];
1946 if (fourcc == VA_FOURCC_RGBX ||
1947 fourcc == VA_FOURCC_RGBA ||
1948 fourcc == VA_FOURCC_BGRX ||
1949 fourcc == VA_FOURCC_BGRA) {
1950 /* nothing to do here */
1951 } else if (fourcc == VA_FOURCC_P010 || fourcc == VA_FOURCC_NV12) {
1952 width[1] = width[0] / 2;
1953 height[1] = height[0] / 2;
1954 pitch[1] = obj_image->image.pitches[1];
1955 bo_offset[1] = obj_image->image.offsets[1];
/* YV12/IMC1 store V before U — pick plane indices accordingly
 * (the u/v index assignments themselves are in lines not visible here). */
1959 if (fourcc == VA_FOURCC_YV12 || fourcc == VA_FOURCC_IMC1)
1962 width[1] = width[0] / 2;
1963 height[1] = height[0] / 2;
1964 pitch[1] = obj_image->image.pitches[u];
1965 bo_offset[1] = obj_image->image.offsets[u];
1966 width[2] = width[0] / 2;
1967 height[2] = height[0] / 2;
1968 pitch[2] = obj_image->image.pitches[v];
1969 bo_offset[2] = obj_image->image.offsets[v];
/*
 * Register the input and output plane surfaces of the YUV420 8-bit
 * scaling kernel in the GPE binding table: sampled (UNORM) views for
 * the source, media-block (UINT) views for the destination. NV12 uses
 * an interleaved R8G8/R16 chroma view; planar formats use two R8 views.
 */
1977 gen8_gpe_context_yuv420p8_scaling_surfaces(VADriverContextP ctx,
1978 struct i965_gpe_context *gpe_context,
1979 VARectangle *src_rect,
1980 struct i965_surface *src_surface,
1981 VARectangle *dst_rect,
1982 struct i965_surface *dst_surface)
1984 unsigned int fourcc;
1985 int width[3], height[3], pitch[3], bo_offset[3];
1987 struct object_surface *obj_surface;
1988 struct object_image *obj_image;
1991 if ((gpe_context == NULL) ||
1992 (src_rect == NULL) || (src_surface == NULL) ||
1993 (dst_rect == NULL) || (dst_surface == NULL))
1996 if (src_surface->base == NULL || dst_surface->base == NULL)
1999 fourcc = pp_get_surface_fourcc(ctx, src_surface);
/* Pick the backing BO depending on whether the input is a VA surface
 * or a VA image. */
2001 if (src_surface->type == I965_SURFACE_TYPE_SURFACE) {
2002 obj_surface = (struct object_surface *)src_surface->base;
2003 bo = obj_surface->bo;
2005 obj_image = (struct object_image *)src_surface->base;
2010 if (gen8_pp_context_get_surface_conf(ctx, src_surface, src_rect,
2011 width, height, pitch,
/* Input luma plane. */
2013 bti = BTI_SCALING_INPUT_Y;
2015 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2017 width[0], height[0],
2019 I965_SURFACEFORMAT_R8_UNORM,
/* Input chroma: one interleaved view for NV12, two planar views else. */
2021 if (fourcc == VA_FOURCC_NV12) {
2022 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2024 width[1], height[1],
2026 I965_SURFACEFORMAT_R8G8_UNORM,
2029 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2031 width[1], height[1],
2033 I965_SURFACEFORMAT_R8_UNORM,
2036 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2038 width[2], height[2],
2040 I965_SURFACEFORMAT_R8_UNORM,
2045 fourcc = pp_get_surface_fourcc(ctx, dst_surface);
2047 if (dst_surface->type == I965_SURFACE_TYPE_SURFACE) {
2048 obj_surface = (struct object_surface *)dst_surface->base;
2049 bo = obj_surface->bo;
2051 obj_image = (struct object_image *)dst_surface->base;
2055 if (gen8_pp_context_get_surface_conf(ctx, dst_surface, dst_rect,
2056 width, height, pitch,
/* Output luma plane: UINT formats for media-block writes. */
2058 bti = BTI_SCALING_OUTPUT_Y;
2060 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2062 width[0], height[0],
2064 I965_SURFACEFORMAT_R8_UINT,
/* NV12 output chroma as R16 (UV pair per element), hence width*2. */
2066 if (fourcc == VA_FOURCC_NV12) {
2067 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2069 width[1] * 2, height[1],
2071 I965_SURFACEFORMAT_R16_UINT,
2074 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2076 width[1], height[1],
2078 I965_SURFACEFORMAT_R8_UINT,
2081 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2083 width[2], height[2],
2085 I965_SURFACEFORMAT_R8_UINT,
/*
 * YUV420 8-bit scaling entry point: validate arguments and the context
 * capability flag, then program sampler, CURBE, surfaces and interface
 * data on the scaling GPE context and run one 16x16-block walker pass
 * over the destination rectangle.
 */
2094 gen8_yuv420p8_scaling_post_processing(
2095 VADriverContextP ctx,
2096 struct i965_post_processing_context *pp_context,
2097 struct i965_surface *src_surface,
2098 VARectangle *src_rect,
2099 struct i965_surface *dst_surface,
2100 VARectangle *dst_rect)
2102 struct i965_gpe_context *gpe_context;
2103 struct gpe_media_object_walker_parameter media_object_walker_param;
2104 struct intel_vpp_kernel_walker_parameter kernel_walker_param;
2106 if (!pp_context || !src_surface || !src_rect || !dst_surface || !dst_rect)
2107 return VA_STATUS_ERROR_INVALID_PARAMETER;
/* This path requires the 8-bit→8-bit scaling kernels to be loaded. */
2109 if (!(pp_context->scaling_gpe_context_initialized & VPPGPE_8BIT_8BIT))
2110 return VA_STATUS_ERROR_UNIMPLEMENTED;
2112 gpe_context = &pp_context->scaling_gpe_context;
2114 gen8_gpe_context_init(ctx, gpe_context);
2115 gen8_vpp_scaling_sample_state(ctx, gpe_context, src_rect, dst_rect);
2116 gen8_gpe_reset_binding_table(ctx, gpe_context);
2117 gen8_gpe_context_yuv420p8_scaling_curbe(ctx, gpe_context,
2118 src_rect, src_surface,
2119 dst_rect, dst_surface);
2121 gen8_gpe_context_yuv420p8_scaling_surfaces(ctx, gpe_context,
2122 src_rect, src_surface,
2123 dst_rect, dst_surface);
2125 gen8_gpe_setup_interface_data(ctx, gpe_context);
/* Each walker step covers a 16x16 destination block (>> 4). */
2127 memset(&kernel_walker_param, 0, sizeof(kernel_walker_param));
2128 kernel_walker_param.resolution_x = ALIGN(dst_rect->width, 16) >> 4;
2129 kernel_walker_param.resolution_y = ALIGN(dst_rect->height, 16) >> 4;
2130 kernel_walker_param.no_dependency = 1;
2132 intel_vpp_init_media_object_walker_parameter(&kernel_walker_param, &media_object_walker_param);
2133 media_object_walker_param.interface_offset = 0;
2134 gen8_run_kernel_media_object_walker(ctx, pp_context->batch,
2136 &media_object_walker_param);
2138 return VA_STATUS_SUCCESS;
/*
 * Fill the scaling kernel's CURBE for the 8-bit 4:2:0 → RGB32 path:
 * same geometry setup as the YUV→YUV variant, plus explicit source
 * (I420/NV12/YV12) and destination (RGBX/RGBA/BGRX/BGRA) format codes
 * and the YUV→RGB conversion coefficients chosen from the surface's
 * color-standard flags.
 */
2141 gen8_gpe_context_8bit_420_rgb32_scaling_curbe(VADriverContextP ctx,
2142 struct i965_gpe_context *gpe_context,
2143 VARectangle *src_rect,
2144 struct i965_surface *src_surface,
2145 VARectangle *dst_rect,
2146 struct i965_surface *dst_surface)
2148 struct scaling_input_parameter *scaling_curbe;
2149 float src_width, src_height;
2151 unsigned int fourcc;
2152 int src_format = SRC_FORMAT_I420, dst_format = DST_FORMAT_RGBX;
2153 const float * yuv_to_rgb_coefs;
2154 size_t yuv_to_rgb_coefs_size;
2156 if ((gpe_context == NULL) ||
2157 (src_rect == NULL) || (src_surface == NULL) ||
2158 (dst_rect == NULL) || (dst_surface == NULL))
2161 scaling_curbe = i965_gpe_context_map_curbe(gpe_context);
2166 memset(scaling_curbe, 0, sizeof(struct scaling_input_parameter));
2168 scaling_curbe->bti_input = BTI_SCALING_INPUT_Y;
2169 scaling_curbe->bti_output = BTI_SCALING_OUTPUT_Y;
2171 /* As the src_rect/dst_rect is already checked, it is skipped.*/
2172 scaling_curbe->x_dst = dst_rect->x;
2173 scaling_curbe->y_dst = dst_rect->y;
/* Source extent from the origin, for normalizing into [0, 1]. */
2175 src_width = src_rect->x + src_rect->width;
2176 src_height = src_rect->y + src_rect->height;
2178 scaling_curbe->inv_width = 1 / src_width;
2179 scaling_curbe->inv_height = 1 / src_height;
/* Normalized per-pixel step and start coordinate on each axis. */
2181 coeff = (float)(src_rect->width) / dst_rect->width;
2182 scaling_curbe->x_factor = coeff / src_width;
2183 scaling_curbe->x_orig = (float)(src_rect->x) / src_width;
2185 coeff = (float)(src_rect->height) / dst_rect->height;
2186 scaling_curbe->y_factor = coeff / src_height;
2187 scaling_curbe->y_orig = (float)(src_rect->y) / src_height;
/* Map the source fourcc onto the kernel's format code. */
2189 fourcc = pp_get_surface_fourcc(ctx, src_surface);
2192 case VA_FOURCC_I420:
2193 case VA_FOURCC_IMC3: /* pitch / base address is set via surface_state */
2194 src_format = SRC_FORMAT_I420;
2197 case VA_FOURCC_NV12:
2198 src_format = SRC_FORMAT_NV12;
2201 case VA_FOURCC_YV12:
2202 case VA_FOURCC_IMC1: /* pitch / base address is set via surface_state */
2203 src_format = SRC_FORMAT_YV12;
/* Map the destination fourcc onto the kernel's RGB layout code. */
2210 fourcc = pp_get_surface_fourcc(ctx, dst_surface);
2213 case VA_FOURCC_RGBX:
2214 dst_format = DST_FORMAT_RGBX;
2217 case VA_FOURCC_RGBA:
2218 dst_format = DST_FORMAT_RGBA;
2221 case VA_FOURCC_BGRX:
2222 dst_format = DST_FORMAT_BGRX;
2225 case VA_FOURCC_BGRA:
2226 dst_format = DST_FORMAT_BGRA;
2233 scaling_curbe->dw2.src_format = src_format;
2234 scaling_curbe->dw2.dst_format = dst_format;
/* Color-conversion matrix follows the source's declared color standard. */
2236 yuv_to_rgb_coefs = i915_color_standard_to_coefs(i915_filter_to_color_standard(src_surface->flags & VA_SRC_COLOR_MASK), &yuv_to_rgb_coefs_size);
2237 memcpy(&scaling_curbe->coef_ry, yuv_to_rgb_coefs, yuv_to_rgb_coefs_size);
2239 i965_gpe_context_unmap_curbe(gpe_context);
/* Populates the GPE binding table for the 8-bit 4:2:0 -> RGB32 scaling
 * kernel: the source luma plane, the source chroma (one interleaved UV
 * surface for NV12, separate U/V surfaces for planar formats) and the
 * RGB32 destination surface. */
2243 gen8_gpe_context_8bit_420_rgb32_scaling_surfaces(VADriverContextP ctx,
2244 struct i965_gpe_context *gpe_context,
2245 VARectangle *src_rect,
2246 struct i965_surface *src_surface,
2247 VARectangle *dst_rect,
2248 struct i965_surface *dst_surface)
2250 unsigned int fourcc;
2251 int width[3], height[3], pitch[3], bo_offset[3];
2253 struct object_surface *obj_surface;
2254 struct object_image *obj_image;
/* Validate all pointer arguments, including the backing objects. */
2257 if ((gpe_context == NULL) ||
2258 (src_rect == NULL) || (src_surface == NULL) ||
2259 (dst_rect == NULL) || (dst_surface == NULL))
2262 if (src_surface->base == NULL || dst_surface->base == NULL)
2265 fourcc = pp_get_surface_fourcc(ctx, src_surface);
/* The backing bo comes either from a surface object or an image object. */
2267 if (src_surface->type == I965_SURFACE_TYPE_SURFACE) {
2268 obj_surface = (struct object_surface *)src_surface->base;
2269 bo = obj_surface->bo;
2271 obj_image = (struct object_image *)src_surface->base;
/* Query per-plane width/height/pitch/offset for the source surface. */
2275 if (gen8_pp_context_get_surface_conf(ctx, src_surface, src_rect,
2276 width, height, pitch,
/* Bind the luma plane as an R8 2D surface at the input BTI. */
2279 bti = BTI_SCALING_INPUT_Y;
2280 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2282 width[0], height[0],
2284 I965_SURFACEFORMAT_R8_UNORM,
/* NV12 carries interleaved UV in a single R8G8 surface; planar formats
 * instead bind U and V as two separate R8 surfaces below. */
2287 if (fourcc == VA_FOURCC_NV12) {
2288 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2290 width[1], height[1],
2292 I965_SURFACEFORMAT_R8G8_UNORM,
2295 /* The corresponding shader handles U, V plane in order */
2296 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2298 width[1], height[1],
2300 I965_SURFACEFORMAT_R8_UNORM,
2303 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2305 width[2], height[2],
2307 I965_SURFACEFORMAT_R8_UNORM,
/* Now the destination: resolve its bo the same way as the source. */
2312 fourcc = pp_get_surface_fourcc(ctx, dst_surface);
2314 if (dst_surface->type == I965_SURFACE_TYPE_SURFACE) {
2315 obj_surface = (struct object_surface *)dst_surface->base;
2316 bo = obj_surface->bo;
2318 obj_image = (struct object_image *)dst_surface->base;
2322 if (gen8_pp_context_get_surface_conf(ctx, dst_surface, dst_rect,
2323 width, height, pitch,
/* Only 32-bit RGB destinations are supported; each pixel is 4 bytes,
 * so the pitch must cover width * 4 bytes. */
2325 assert(fourcc == VA_FOURCC_RGBX ||
2326 fourcc == VA_FOURCC_RGBA ||
2327 fourcc == VA_FOURCC_BGRX ||
2328 fourcc == VA_FOURCC_BGRA);
2329 assert(width[0] * 4 <= pitch[0]);
2331 /* output surface */
2332 bti = BTI_SCALING_OUTPUT_Y;
/* The RGB32 output is bound as a byte-wide (R8_UINT) surface with
 * width[0] * 4 elements — presumably so the kernel writes it byte-wise;
 * NOTE(review): confirm against the scaling shader. */
2333 gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2335 width[0] * 4, height[0],
2337 I965_SURFACEFORMAT_R8_UINT,
/* Top-level entry for the 8-bit 4:2:0 -> RGB32 scaling/color-conversion
 * pass: programs the GPE context (sampler state, binding table, CURBE,
 * surfaces, interface descriptors) and launches the kernel with a
 * media-object walker over 16x16 destination blocks. */
2343 gen8_8bit_420_rgb32_scaling_post_processing(VADriverContextP ctx,
2344 struct i965_post_processing_context *pp_context,
2345 struct i965_surface *src_surface,
2346 VARectangle *src_rect,
2347 struct i965_surface *dst_surface,
2348 VARectangle *dst_rect)
2350 struct i965_gpe_context *gpe_context;
2351 struct gpe_media_object_walker_parameter media_object_walker_param;
2352 struct intel_vpp_kernel_walker_parameter kernel_walker_param;
2354 if (!pp_context || !src_surface || !src_rect || !dst_surface || !dst_rect)
2355 return VA_STATUS_ERROR_INVALID_PARAMETER;
/* Bail out if the 8-bit 4:2:0 -> RGB32 scaling kernel was never loaded
 * into the shared scaling GPE context. */
2357 if (!(pp_context->scaling_gpe_context_initialized & VPPGPE_8BIT_420_RGB32))
2358 return VA_STATUS_ERROR_UNIMPLEMENTED;
2360 gpe_context = &pp_context->scaling_gpe_context;
/* Program sampler state, binding table, CURBE constants and surface
 * states before dispatching the kernel. */
2362 gen8_gpe_context_init(ctx, gpe_context);
2363 gen8_vpp_scaling_sample_state(ctx, gpe_context, src_rect, dst_rect);
2364 gen8_gpe_reset_binding_table(ctx, gpe_context);
2365 gen8_gpe_context_8bit_420_rgb32_scaling_curbe(ctx, gpe_context,
2366 src_rect, src_surface,
2367 dst_rect, dst_surface);
2369 gen8_gpe_context_8bit_420_rgb32_scaling_surfaces(ctx, gpe_context,
2370 src_rect, src_surface,
2371 dst_rect, dst_surface);
2373 gen8_gpe_setup_interface_data(ctx, gpe_context);
/* One walker step per 16x16 destination block; blocks are independent
 * (no inter-thread dependency). */
2375 memset(&kernel_walker_param, 0, sizeof(kernel_walker_param));
2376 kernel_walker_param.resolution_x = ALIGN(dst_rect->width, 16) >> 4;
2377 kernel_walker_param.resolution_y = ALIGN(dst_rect->height, 16) >> 4;
2378 kernel_walker_param.no_dependency = 1;
2380 intel_vpp_init_media_object_walker_parameter(&kernel_walker_param, &media_object_walker_param);
/* interface_offset = 1 selects the interface descriptor at index 1 —
 * presumably the RGB32 variant of the scaling kernel; NOTE(review):
 * confirm against gen8_gpe_setup_interface_data. */
2381 media_object_walker_param.interface_offset = 1;
2382 gen8_run_kernel_media_object_walker(ctx, pp_context->batch,
2384 &media_object_walker_param);
2386 return VA_STATUS_SUCCESS;