2 * Copyright © 2011 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Li Xiaowei <xiaowei.a.li@intel.com>
26 * Li Zhong <zhong.li@intel.com>
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "gen75_vpp_vebox.h"
40 #include "intel_media.h"
42 #include "i965_post_processing.h"
/* Forward declarations of core driver entry points used by this file
 * (return types appear on lines elided from this excerpt). */
47 i965_MapBuffer(VADriverContextP ctx, VABufferID buf_id, void **);
50 i965_UnmapBuffer(VADriverContextP ctx, VABufferID buf_id);
53 i965_DeriveImage(VADriverContextP ctx, VABufferID surface, VAImage *out_image);
56 i965_DestroyImage(VADriverContextP ctx, VAImageID image);
/* Convert src_obj_surf into dst_obj_surf via the generic image-processing
 * path (i965_image_processing). Both surfaces must have identical original
 * dimensions; the full surface area is copied (rects cover orig_width x
 * orig_height starting at 0,0). Returns the VAStatus of the conversion. */
59 vpp_surface_convert(VADriverContextP ctx, struct object_surface *src_obj_surf,
60 struct object_surface *dst_obj_surf)
62 VAStatus va_status = VA_STATUS_SUCCESS;
64 assert(src_obj_surf->orig_width == dst_obj_surf->orig_width);
65 assert(src_obj_surf->orig_height == dst_obj_surf->orig_height);
/* Source and destination rectangles both span the whole surface. */
67 VARectangle src_rect, dst_rect;
68 src_rect.x = dst_rect.x = 0;
69 src_rect.y = dst_rect.y = 0;
70 src_rect.width = dst_rect.width = src_obj_surf->orig_width;
71 src_rect.height = dst_rect.height = dst_obj_surf->orig_height;
/* Wrap both object_surfaces as i965_surface descriptors (progressive frames). */
73 struct i965_surface src_surface, dst_surface;
74 src_surface.base = (struct object_base *)src_obj_surf;
75 src_surface.type = I965_SURFACE_TYPE_SURFACE;
76 src_surface.flags = I965_SURFACE_FLAG_FRAME;
78 dst_surface.base = (struct object_base *)dst_obj_surf;
79 dst_surface.type = I965_SURFACE_TYPE_SURFACE;
80 dst_surface.flags = I965_SURFACE_FLAG_FRAME;
82 va_status = i965_image_processing(ctx,
/* Scale src_obj_surf into dst_obj_surf using the dedicated scaling path
 * (i965_scaling_processing). Both surfaces must be NV12; the whole source
 * area is mapped onto the whole destination area. `flags` is forwarded to
 * the scaler (elided call arguments follow the visible call line). */
91 vpp_surface_scaling(VADriverContextP ctx, struct object_surface *src_obj_surf,
92 struct object_surface *dst_obj_surf, uint32_t flags)
94 VAStatus va_status = VA_STATUS_SUCCESS;
96 assert(src_obj_surf->fourcc == VA_FOURCC_NV12);
97 assert(dst_obj_surf->fourcc == VA_FOURCC_NV12);
99 VARectangle src_rect, dst_rect;
102 src_rect.width = src_obj_surf->orig_width;
103 src_rect.height = src_obj_surf->orig_height;
107 dst_rect.width = dst_obj_surf->orig_width;
108 dst_rect.height = dst_obj_surf->orig_height;
110 va_status = i965_scaling_processing(ctx,
/* Run the sharpness (GPE-based) filter for the current frame pair.
 * Lazily creates proc_ctx->vpp_gpe_ctx on first use, points it at the
 * pipeline parameters and the current input/output surfaces from the
 * frame store, then executes vpp_gpe_process_picture. */
121 vpp_sharpness_filtering(VADriverContextP ctx,
122 struct intel_vebox_context *proc_ctx)
124 VAStatus va_status = VA_STATUS_SUCCESS;
126 if(proc_ctx->vpp_gpe_ctx == NULL){
127 proc_ctx->vpp_gpe_ctx = vpp_gpe_context_init(ctx);
/* Hand the GPE context the current pipeline state and surfaces. */
130 proc_ctx->vpp_gpe_ctx->pipeline_param = proc_ctx->pipeline_param;
131 proc_ctx->vpp_gpe_ctx->surface_pipeline_input_object = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
132 proc_ctx->vpp_gpe_ctx->surface_output_object = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
134 va_status = vpp_gpe_process_picture(ctx, proc_ctx->vpp_gpe_ctx);
/* Fill the VEBOX DN/DI (denoise / deinterlace) hardware state table.
 * Writes one 32-bit DWord at a time through p_table into the mapped
 * dndi_state_table buffer; the trailing comments name each bit-field.
 * Field layout differs slightly per GPU generation: Haswell gets a
 * leading reserved DWord, Gen8/Gen9 get an extra hot-pixel DWord. */
139 void hsw_veb_dndi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
141 struct i965_driver_data *i965 = i965_driver_data(ctx);
142 unsigned int* p_table ;
143 unsigned int progressive_dn = 1;
144 unsigned int dndi_top_first = 0;
145 unsigned int is_mcdi_enabled = 0;
/* Derive field-order and MCDI flags from the deinterlacing filter params. */
147 if (proc_ctx->is_di_enabled) {
148 const VAProcFilterParameterBufferDeinterlacing * const deint_params =
153 /* If we are in "First Frame" mode, i.e. past frames are not
154 available for motion measure, then don't use the TFF flag */
155 dndi_top_first = !(deint_params->flags & (proc_ctx->is_first_frame ?
156 VA_DEINTERLACING_BOTTOM_FIELD :
157 VA_DEINTERLACING_BOTTOM_FIELD_FIRST));
160 (deint_params->algorithm == VAProcDeinterlacingMotionCompensated);
164 VAProcFilterParameterBufferDeinterlacing *di_param =
165 (VAProcFilterParameterBufferDeinterlacing *) proc_ctx->filter_di;
167 VAProcFilterParameterBuffer * dn_param =
168 (VAProcFilterParameterBuffer *) proc_ctx->filter_dn;
170 p_table = (unsigned int *)proc_ctx->dndi_state_table.ptr;
172 if (IS_HASWELL(i965->intel.device_info))
173 *p_table ++ = 0; // reserved . w0
175 *p_table ++ = ( 140 << 24 | // denoise STAD threshold . w1
176 192 << 16 | // dnmh_history_max
177 0 << 12 | // reserved
178 7 << 8 | // dnmh_delta[3:0]
179 38 ); // denoise ASD threshold
181 *p_table ++ = ( 0 << 30 | // reserved . w2
182 0 << 24 | // temporal diff th
183 0 << 22 | // reserved.
184 0 << 16 | // low temporal diff th
186 1 << 8 | // denoise moving pixel th
187 38 ); // denoise th for sum of complexity measure
189 *p_table ++ = ( 0 << 30 | // reserved . w3
190 12<< 24 | // good neighbor th[5:0]
191 9 << 20 | // CAT slope minus 1
192 5 << 16 | // SAD Tight in
193 0 << 14 | // smooth mv th
194 0 << 12 | // reserved
195 1 << 8 | // bne_edge_th[3:0]
196 20 ); // block noise estimate noise th
198 *p_table ++ = ( 0 << 31 | // STMM blending constant select. w4
199 64 << 24 | // STMM trc1
200 125<< 16 | // STMM trc2
201 0 << 14 | // reserved
202 30 << 8 | // VECM_mul
203 150 ); // maximum STMM
205 *p_table ++ = ( 118<< 24 | // minimum STMM . W5
206 0 << 22 | // STMM shift down
207 1 << 20 | // STMM shift up
208 5 << 16 | // STMM output shift
209 100 << 8 | // SDI threshold
212 *p_table ++ = ( 50 << 24 | // SDI fallback mode 1 T1 constant . W6
213 100 << 16 | // SDI fallback mode 1 T2 constant
214 37 << 8 | // SDI fallback mode 2 constant(angle2x1)
215 175 ); // FMD temporal difference threshold
217 *p_table ++ = ( 16 << 24 | // FMD #1 vertical difference th . w7
218 100<< 16 | // FMD #2 vertical difference th
220 2 << 8 | // FMD tear threshold
221 is_mcdi_enabled << 7 | // MCDI Enable, use motion compensated deinterlace algorithm
222 progressive_dn << 6 | // progressive DN
224 dndi_top_first << 3 | // DN/DI Top First
227 *p_table ++ = ( 0 << 29 | // reserved . W8
228 32 << 23 | // dnmh_history_init[5:0]
229 10 << 19 | // neighborPixel th
230 0 << 18 | // reserved
231 0 << 16 | // FMD for 2nd field of previous frame
232 25 << 10 | // MC pixel consistency th
233 0 << 8 | // FMD for 1st field for current frame
237 *p_table ++ = ( 0 << 24 | // reserved
238 140<< 16 | // chr_dnmh_stad_th
239 0 << 13 | // reserved
240 1 << 12 | // chroma denoise enable
241 13 << 6 | // chr temp diff th
242 7 ); // chr temp diff low
/* Gen8/Gen9 append an extra DWord of hot-pixel parameters. */
244 if (IS_GEN8(i965->intel.device_info) ||
245 IS_GEN9(i965->intel.device_info))
246 *p_table ++ = 0; // parameters for hot pixel,
249 //Set default values for STDE
/* Write the default Skin Tone Detection/Enhancement (STDE) coefficients
 * (IECP STD/STE state DWords 15-28) at p_table. Values are the hardware
 * defaults with minor tuning; negative values are encoded in two's
 * complement at the field width noted in the trailing comments. */
250 void set_std_table_default(struct intel_vebox_context *proc_ctx, unsigned int *p_table) {
253 *p_table ++ = ( 0 << 31 | // Reserved
254 0x3F8 << 21 | // SATB1 (10 bits, default 8, optimized value -8)
257 0x7A ); // SATP1 (7 bits, default 6, optimized value -6)
260 *p_table ++ = ( 0 << 31 | // Reserved
266 *p_table ++ = ( 0 << 22 | // Reserved
271 *p_table ++ = ( 14 << 25 | // HUEP3
273 0x7A << 11 | // HUEP1 (7 bits, default value -6 = 7Ah)
277 *p_table ++ = ( 0 << 30 | // Reserved
280 0x3F8 ); // HUEB1 (10 bits, default value 8, optimized value -8)
283 *p_table ++ = ( 0 << 22 | // Reserved
288 *p_table ++ = ( 0 << 22 | // Reserved
/* "_DARK" variants: coefficients applied to dark skin-tone regions. */
293 *p_table ++ = ( 0 << 31 | // Reserved
294 0 << 21 | // SATB1_DARK
295 31 << 14 | // SATP3_DARK
296 31 << 7 | // SATP2_DARK
297 0x7B ); // SATP1_DARK (7 bits, default value -11 = FF5h, optimized value -5)
300 *p_table ++ = ( 0 << 31 | // Reserved
301 305 << 20 | // SATS0_DARK
302 124 << 10 | // SATB3_DARK
306 *p_table ++ = ( 0 << 22 | // Reserved
307 256 << 11 | // SATS2_DARK
311 *p_table ++ = ( 14 << 25 | // HUEP3_DARK
312 14 << 18 | // HUEP2_DARK
313 14 << 11 | // HUEP1_DARK
317 *p_table ++ = ( 0 << 30 | // Reserved
318 56 << 20 | // HUEB3_DARK
319 56 << 10 | // HUEB2_DARK
323 *p_table ++ = ( 0 << 22 | // Reserved
324 256 << 11 | // HUES1_DARK
328 *p_table ++ = ( 0 << 22 | // Reserved
329 256 << 11 | // HUES3_DARK
333 //Set values for STDE factor 3
/* Write STDE coefficients tuned for skin-tone enhancement factor 3
 * (IECP STD/STE state DWords 15-28). Same layout as
 * set_std_table_default, with factor-3 optimized values. */
334 void set_std_table_3(struct intel_vebox_context *proc_ctx, unsigned int *p_table) {
337 *p_table ++ = ( 0 << 31 | // Reserved
338 1016 << 21 | // SATB1 (10 bits, default 8, optimized value 1016)
341 122 ); // SATP1 (7 bits, default 6, optimized value 122)
344 *p_table ++ = ( 0 << 31 | // Reserved
350 *p_table ++ = ( 0 << 22 | // Reserved
355 *p_table ++ = ( 14 << 25 | // HUEP3
357 122 << 11 | // HUEP1 (7 bits, default value -6 = 7Ah, optimized 122)
361 *p_table ++ = ( 0 << 30 | // Reserved
362 56 << 20 | // HUEB3 (default 256, optimized 56)
364 1016 ); // HUEB1 (10 bits, default value 8, optimized value 1016)
367 *p_table ++ = ( 0 << 22 | // Reserved
372 *p_table ++ = ( 0 << 22 | // Reserved
/* Dark-region coefficients (same values as the default table). */
377 *p_table ++ = ( 0 << 31 | // Reserved
378 0 << 21 | // SATB1_DARK
379 31 << 14 | // SATP3_DARK
380 31 << 7 | // SATP2_DARK
381 123 ); // SATP1_DARK (7 bits, default value -11 = FF5h, optimized value 123)
384 *p_table ++ = ( 0 << 31 | // Reserved
385 305 << 20 | // SATS0_DARK
386 124 << 10 | // SATB3_DARK
390 *p_table ++ = ( 0 << 22 | // Reserved
391 256 << 11 | // SATS2_DARK
395 *p_table ++ = ( 14 << 25 | // HUEP3_DARK
396 14 << 18 | // HUEP2_DARK
397 14 << 11 | // HUEP1_DARK
401 *p_table ++ = ( 0 << 30 | // Reserved
402 56 << 20 | // HUEB3_DARK
403 56 << 10 | // HUEB2_DARK
407 *p_table ++ = ( 0 << 22 | // Reserved
408 256 << 11 | // HUES1_DARK
412 *p_table ++ = ( 0 << 22 | // Reserved
413 256 << 11 | // HUES3_DARK
417 //Set values for STDE factor 6
/* Write STDE coefficients tuned for skin-tone enhancement factor 6
 * (IECP STD/STE state DWords 15-28). Same layout as
 * set_std_table_default, with factor-6 optimized values. */
418 void set_std_table_6(struct intel_vebox_context *proc_ctx, unsigned int *p_table) {
421 *p_table ++ = ( 0 << 31 | // Reserved
422 0 << 21 | // SATB1 (10 bits, default 8, optimized value 0)
424 31 << 7 | // SATP2 (default 6, optimized 31)
425 114 ); // SATP1 (7 bits, default 6, optimized value 114)
428 *p_table ++ = ( 0 << 31 | // Reserved
429 467 << 20 | // SATS0 (default 297, optimized 467)
434 *p_table ++ = ( 0 << 22 | // Reserved
435 256 << 11 | // SATS2 (default 297, optimized 256)
439 *p_table ++ = ( 14 << 25 | // HUEP3
441 14 << 11 | // HUEP1 (7 bits, default value -6 = 7Ah, optimized value 14)
445 *p_table ++ = ( 0 << 30 | // Reserved
448 56 ); // HUEB1 (10 bits, default value 8, optimized value 56)
451 *p_table ++ = ( 0 << 22 | // Reserved
456 *p_table ++ = ( 0 << 22 | // Reserved
/* Dark-region coefficients (same values as the default table). */
461 *p_table ++ = ( 0 << 31 | // Reserved
462 0 << 21 | // SATB1_DARK
463 31 << 14 | // SATP3_DARK
464 31 << 7 | // SATP2_DARK
465 123 ); // SATP1_DARK (7 bits, default value -11 = FF5h, optimized value 123)
468 *p_table ++ = ( 0 << 31 | // Reserved
469 305 << 20 | // SATS0_DARK
470 124 << 10 | // SATB3_DARK
474 *p_table ++ = ( 0 << 22 | // Reserved
475 256 << 11 | // SATS2_DARK
479 *p_table ++ = ( 14 << 25 | // HUEP3_DARK
480 14 << 18 | // HUEP2_DARK
481 14 << 11 | // HUEP1_DARK
485 *p_table ++ = ( 0 << 30 | // Reserved
486 56 << 20 | // HUEB3_DARK
487 56 << 10 | // HUEB2_DARK
491 *p_table ++ = ( 0 << 22 | // Reserved
492 256 << 11 | // HUES1_DARK
496 *p_table ++ = ( 0 << 22 | // Reserved
497 256 << 11 | // HUES3_DARK
501 //Set values for STDE factor 9
/* Write STDE coefficients tuned for skin-tone enhancement factor 9
 * (IECP STD/STE state DWords 15-28). Same layout as
 * set_std_table_default, with factor-9 optimized values. */
502 void set_std_table_9(struct intel_vebox_context *proc_ctx, unsigned int *p_table) {
505 *p_table ++ = ( 0 << 31 | // Reserved
506 0 << 21 | // SATB1 (10 bits, default 8, optimized value 0)
508 31 << 7 | // SATP2 (default 6, optimized 31)
509 108 ); // SATP1 (7 bits, default 6, optimized value 108)
512 *p_table ++ = ( 0 << 31 | // Reserved
513 721 << 20 | // SATS0 (default 297, optimized 721)
518 *p_table ++ = ( 0 << 22 | // Reserved
519 256 << 11 | // SATS2 (default 297, optimized 256)
520 156 ); // SATS1 (default 176, optimized 156)
523 *p_table ++ = ( 14 << 25 | // HUEP3
525 14 << 11 | // HUEP1 (7 bits, default value -6 = 7Ah, optimized value 14)
529 *p_table ++ = ( 0 << 30 | // Reserved
532 56 ); // HUEB1 (10 bits, default value 8, optimized value 56)
535 *p_table ++ = ( 0 << 22 | // Reserved
540 *p_table ++ = ( 0 << 22 | // Reserved
/* Dark-region coefficients (same values as the default table). */
545 *p_table ++ = ( 0 << 31 | // Reserved
546 0 << 21 | // SATB1_DARK
547 31 << 14 | // SATP3_DARK
548 31 << 7 | // SATP2_DARK
549 123 ); // SATP1_DARK (7 bits, default value -11 = FF5h, optimized value 123)
552 *p_table ++ = ( 0 << 31 | // Reserved
553 305 << 20 | // SATS0_DARK
554 124 << 10 | // SATB3_DARK
558 *p_table ++ = ( 0 << 22 | // Reserved
559 256 << 11 | // SATS2_DARK
563 *p_table ++ = ( 14 << 25 | // HUEP3_DARK
564 14 << 18 | // HUEP2_DARK
565 14 << 11 | // HUEP1_DARK
569 *p_table ++ = ( 0 << 30 | // Reserved
570 56 << 20 | // HUEB3_DARK
571 56 << 10 | // HUEB2_DARK
575 *p_table ++ = ( 0 << 22 | // Reserved
576 256 << 11 | // HUES1_DARK
580 *p_table ++ = ( 0 << 22 | // Reserved
581 256 << 11 | // HUES3_DARK
/* Fill the IECP STD/STE (Skin Tone Detection / Enhancement) section of the
 * mapped iecp_state_table (starts at offset 0, 29 DWords total). If the
 * STD/STE filter is not requested, the section is zeroed. Otherwise
 * DWords 0-14 are written here and DWords 15-28 are delegated to one of
 * the set_std_table_* helpers selected by the filter's stde_factor value. */
586 void hsw_veb_iecp_std_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
588 unsigned int *p_table = (unsigned int *)proc_ctx->iecp_state_table.ptr;
590 if(!(proc_ctx->filters_mask & VPP_IECP_STD_STE)){
591 memset(p_table, 0, 29 * 4);
593 int stde_factor = 0; //default value
594 VAProcFilterParameterBuffer * std_param = (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_std;
595 stde_factor = std_param->value;
598 *p_table ++ = ( 154 << 24 | // V_Mid
600 14 << 10 | // Hue_Max
603 0 << 2 | // Output Control is set to output the 1=STD score /0=Output Pixels
604 1 << 1 | // Set STE Enable
605 1 ); // Set STD Enable
608 *p_table ++ = ( 0 << 31 | // Reserved
609 4 << 28 | // Diamond Margin
610 0 << 21 | // Diamond_du
611 3 << 18 | // HS_Margin
612 79 << 10 | // Cos(alpha)
617 *p_table ++ = ( 0 << 21 | // Reserved
618 100 << 13 | // Diamond_alpha
619 35 << 7 | // Diamond_Th
623 *p_table ++ = ( 254 << 24 | // Y_point_3
624 47 << 16 | // Y_point_2
625 46 << 8 | // Y_point_1
626 1 << 7 | // VY_STD_Enable
630 *p_table ++ = ( 0 << 18 | // Reserved
631 31 << 13 | // Y_slope_2
632 31 << 8 | // Y_slope_1
636 *p_table ++ = ( 400 << 16 | // INV_Skin_types_margin = 20* Skin_Type_margin => 20*20
637 3300 ); // INV_Margin_VYL => 1/Margin_VYL
640 *p_table ++ = ( 216 << 24 | // P1L
642 1600 ); // INV_Margin_VYU
645 *p_table ++ = ( 130 << 24 | // B1L
651 *p_table ++ = ( 0 << 27 | // Reserved
652 0x7FB << 16 | // S0L (11 bits, Default value: -5 = FBh, pad it with 1s to make it 11bits)
657 *p_table ++ = ( 0 << 22 | // Reserved
662 *p_table ++ = ( 0 << 27 | // Reserved
668 *p_table ++ = ( 163 << 24 | // B1U
674 *p_table ++ = ( 0 << 27 | // Reserved
680 *p_table ++ = ( 0 << 22 | // Reserved
681 0x74D << 11 | // S2U (11 bits, Default value -179 = F4Dh)
685 *p_table ++ = ( 0 << 28 | // Reserved
686 20 << 20 | // Skin_types_margin
687 120 << 12 | // Skin_types_thresh
688 1 << 11 | // Skin_Types_Enable
691 //Set DWord 15 through DWord 28 in their respective methods.
/* NOTE(review): p_table has been advanced past DWord 14 by the writes
 * above, so the helpers receive the address of DWord 15 as intended. */
692 switch(stde_factor) {
694 set_std_table_3(proc_ctx, p_table);
698 set_std_table_6(proc_ctx, p_table);
702 set_std_table_9(proc_ctx, p_table);
706 set_std_table_default(proc_ctx, p_table);
/* Fill the IECP ACE (Automatic Contrast Enhancement) section of the state
 * table (byte offset 116, 13 DWords). Zeroed when ACE is not requested;
 * otherwise a fixed, precomputed coefficient set is written. */
712 void hsw_veb_iecp_ace_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
714 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 116);
716 if(!(proc_ctx->filters_mask & VPP_IECP_ACE)){
717 memset(p_table, 0, 13 * 4);
719 *p_table ++ = 0x00000068;
720 *p_table ++ = 0x4c382410;
721 *p_table ++ = 0x9c887460;
722 *p_table ++ = 0xebd8c4b0;
723 *p_table ++ = 0x604c3824;
725 *p_table ++ = 0xb09c8874;
726 *p_table ++ = 0x0000d8c4;
727 *p_table ++ = 0x00000000;
728 *p_table ++ = 0x00000000;
729 *p_table ++ = 0x00000000;
731 *p_table ++ = 0x00000000;
732 *p_table ++ = 0x00000000;
733 *p_table ++ = 0x00000000;
/* Fill the IECP TCC (Total Color Control) section of the state table
 * (byte offset 168, 11 DWords). Zeroed when TCC is not requested;
 * otherwise a fixed coefficient set is written (the filter parameter
 * buffer is currently unused - see the commented-out cast below). */
737 void hsw_veb_iecp_tcc_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
739 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 168);
740 // VAProcFilterParameterBuffer * tcc_param =
741 // (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
743 if(!(proc_ctx->filters_mask & VPP_IECP_TCC)){
744 memset(p_table, 0, 11 * 4);
746 *p_table ++ = 0x00000000;
747 *p_table ++ = 0x00000000;
748 *p_table ++ = 0x1e34cc91;
749 *p_table ++ = 0x3e3cce91;
750 *p_table ++ = 0x02e80195;
752 *p_table ++ = 0x0197046b;
753 *p_table ++ = 0x01790174;
754 *p_table ++ = 0x00000000;
755 *p_table ++ = 0x00000000;
756 *p_table ++ = 0x03030000;
758 *p_table ++ = 0x009201c0;
/* Fill the IECP ProcAmp (color balance) section of the state table (byte
 * offset 212, 2 DWords). Zeroed when ProcAmp is not requested; otherwise
 * hue/saturation/brightness/contrast from the VAProcColorBalance filter
 * elements are converted to the hardware fixed-point formats:
 * contrast U4.7, brightness S7.4, cos/sin terms S7.8 (via
 * intel_format_convert). cos_c_s/sin_c_s fold hue rotation together with
 * contrast and saturation into a single 2x2 chroma transform. */
762 void hsw_veb_iecp_pro_amp_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
764 unsigned int contrast = 0x80; //default
765 int brightness = 0x00; //default
766 int cos_c_s = 256 ; //default
767 int sin_c_s = 0; //default
768 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 212);
770 if(!(proc_ctx->filters_mask & VPP_IECP_PRO_AMP)){
771 memset(p_table, 0, 2 * 4);
773 float src_saturation = 1.0;
775 float src_contrast = 1.0;
776 float src_brightness = 0.0;
777 float tmp_value = 0.0;
780 VAProcFilterParameterBufferColorBalance * amp_params =
781 (VAProcFilterParameterBufferColorBalance *) proc_ctx->filter_iecp_amp;
/* Scan all color-balance elements; each element selects one attribute. */
783 for (i = 0; i < proc_ctx->filter_iecp_amp_num_elements; i++){
784 VAProcColorBalanceType attrib = amp_params[i].attrib;
786 if(attrib == VAProcColorBalanceHue) {
787 src_hue = amp_params[i].value; //(-180.0, 180.0)
788 }else if(attrib == VAProcColorBalanceSaturation) {
789 src_saturation = amp_params[i].value; //(0.0, 10.0)
790 }else if(attrib == VAProcColorBalanceBrightness) {
791 src_brightness = amp_params[i].value; // (-100.0, 100.0)
792 brightness = intel_format_convert(src_brightness, 7, 4, 1);
793 }else if(attrib == VAProcColorBalanceContrast) {
794 src_contrast = amp_params[i].value; // (0.0, 10.0)
795 contrast = intel_format_convert(src_contrast, 4, 7, 0);
/* Combine hue rotation with contrast*saturation scaling. */
799 tmp_value = cos(src_hue/180*PI) * src_contrast * src_saturation;
800 cos_c_s = intel_format_convert(tmp_value, 7, 8, 1);
802 tmp_value = sin(src_hue/180*PI) * src_contrast * src_saturation;
803 sin_c_s = intel_format_convert(tmp_value, 7, 8, 1);
805 *p_table ++ = ( 0 << 28 | //reserved
806 contrast << 17 | //contrast value (U4.7 format)
808 brightness << 1| // S7.4 format
811 *p_table ++ = ( cos_c_s << 16 | // cos(h) * contrast * saturation
812 sin_c_s); // sin(h) * contrast * saturation
/* Fill the IECP CSC (color-space conversion) section of the state table
 * (byte offset 220, 8 DWords). Selects a 3x3 matrix plus per-channel
 * offsets based on the input/output fourcc pair: RGB->YUV (BT.601-style
 * coefficients), YUV->RGB, or identity when only a same-space conversion
 * is needed. Coefficients are packed in S2.10 fixed point and offsets in
 * S10 via intel_format_convert. Zeroed when CSC is disabled. */
818 void hsw_veb_iecp_csc_transform_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
820 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 220);
821 float tran_coef[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
822 float v_coef[3] = {0.0, 0.0, 0.0};
823 float u_coef[3] = {0.0, 0.0, 0.0};
824 int is_transform_enabled = 0;
826 if(!(proc_ctx->filters_mask & VPP_IECP_CSC_TRANSFORM)){
827 memset(p_table, 0, 8 * 4);
/* RGB input -> YUV output: forward RGB->YCbCr matrix. */
831 if(proc_ctx->fourcc_input == VA_FOURCC_RGBA &&
832 (proc_ctx->fourcc_output == VA_FOURCC_NV12 ||
833 proc_ctx->fourcc_output == VA_FOURCC_YV12 ||
/* NOTE(review): VA_FOURCC_YVY2 looks like a typo for VA_FOURCC_YUY2
 * (the YUV->RGB branch below tests YUY2) - confirm against va.h. */
834 proc_ctx->fourcc_output == VA_FOURCC_YVY2 ||
835 proc_ctx->fourcc_output == VA_FOURCC_AYUV)) {
837 tran_coef[0] = 0.257;
838 tran_coef[1] = 0.504;
839 tran_coef[2] = 0.098;
840 tran_coef[3] = -0.148;
841 tran_coef[4] = -0.291;
842 tran_coef[5] = 0.439;
843 tran_coef[6] = 0.439;
844 tran_coef[7] = -0.368;
845 tran_coef[8] = -0.071;
851 is_transform_enabled = 1;
/* YUV input -> RGB output: inverse YCbCr->RGB matrix. */
852 }else if((proc_ctx->fourcc_input == VA_FOURCC_NV12 ||
853 proc_ctx->fourcc_input == VA_FOURCC_YV12 ||
854 proc_ctx->fourcc_input == VA_FOURCC_YUY2 ||
855 proc_ctx->fourcc_input == VA_FOURCC_AYUV) &&
856 proc_ctx->fourcc_output == VA_FOURCC_RGBA) {
857 tran_coef[0] = 1.164;
858 tran_coef[1] = 0.000;
859 tran_coef[2] = 1.569;
860 tran_coef[3] = 1.164;
861 tran_coef[4] = -0.813;
862 tran_coef[5] = -0.392;
863 tran_coef[6] = 1.164;
864 tran_coef[7] = 2.017;
865 tran_coef[8] = 0.000;
868 v_coef[1] = -128 * 4;
869 v_coef[2] = -128 * 4;
871 is_transform_enabled = 1;
872 }else if(proc_ctx->fourcc_input != proc_ctx->fourcc_output){
873 //enable when input and output format are different.
874 is_transform_enabled = 1;
877 if(is_transform_enabled == 0){
878 memset(p_table, 0, 8 * 4);
880 *p_table ++ = ( 0 << 29 | //reserved
881 intel_format_convert(tran_coef[1], 2, 10, 1) << 16 | //c1, s2.10 format
882 intel_format_convert(tran_coef[0], 2, 10, 1) << 3 | //c0, s2.10 format
884 0 << 1 | // yuv_channel swap
885 is_transform_enabled);
887 *p_table ++ = ( 0 << 26 | //reserved
888 intel_format_convert(tran_coef[3], 2, 10, 1) << 13 |
889 intel_format_convert(tran_coef[2], 2, 10, 1));
891 *p_table ++ = ( 0 << 26 | //reserved
892 intel_format_convert(tran_coef[5], 2, 10, 1) << 13 |
893 intel_format_convert(tran_coef[4], 2, 10, 1));
895 *p_table ++ = ( 0 << 26 | //reserved
896 intel_format_convert(tran_coef[7], 2, 10, 1) << 13 |
897 intel_format_convert(tran_coef[6], 2, 10, 1));
899 *p_table ++ = ( 0 << 13 | //reserved
900 intel_format_convert(tran_coef[8], 2, 10, 1));
902 *p_table ++ = ( 0 << 22 | //reserved
903 intel_format_convert(u_coef[0], 10, 0, 1) << 11 |
904 intel_format_convert(v_coef[0], 10, 0, 1));
906 *p_table ++ = ( 0 << 22 | //reserved
907 intel_format_convert(u_coef[1], 10, 0, 1) << 11 |
908 intel_format_convert(v_coef[1], 10, 0, 1));
910 *p_table ++ = ( 0 << 22 | //reserved
911 intel_format_convert(u_coef[2], 10, 0, 1) << 11 |
912 intel_format_convert(v_coef[2], 10, 0, 1));
/* Fill the IECP AOI (Area Of Interest) section of the state table (byte
 * offset 252, 3 DWords). Zeroed when AOI is not requested; otherwise a
 * fixed default region is written. */
916 void hsw_veb_iecp_aoi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
918 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 252);
919 // VAProcFilterParameterBuffer * tcc_param =
920 // (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
922 if(!(proc_ctx->filters_mask & VPP_IECP_AOI)){
923 memset(p_table, 0, 3 * 4);
925 *p_table ++ = 0x00000000;
926 *p_table ++ = 0x00030000;
927 *p_table ++ = 0x00030000;
/* Populate the CPU-side VEBOX state tables. Maps each state buffer
 * (DN/DI and IECP) with dri_bo_map, fills it via the table writers above,
 * then unmaps. The IECP table (97 DWords) is zeroed first so sections for
 * disabled filters stay cleared. */
931 void hsw_veb_state_table_setup(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
933 if(proc_ctx->filters_mask & VPP_DNDI_MASK) {
934 dri_bo *dndi_bo = proc_ctx->dndi_state_table.bo;
935 dri_bo_map(dndi_bo, 1);
936 proc_ctx->dndi_state_table.ptr = dndi_bo->virtual;
938 hsw_veb_dndi_table(ctx, proc_ctx);
940 dri_bo_unmap(dndi_bo);
943 if(proc_ctx->filters_mask & VPP_IECP_MASK) {
944 dri_bo *iecp_bo = proc_ctx->iecp_state_table.bo;
945 dri_bo_map(iecp_bo, 1);
946 proc_ctx->iecp_state_table.ptr = iecp_bo->virtual;
947 memset(proc_ctx->iecp_state_table.ptr, 0, 97 * 4);
949 hsw_veb_iecp_std_table(ctx, proc_ctx);
950 hsw_veb_iecp_ace_table(ctx, proc_ctx);
951 hsw_veb_iecp_tcc_table(ctx, proc_ctx);
952 hsw_veb_iecp_pro_amp_table(ctx, proc_ctx);
953 hsw_veb_iecp_csc_transform_table(ctx, proc_ctx);
954 hsw_veb_iecp_aoi_table(ctx, proc_ctx);
956 dri_bo_unmap(iecp_bo);
/* Emit the VEB_STATE command into the batch buffer: one control DWord of
 * per-frame enable bits followed by relocations to the DN/DI, IECP,
 * gamut and vertex state buffers. */
960 void hsw_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
962 struct intel_batchbuffer *batch = proc_ctx->batch;
964 BEGIN_VEB_BATCH(batch, 6);
965 OUT_VEB_BATCH(batch, VEB_STATE | (6 - 2));
967 0 << 26 | // state surface control bits
968 0 << 11 | // reserved.
969 0 << 10 | // pipe sync disable
970 proc_ctx->current_output_type << 8 | // DI output frame
971 1 << 7 | // 444->422 downsample method
972 1 << 6 | // 422->420 downsample method
973 proc_ctx->is_first_frame << 5 | // DN/DI first frame
974 proc_ctx->is_di_enabled << 4 | // DI enable
975 proc_ctx->is_dn_enabled << 3 | // DN enable
976 proc_ctx->is_iecp_enabled << 2 | // global IECP enabled
977 0 << 1 | // ColorGamutCompressionEnable
978 0 ) ; // ColorGamutExpansionEnable.
/* State buffer relocations (GPU reads, instruction domain). */
981 proc_ctx->dndi_state_table.bo,
982 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
985 proc_ctx->iecp_state_table.bo,
986 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
989 proc_ctx->gamut_state_table.bo,
990 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
993 proc_ctx->vertex_state_table.bo,
994 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
996 ADVANCE_VEB_BATCH(batch);
/* Emit a VEB_SURFACE_STATE command describing either the current input
 * (is_output == 0) or current output (is_output != 0) surface from the
 * frame store. Derives the hardware surface format, pitch and chroma
 * layout from the surface fourcc (NV12 / YUY2 / AYUV / RGBA) and queries
 * the bo tiling mode for the tiled-surface bits. */
999 void hsw_veb_surface_state(VADriverContextP ctx, struct intel_vebox_context *proc_ctx, unsigned int is_output)
1001 struct intel_batchbuffer *batch = proc_ctx->batch;
1002 unsigned int u_offset_y = 0, v_offset_y = 0;
1003 unsigned int is_uv_interleaved = 0, tiling = 0, swizzle = 0;
1004 unsigned int surface_format = PLANAR_420_8;
1005 struct object_surface* obj_surf = NULL;
1006 unsigned int surface_pitch = 0;
1007 unsigned int half_pitch_chroma = 0;
1010 obj_surf = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
1012 obj_surf = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
1015 assert(obj_surf->fourcc == VA_FOURCC_NV12 ||
1016 obj_surf->fourcc == VA_FOURCC_YUY2 ||
1017 obj_surf->fourcc == VA_FOURCC_AYUV ||
1018 obj_surf->fourcc == VA_FOURCC_RGBA);
/* Per-fourcc format, pitch (bytes) and chroma interleave selection. */
1020 if (obj_surf->fourcc == VA_FOURCC_NV12) {
1021 surface_format = PLANAR_420_8;
1022 surface_pitch = obj_surf->width;
1023 is_uv_interleaved = 1;
1024 half_pitch_chroma = 0;
1025 } else if (obj_surf->fourcc == VA_FOURCC_YUY2) {
1026 surface_format = YCRCB_NORMAL;
1027 surface_pitch = obj_surf->width * 2;
1028 is_uv_interleaved = 0;
1029 half_pitch_chroma = 0;
1030 } else if (obj_surf->fourcc == VA_FOURCC_AYUV) {
1031 surface_format = PACKED_444A_8;
1032 surface_pitch = obj_surf->width * 4;
1033 is_uv_interleaved = 0;
1034 half_pitch_chroma = 0;
1035 } else if (obj_surf->fourcc == VA_FOURCC_RGBA) {
1036 surface_format = R8G8B8A8_UNORM_SRGB;
1037 surface_pitch = obj_surf->width * 4;
1038 is_uv_interleaved = 0;
1039 half_pitch_chroma = 0;
1042 u_offset_y = obj_surf->y_cb_offset;
1043 v_offset_y = obj_surf->y_cr_offset;
1045 dri_bo_get_tiling(obj_surf->bo, &tiling, &swizzle);
1047 BEGIN_VEB_BATCH(batch, 6);
1048 OUT_VEB_BATCH(batch, VEB_SURFACE_STATE | (6 - 2));
1049 OUT_VEB_BATCH(batch,
1050 0 << 1 | // reserved
1051 is_output); // surface identification.
1053 OUT_VEB_BATCH(batch,
1054 (obj_surf->orig_height - 1) << 18 | // height . w3
1055 (obj_surf->orig_width - 1) << 4 | // width
1058 OUT_VEB_BATCH(batch,
1059 surface_format << 28 | // surface format, YCbCr420. w4
1060 is_uv_interleaved << 27 | // interleave chroma, or two separate planes
1061 0 << 20 | // reserved
1062 (surface_pitch - 1) << 3 | // surface pitch, 64 align
1063 half_pitch_chroma << 2 | // half pitch for chroma
1064 !!tiling << 1 | // tiled surface, linear surface used
1065 (tiling == I915_TILING_Y)); // tiled walk, ignored when linear surface
1067 OUT_VEB_BATCH(batch,
1068 0 << 29 | // reserved . w5
1069 0 << 16 | // X offset for V(Cb)
1070 0 << 15 | // reserved
1071 u_offset_y); // Y offset for V(Cb)
1073 OUT_VEB_BATCH(batch,
1074 0 << 29 | // reserved . w6
1075 0 << 16 | // X offset for V(Cr)
1076 0 << 15 | // reserved
1077 v_offset_y ); // Y offset for V(Cr)
1079 ADVANCE_VEB_BATCH(batch);
/* Emit the VEB_DNDI_IECP_STATE command: the processed width (64-aligned,
 * clamped to the input surface width) followed by relocations for every
 * frame-store surface. Input-side surfaces are GPU-read-only; output
 * surfaces (STMM out, DN out, current/previous out, statistics) are
 * relocated with a render-domain write. */
1082 void hsw_veb_dndi_iecp_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
1084 struct intel_batchbuffer *batch = proc_ctx->batch;
1085 unsigned char frame_ctrl_bits = 0;
1086 struct object_surface *obj_surface = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
1087 unsigned int width64 = ALIGN(proc_ctx->width_input, 64);
1089 assert(obj_surface);
1090 if (width64 > obj_surface->orig_width)
1091 width64 = obj_surface->orig_width;
1093 /* s1:update the previous and current input */
1094 /* tempFrame = proc_ctx->frame_store[FRAME_IN_PREVIOUS];
1095 proc_ctx->frame_store[FRAME_IN_PREVIOUS] = proc_ctx->frame_store[FRAME_IN_CURRENT]; ;
1096 proc_ctx->frame_store[FRAME_IN_CURRENT] = tempFrame;
1098 if(proc_ctx->surface_input_vebox != -1){
1099 vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
1100 proc_ctx->surface_input_vebox);
1102 vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
1103 proc_ctx->surface_input);
1106 /*s2: update the STMM input and output */
1107 /* tempFrame = proc_ctx->frame_store[FRAME_IN_STMM];
1108 proc_ctx->frame_store[FRAME_IN_STMM] = proc_ctx->frame_store[FRAME_OUT_STMM]; ;
1109 proc_ctx->frame_store[FRAME_OUT_STMM] = tempFrame;
1111 /*s3:set reloc buffer address */
1112 BEGIN_VEB_BATCH(batch, 10);
1113 OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (10 - 2));
1114 OUT_VEB_BATCH(batch, (width64 - 1));
1116 proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface->bo,
1117 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
1119 proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface->bo,
1120 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
1122 proc_ctx->frame_store[FRAME_IN_STMM].obj_surface->bo,
1123 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
1125 proc_ctx->frame_store[FRAME_OUT_STMM].obj_surface->bo,
1126 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
1128 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface->bo,
1129 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
1131 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface->bo,
1132 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
1134 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface->bo,
1135 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
1137 proc_ctx->frame_store[FRAME_OUT_STATISTIC].obj_surface->bo,
1138 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
1140 ADVANCE_VEB_BATCH(batch);
/* Reset a frame-store slot to its empty state: no surface attached, no
 * ownership flags set. Does not free any surface - see frame_store_clear. */
1144 frame_store_reset(VEBFrameStore *fs)
1146 fs->obj_surface = NULL;
1147 fs->surface_id = VA_INVALID_ID;
1148 fs->is_internal_surface = 0;
1149 fs->is_scratch_surface = 0;
/* Release a frame-store slot: destroy the surface only if this slot owns
 * it (scratch surface allocated internally), then reset the slot. User
 * provided surfaces are never destroyed here. */
1153 frame_store_clear(VEBFrameStore *fs, VADriverContextP ctx)
1155 if (fs->obj_surface && fs->is_scratch_surface) {
1156 VASurfaceID surface_id = fs->obj_surface->base.id;
1157 i965_DestroySurfaces(ctx, &surface_id, 1);
1159 frame_store_reset(fs);
1163 gen75_vebox_ensure_surfaces_storage(VADriverContextP ctx,
1164 struct intel_vebox_context *proc_ctx)
1166 struct i965_driver_data * const i965 = i965_driver_data(ctx);
1167 struct object_surface *input_obj_surface, *output_obj_surface;
1168 unsigned int input_fourcc, output_fourcc;
1169 unsigned int input_sampling, output_sampling;
1170 unsigned int input_tiling, output_tiling;
1171 unsigned int i, swizzle;
1175 /* Determine input surface info. Use native VEBOX format whenever
1176 possible. i.e. when the input surface format is not supported
1177 by the VEBOX engine, then allocate a temporary surface (live
1178 during the whole VPP pipeline lifetime)
1180 XXX: derive an actual surface format compatible with the input
1181 surface chroma format */
1182 input_obj_surface = proc_ctx->surface_input_vebox_object ?
1183 proc_ctx->surface_input_vebox_object : proc_ctx->surface_input_object;
1184 if (input_obj_surface->bo) {
1185 input_fourcc = input_obj_surface->fourcc;
1186 input_sampling = input_obj_surface->subsampling;
1187 dri_bo_get_tiling(input_obj_surface->bo, &input_tiling, &swizzle);
1188 input_tiling = !!input_tiling;
1191 input_fourcc = VA_FOURCC_NV12;
1192 input_sampling = SUBSAMPLE_YUV420;
1194 status = i965_check_alloc_surface_bo(ctx, input_obj_surface,
1195 input_tiling, input_fourcc, input_sampling);
1196 if (status != VA_STATUS_SUCCESS)
1200 /* Determine output surface info.
1202 XXX: derive an actual surface format compatible with the input
1203 surface chroma format */
1204 output_obj_surface = proc_ctx->surface_output_vebox_object ?
1205 proc_ctx->surface_output_vebox_object : proc_ctx->surface_output_object;
1206 if (output_obj_surface->bo) {
1207 output_fourcc = output_obj_surface->fourcc;
1208 output_sampling = output_obj_surface->subsampling;
1209 dri_bo_get_tiling(output_obj_surface->bo, &output_tiling, &swizzle);
1210 output_tiling = !!output_tiling;
1213 output_fourcc = VA_FOURCC_NV12;
1214 output_sampling = SUBSAMPLE_YUV420;
1216 status = i965_check_alloc_surface_bo(ctx, output_obj_surface,
1217 output_tiling, output_fourcc, output_sampling);
1218 if (status != VA_STATUS_SUCCESS)
1222 /* Update VEBOX pipeline formats */
1223 proc_ctx->fourcc_input = input_fourcc;
1224 proc_ctx->fourcc_output = output_fourcc;
1225 if (input_fourcc != output_fourcc) {
1226 proc_ctx->filters_mask |= VPP_IECP_CSC;
1228 if (input_fourcc == VA_FOURCC_RGBA &&
1229 (output_fourcc == VA_FOURCC_NV12 ||
1230 output_fourcc == VA_FOURCC_P010)) {
1231 proc_ctx->filters_mask |= VPP_IECP_CSC_TRANSFORM;
1232 } else if (output_fourcc == VA_FOURCC_RGBA &&
1233 (input_fourcc == VA_FOURCC_NV12 ||
1234 input_fourcc == VA_FOURCC_P010)) {
1235 proc_ctx->filters_mask |= VPP_IECP_CSC_TRANSFORM;
1239 proc_ctx->is_iecp_enabled = (proc_ctx->filters_mask & VPP_IECP_MASK) != 0;
1241 /* Create pipeline surfaces */
1242 for (i = 0; i < ARRAY_ELEMS(proc_ctx->frame_store); i ++) {
1243 struct object_surface *obj_surface;
1244 VASurfaceID new_surface;
1246 if (proc_ctx->frame_store[i].obj_surface)
1247 continue; // user allocated surface, not VEBOX internal
1249 status = i965_CreateSurfaces(ctx, proc_ctx->width_input,
1250 proc_ctx->height_input, VA_RT_FORMAT_YUV420, 1, &new_surface);
1251 if (status != VA_STATUS_SUCCESS)
1254 obj_surface = SURFACE(new_surface);
1255 assert(obj_surface != NULL);
1257 if (i <= FRAME_IN_PREVIOUS || i == FRAME_OUT_CURRENT_DN) {
1258 status = i965_check_alloc_surface_bo(ctx, obj_surface,
1259 input_tiling, input_fourcc, input_sampling);
1261 else if (i == FRAME_IN_STMM || i == FRAME_OUT_STMM) {
1262 status = i965_check_alloc_surface_bo(ctx, obj_surface,
1263 1, input_fourcc, input_sampling);
1265 else if (i >= FRAME_OUT_CURRENT) {
1266 status = i965_check_alloc_surface_bo(ctx, obj_surface,
1267 output_tiling, output_fourcc, output_sampling);
1269 if (status != VA_STATUS_SUCCESS)
1272 proc_ctx->frame_store[i].obj_surface = obj_surface;
1273 proc_ctx->frame_store[i].is_internal_surface = 1;
1274 proc_ctx->frame_store[i].is_scratch_surface = 1;
1277 /* Allocate DNDI state table */
1278 drm_intel_bo_unreference(proc_ctx->dndi_state_table.bo);
1279 bo = drm_intel_bo_alloc(i965->intel.bufmgr, "vebox: dndi state Buffer",
1281 proc_ctx->dndi_state_table.bo = bo;
1283 return VA_STATUS_ERROR_ALLOCATION_FAILED;
1285 /* Allocate IECP state table */
1286 drm_intel_bo_unreference(proc_ctx->iecp_state_table.bo);
1287 bo = drm_intel_bo_alloc(i965->intel.bufmgr, "vebox: iecp state Buffer",
1289 proc_ctx->iecp_state_table.bo = bo;
1291 return VA_STATUS_ERROR_ALLOCATION_FAILED;
1293 /* Allocate Gamut state table */
1294 drm_intel_bo_unreference(proc_ctx->gamut_state_table.bo);
1295 bo = drm_intel_bo_alloc(i965->intel.bufmgr, "vebox: gamut state Buffer",
1297 proc_ctx->gamut_state_table.bo = bo;
1299 return VA_STATUS_ERROR_ALLOCATION_FAILED;
1301 /* Allocate vertex state table */
1302 drm_intel_bo_unreference(proc_ctx->vertex_state_table.bo);
1303 bo = drm_intel_bo_alloc(i965->intel.bufmgr, "vebox: vertex state Buffer",
1305 proc_ctx->vertex_state_table.bo = bo;
1307 return VA_STATUS_ERROR_ALLOCATION_FAILED;
1309 return VA_STATUS_SUCCESS;
/* Binds the VEBOX frame store for the current pipeline run: attaches the
 * (possibly substituted) input/output surfaces, pulls in the forward
 * reference as the previous-input frame, rotates the STMM ping-pong pair
 * and clears user-provided output slots.
 * NOTE(review): this view is elided (blank lines/braces removed); the
 * dangling `if` conditions below had bodies/`continue`s in the original. */
1313 gen75_vebox_ensure_surfaces(VADriverContextP ctx,
1314 struct intel_vebox_context *proc_ctx)
1316 struct i965_driver_data * const i965 = i965_driver_data(ctx);
1317 struct object_surface *obj_surface;
1318 VEBFrameStore *ifs, *ofs;
1319 bool is_new_frame = 0;
/* "New frame" = caller passed a different input surface than the one
   cached in FRAME_IN_CURRENT by the previous call (second-field calls
   reuse the same surface id). */
1322 /* Update the previous input surface */
1323 obj_surface = proc_ctx->surface_input_object;
1325 is_new_frame = proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id !=
1326 obj_surface->base.id;
1328 ifs = &proc_ctx->frame_store[FRAME_IN_PREVIOUS];
/* When DN ran last frame, its denoised output is the best "previous"
   candidate; otherwise fall back to last frame's raw input. */
1329 ofs = &proc_ctx->frame_store[proc_ctx->is_dn_enabled ?
1330 FRAME_OUT_CURRENT_DN : FRAME_IN_CURRENT];
1332 const VAProcPipelineParameterBuffer * const pipe =
1333 proc_ctx->pipeline_param;
/* No usable forward reference -> keep whatever is already bound. */
1335 if (pipe->num_forward_references < 1)
1337 if (pipe->forward_references[0] == VA_INVALID_ID)
1340 obj_surface = SURFACE(pipe->forward_references[0]);
1341 if (!obj_surface || obj_surface->base.id == ifs->surface_id)
1344 frame_store_clear(ifs, ctx);
/* The reference surface may simultaneously be a pipeline output slot;
   detach it there (without freeing) before rebinding as previous input. */
1345 if (obj_surface->base.id == ofs->surface_id) {
1347 frame_store_reset(ofs);
1350 ifs->obj_surface = obj_surface;
1351 ifs->surface_id = obj_surface->base.id;
1352 ifs->is_internal_surface = 0;
1353 ifs->is_scratch_surface = 0;
1358 /* Update the input surface */
1359 obj_surface = proc_ctx->surface_input_vebox_object ?
1360 proc_ctx->surface_input_vebox_object : proc_ctx->surface_input_object;
1362 ifs = &proc_ctx->frame_store[FRAME_IN_CURRENT];
1363 frame_store_clear(ifs, ctx);
1364 ifs->obj_surface = obj_surface;
/* Tracked by the USER surface id (not the vebox substitute) so the
   new-frame / second-field detection above keys on the caller's surface. */
1365 ifs->surface_id = proc_ctx->surface_input_object->base.id;
1366 ifs->is_internal_surface = proc_ctx->surface_input_vebox_object != NULL;
1367 ifs->is_scratch_surface = 0;
1369 /* Update the Spatial Temporal Motion Measure (STMM) surfaces */
/* Swap the STMM in/out slots: last frame's output becomes this
   frame's motion-history input. */
1371 const VEBFrameStore tmpfs = proc_ctx->frame_store[FRAME_IN_STMM];
1372 proc_ctx->frame_store[FRAME_IN_STMM] =
1373 proc_ctx->frame_store[FRAME_OUT_STMM];
1374 proc_ctx->frame_store[FRAME_OUT_STMM] = tmpfs;
1377 /* Reset the output surfaces to defaults. i.e. clean from user surfaces */
1378 for (i = FRAME_OUT_CURRENT_DN; i <= FRAME_OUT_PREVIOUS; i++) {
1379 ofs = &proc_ctx->frame_store[i];
1380 if (!ofs->is_scratch_surface)
1381 ofs->obj_surface = NULL;
/* NOTE(review): output slots are tagged with the INPUT surface id —
   presumably so second-field detection works on outputs too; confirm
   this is intentional and not meant to be the output surface id. */
1382 ofs->surface_id = proc_ctx->surface_input_object->base.id;
1385 /* Update the output surfaces */
1386 obj_surface = proc_ctx->surface_output_vebox_object ?
1387 proc_ctx->surface_output_vebox_object : proc_ctx->surface_output_object;
/* current_output_type feeds the "DI output frame" field of VEB_STATE:
   2 = both fields, 0 = single field (advanced DI past the first frame). */
1389 proc_ctx->current_output_type = 2;
1390 if (proc_ctx->filters_mask == VPP_DNDI_DN && !proc_ctx->is_iecp_enabled)
1391 proc_ctx->current_output = FRAME_OUT_CURRENT_DN;
1392 else if (proc_ctx->is_di_adv_enabled && !proc_ctx->is_first_frame) {
1393 proc_ctx->current_output_type = 0;
1394 proc_ctx->current_output = proc_ctx->is_second_field ?
1395 FRAME_OUT_CURRENT : FRAME_OUT_PREVIOUS;
1398 proc_ctx->current_output = FRAME_OUT_CURRENT;
1399 ofs = &proc_ctx->frame_store[proc_ctx->current_output];
1400 frame_store_clear(ofs, ctx);
1401 ofs->obj_surface = obj_surface;
1402 ofs->surface_id = proc_ctx->surface_input_object->base.id;
1403 ofs->is_internal_surface = proc_ctx->surface_output_vebox_object != NULL;
1404 ofs->is_scratch_surface = 0;
1406 return VA_STATUS_SUCCESS;
/* Pre-pass before the VEBOX run: computes input/output dimensions from
 * the pipeline regions (or surface sizes), decides which conversion
 * stages are needed (PRE_FORMAT_CONVERT / POST_FORMAT_CONVERT /
 * POST_SCALING_CONVERT), lazily allocates the temporary NV12 surfaces
 * for those stages, and converts the input to NV12 when required.
 * Returns VA_STATUS_SUCCESS, or an error via ASSERT_RET on invalid
 * parameters / unsupported formats.
 * NOTE(review): view is elided — `else` lines and closing braces were
 * dropped between the paired branches below. */
1409 VAStatus hsw_veb_pre_format_convert(VADriverContextP ctx,
1410 struct intel_vebox_context *proc_ctx)
1413 struct i965_driver_data *i965 = i965_driver_data(ctx);
1414 struct object_surface* obj_surf_input = proc_ctx->surface_input_object;
1415 struct object_surface* obj_surf_output = proc_ctx->surface_output_object;
1416 struct object_surface* obj_surf_input_vebox;
1417 struct object_surface* obj_surf_output_vebox;
1419 proc_ctx->format_convert_flags = 0;
/* Either a surface object or an explicit region must describe each end. */
1421 if ((obj_surf_input == NULL) &&
1422 (proc_ctx->pipeline_param->surface_region == NULL))
1423 ASSERT_RET(0, VA_STATUS_ERROR_INVALID_PARAMETER);
1425 if ((obj_surf_output == NULL) &&
1426 (proc_ctx->pipeline_param->output_region == NULL))
1427 ASSERT_RET(0, VA_STATUS_ERROR_INVALID_PARAMETER);
/* Region takes precedence; otherwise use the full surface size. */
1429 if (proc_ctx->pipeline_param->surface_region) {
1430 proc_ctx->width_input = proc_ctx->pipeline_param->surface_region->width;
1431 proc_ctx->height_input = proc_ctx->pipeline_param->surface_region->height;
1433 proc_ctx->width_input = obj_surf_input->orig_width;
1434 proc_ctx->height_input = obj_surf_input->orig_height;
1437 if (proc_ctx->pipeline_param->output_region) {
1438 proc_ctx->width_output = proc_ctx->pipeline_param->output_region->width;
1439 proc_ctx->height_output = proc_ctx->pipeline_param->output_region->height;
1441 proc_ctx->width_output = obj_surf_output->orig_width;
1442 proc_ctx->height_output = obj_surf_output->orig_height;
1445 /* processing of partial frames is not supported: regions, when given,
    must cover the whole frame */
1447 assert(proc_ctx->width_input == proc_ctx->pipeline_param->surface_region->width);
1448 assert(proc_ctx->height_input == proc_ctx->pipeline_param->surface_region->height);
1449 assert(proc_ctx->width_output == proc_ctx->pipeline_param->output_region->width);
1450 assert(proc_ctx->height_output == proc_ctx->pipeline_param->output_region->height);
/* Any size mismatch requires a post-VEBOX scaling pass. */
1453 if(proc_ctx->width_output != proc_ctx->width_input ||
1454 proc_ctx->height_output != proc_ctx->height_input){
1455 proc_ctx->format_convert_flags |= POST_SCALING_CONVERT;
1458 /* convert the following formats to NV12 before the VEBOX run */
1459 if(obj_surf_input->fourcc == VA_FOURCC_YV12 ||
1460 obj_surf_input->fourcc == VA_FOURCC_I420 ||
1461 obj_surf_input->fourcc == VA_FOURCC_IMC1 ||
1462 obj_surf_input->fourcc == VA_FOURCC_IMC3 ||
1463 obj_surf_input->fourcc == VA_FOURCC_RGBA ||
1464 obj_surf_input->fourcc == VA_FOURCC_BGRA){
1466 proc_ctx->format_convert_flags |= PRE_FORMAT_CONVERT;
1468 } else if(obj_surf_input->fourcc == VA_FOURCC_AYUV ||
1469 obj_surf_input->fourcc == VA_FOURCC_YUY2 ||
1470 obj_surf_input->fourcc == VA_FOURCC_NV12 ||
1471 obj_surf_input->fourcc == VA_FOURCC_P010){
1473 // nothing to do here: VEBOX consumes these formats natively
1475 /* any other input format is unsupported */
1476 ASSERT_RET(0, VA_STATUS_ERROR_UNIMPLEMENTED);
1479 if (proc_ctx->format_convert_flags & PRE_FORMAT_CONVERT) {
/* Lazily allocate the NV12 staging surface (kept for the context's
   lifetime), then convert the user input into it. */
1480 if(proc_ctx->surface_input_vebox_object == NULL){
1481 va_status = i965_CreateSurfaces(ctx,
1482 proc_ctx->width_input,
1483 proc_ctx->height_input,
1484 VA_RT_FORMAT_YUV420,
1486 &(proc_ctx->surface_input_vebox));
1487 assert(va_status == VA_STATUS_SUCCESS);
1488 obj_surf_input_vebox = SURFACE(proc_ctx->surface_input_vebox);
1489 assert(obj_surf_input_vebox);
1491 if (obj_surf_input_vebox) {
1492 proc_ctx->surface_input_vebox_object = obj_surf_input_vebox;
1493 i965_check_alloc_surface_bo(ctx, obj_surf_input_vebox, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
1497 vpp_surface_convert(ctx, proc_ctx->surface_input_object, proc_ctx->surface_input_vebox_object);
1500 /* create one temporary NV12 surface for conversion */
1501 if(obj_surf_output->fourcc == VA_FOURCC_YV12 ||
1502 obj_surf_output->fourcc == VA_FOURCC_I420 ||
1503 obj_surf_output->fourcc == VA_FOURCC_IMC1 ||
1504 obj_surf_output->fourcc == VA_FOURCC_IMC3 ||
1505 obj_surf_output->fourcc == VA_FOURCC_RGBA ||
1506 obj_surf_output->fourcc == VA_FOURCC_BGRA) {
1508 proc_ctx->format_convert_flags |= POST_FORMAT_CONVERT;
1509 } else if(obj_surf_output->fourcc == VA_FOURCC_AYUV ||
1510 obj_surf_output->fourcc == VA_FOURCC_YUY2 ||
1511 obj_surf_output->fourcc == VA_FOURCC_NV12 ||
1512 obj_surf_output->fourcc == VA_FOURCC_P010) {
1514 /* Nothing to do here */
1516 /* any other output format is unsupported */
1517 ASSERT_RET(0, VA_STATUS_ERROR_UNIMPLEMENTED);
/* Intermediate NV12 output surface at INPUT size — scaling (if any)
   happens afterwards in the post pass. */
1520 if(proc_ctx->format_convert_flags & POST_FORMAT_CONVERT ||
1521 proc_ctx->format_convert_flags & POST_SCALING_CONVERT){
1522 if(proc_ctx->surface_output_vebox_object == NULL){
1523 va_status = i965_CreateSurfaces(ctx,
1524 proc_ctx->width_input,
1525 proc_ctx->height_input,
1526 VA_RT_FORMAT_YUV420,
1528 &(proc_ctx->surface_output_vebox));
1529 assert(va_status == VA_STATUS_SUCCESS);
1530 obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_vebox);
1531 assert(obj_surf_output_vebox);
1533 if (obj_surf_output_vebox) {
1534 proc_ctx->surface_output_vebox_object = obj_surf_output_vebox;
1535 i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
/* Scaled-output staging surface at OUTPUT size. */
1540 if(proc_ctx->format_convert_flags & POST_SCALING_CONVERT){
1541 if(proc_ctx->surface_output_scaled_object == NULL){
1542 va_status = i965_CreateSurfaces(ctx,
1543 proc_ctx->width_output,
1544 proc_ctx->height_output,
1545 VA_RT_FORMAT_YUV420,
1547 &(proc_ctx->surface_output_scaled));
1548 assert(va_status == VA_STATUS_SUCCESS);
1549 obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_scaled);
1550 assert(obj_surf_output_vebox);
1552 if (obj_surf_output_vebox) {
1553 proc_ctx->surface_output_scaled_object = obj_surf_output_vebox;
1554 i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
1559 return VA_STATUS_SUCCESS;
/* Post-pass after the VEBOX run: moves the processed picture from the
 * selected frame-store slot into the user's output surface, applying
 * format conversion and/or scaling as flagged by the pre pass.
 * Returns the status of the last conversion step. */
1563 hsw_veb_post_format_convert(VADriverContextP ctx,
1564 struct intel_vebox_context *proc_ctx)
1566 struct object_surface *obj_surface = NULL;
1567 VAStatus va_status = VA_STATUS_SUCCESS;
/* The VEBOX wrote its result into this frame-store slot. */
1569 obj_surface = proc_ctx->frame_store[proc_ctx->current_output].obj_surface;
1571 if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
1572 /* copy the saved frame in the second call */
1573 va_status = vpp_surface_convert(ctx, obj_surface, proc_ctx->surface_output_object);
1574 } else if(!(proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
1575 !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)){
1576 /* Output surface format is covered by vebox pipeline and
1577 * processed picture is already stored in the output surface,
1578 * so nothing will be done here */
1579 } else if ((proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
1580 !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)){
1581 /* convert and copy NV12 to YV12/IMC3/IMC2/RGBA output*/
1582 va_status = vpp_surface_convert(ctx, obj_surface, proc_ctx->surface_output_object);
1584 } else if(proc_ctx->format_convert_flags & POST_SCALING_CONVERT) {
1585 VAProcPipelineParameterBuffer * const pipe = proc_ctx->pipeline_param;
1586 /* scaling, convert and copy NV12 to YV12/IMC3/IMC2/RGBA output*/
1587 assert(obj_surface->fourcc == VA_FOURCC_NV12);
1589 /* first step: surface scaling into the scaled staging surface */
1590 vpp_surface_scaling(ctx, obj_surface,
1591 proc_ctx->surface_output_scaled_object, pipe->filter_flags);
1593 /* second step: color format convert and copy to output */
1594 obj_surface = proc_ctx->surface_output_object;
1596 va_status = vpp_surface_convert(ctx, proc_ctx->surface_output_scaled_object, obj_surface);
/* Translates the VA-API filter list attached to the pipeline parameter
 * buffer into the driver's filters_mask and per-filter parameter
 * pointers. Returns VA_STATUS_ERROR_INVALID_PARAMETER for a bad filter
 * buffer and VA_STATUS_ERROR_UNSUPPORTED_FILTER for unknown types.
 * NOTE(review): elided view — the per-case `break;` lines were dropped. */
1603 gen75_vebox_init_pipe_params(VADriverContextP ctx,
1604 struct intel_vebox_context *proc_ctx)
1606 struct i965_driver_data * const i965 = i965_driver_data(ctx);
1607 const VAProcPipelineParameterBuffer * const pipe = proc_ctx->pipeline_param;
1608 VAProcFilterParameterBuffer *filter;
1611 proc_ctx->filters_mask = 0;
1612 for (i = 0; i < pipe->num_filters; i++) {
1613 struct object_buffer * const obj_buffer = BUFFER(pipe->filters[i]);
1615 assert(obj_buffer && obj_buffer->buffer_store);
1616 if (!obj_buffer || !obj_buffer->buffer_store)
1617 return VA_STATUS_ERROR_INVALID_PARAMETER;
1619 filter = (VAProcFilterParameterBuffer *)
1620 obj_buffer->buffer_store->buffer;
1621 switch (filter->type) {
1622 case VAProcFilterNoiseReduction:
1623 proc_ctx->filters_mask |= VPP_DNDI_DN;
1624 proc_ctx->filter_dn = filter;
1626 case VAProcFilterDeinterlacing:
1627 proc_ctx->filters_mask |= VPP_DNDI_DI;
1628 proc_ctx->filter_di = filter;
1630 case VAProcFilterColorBalance:
1631 proc_ctx->filters_mask |= VPP_IECP_PRO_AMP;
1632 proc_ctx->filter_iecp_amp = filter;
/* ColorBalance carries one element per channel (hue/sat/etc.). */
1633 proc_ctx->filter_iecp_amp_num_elements = obj_buffer->num_elements;
1635 case VAProcFilterSkinToneEnhancement:
1636 proc_ctx->filters_mask |= VPP_IECP_STD_STE;
1637 proc_ctx->filter_iecp_std = filter;
1639 case VAProcFilterSharpening:
1640 proc_ctx->filters_mask |= VPP_SHARP;
1643 WARN_ONCE("unsupported filter (type: %d)\n", filter->type);
1644 return VA_STATUS_ERROR_UNSUPPORTED_FILTER;
/* No filters requested: the pipeline still runs for plain CSC. */
1648 if(proc_ctx->filters_mask == 0)
1649 proc_ctx->filters_mask |= VPP_IECP_CSC;
1651 return VA_STATUS_SUCCESS;
/* Derives per-run boolean state (IECP/DN/DI enables, first-frame and
 * second-field flags, advanced-DI mode) from filters_mask and the
 * deinterlacing parameters. Must run after init_pipe_params().
 * Returns VA_STATUS_ERROR_INVALID_PARAMETER / _UNSUPPORTED_FILTER on
 * inconsistent deinterlacing input. */
1655 gen75_vebox_init_filter_params(VADriverContextP ctx,
1656 struct intel_vebox_context *proc_ctx)
1658 proc_ctx->format_convert_flags = 0; /* initialized in hsw_veb_pre_format_convert() */
1660 proc_ctx->is_iecp_enabled = (proc_ctx->filters_mask & VPP_IECP_MASK) != 0;
1661 proc_ctx->is_dn_enabled = (proc_ctx->filters_mask & VPP_DNDI_DN) != 0;
1662 proc_ctx->is_di_enabled = (proc_ctx->filters_mask & VPP_DNDI_DI) != 0;
1663 proc_ctx->is_di_adv_enabled = 0;
1664 proc_ctx->is_first_frame = 0;
1665 proc_ctx->is_second_field = 0;
1667 /* Check whether we are deinterlacing the second field */
1668 if (proc_ctx->is_di_enabled) {
1669 const VAProcFilterParameterBufferDeinterlacing * const deint_params =
1670 proc_ctx->filter_di;
/* tff = top field first; is_top_field = field being produced now. */
1672 const unsigned int tff =
1673 !(deint_params->flags & VA_DEINTERLACING_BOTTOM_FIELD_FIRST);
1674 const unsigned int is_top_field =
1675 !(deint_params->flags & VA_DEINTERLACING_BOTTOM_FIELD);
/* The second field is the one that does NOT match the field order. */
1677 if ((tff ^ is_top_field) != 0) {
1678 struct object_surface * const obj_surface =
1679 proc_ctx->surface_input_object;
/* Both field calls must pass the same input surface. */
1681 if (proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id != obj_surface->base.id) {
1682 WARN_ONCE("invalid surface provided for second field\n");
1683 return VA_STATUS_ERROR_INVALID_PARAMETER;
1685 proc_ctx->is_second_field = 1;
1689 /* Check whether we are deinterlacing the first frame */
1690 if (proc_ctx->is_di_enabled) {
1691 const VAProcFilterParameterBufferDeinterlacing * const deint_params =
1692 proc_ctx->filter_di;
1694 switch (deint_params->algorithm) {
1695 case VAProcDeinterlacingBob:
/* Bob never uses history: every frame is a "first frame". */
1696 proc_ctx->is_first_frame = 1;
1698 case VAProcDeinterlacingMotionAdaptive:
1699 case VAProcDeinterlacingMotionCompensated:
1700 if (proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id == VA_INVALID_ID)
1701 proc_ctx->is_first_frame = 1;
1702 else if (proc_ctx->is_second_field) {
1703 /* At this stage, we have already deinterlaced the
1704 first field successfully. So, the first frame flag
1705 is triggered if the previous field was deinterlaced
1706 without reference frame */
1707 if (proc_ctx->frame_store[FRAME_IN_PREVIOUS].surface_id == VA_INVALID_ID)
1708 proc_ctx->is_first_frame = 1;
1711 const VAProcPipelineParameterBuffer * const pipe =
1712 proc_ctx->pipeline_param;
1714 if (pipe->num_forward_references < 1 ||
1715 pipe->forward_references[0] == VA_INVALID_ID) {
1716 WARN_ONCE("A forward temporal reference is needed for Motion adaptive/compensated deinterlacing !!!\n");
1717 return VA_STATUS_ERROR_INVALID_PARAMETER;
1720 proc_ctx->is_di_adv_enabled = 1;
1723 WARN_ONCE("unsupported deinterlacing algorithm (%d)\n",
1724 deint_params->algorithm);
1725 return VA_STATUS_ERROR_UNSUPPORTED_FILTER;
1728 return VA_STATUS_SUCCESS;
/* Top-level HSW entry point for one VPP frame: parse filters, derive
 * run flags, run pre-conversion, (re)build surface storage, then either
 * run the GPE sharpness kernel, short-circuit on a saved second-field
 * copy, or emit and flush the full VEBOX batch. */
1732 gen75_vebox_process_picture(VADriverContextP ctx,
1733 struct intel_vebox_context *proc_ctx)
1737 status = gen75_vebox_init_pipe_params(ctx, proc_ctx);
1738 if (status != VA_STATUS_SUCCESS)
1741 status = gen75_vebox_init_filter_params(ctx, proc_ctx);
1742 if (status != VA_STATUS_SUCCESS)
1745 status = hsw_veb_pre_format_convert(ctx, proc_ctx);
1746 if (status != VA_STATUS_SUCCESS)
1749 status = gen75_vebox_ensure_surfaces(ctx, proc_ctx);
1750 if (status != VA_STATUS_SUCCESS)
1753 status = gen75_vebox_ensure_surfaces_storage(ctx, proc_ctx);
1754 if (status != VA_STATUS_SUCCESS)
/* Sharpening runs on the GPE (render) path, not on the VEBOX engine. */
1757 if (proc_ctx->filters_mask & VPP_SHARP_MASK) {
1758 vpp_sharpness_filtering(ctx, proc_ctx);
1759 } else if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
1760 assert(proc_ctx->is_second_field);
1761 /* directly copy the saved frame in the second call */
/* Normal path: build and submit the VEBOX command batch. */
1763 intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
1764 intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
1765 hsw_veb_state_table_setup(ctx, proc_ctx);
1766 hsw_veb_state_command(ctx, proc_ctx);
1767 hsw_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
1768 hsw_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
1769 hsw_veb_dndi_iecp_command(ctx, proc_ctx);
1770 intel_batchbuffer_end_atomic(proc_ctx->batch);
1771 intel_batchbuffer_flush(proc_ctx->batch);
1774 status = hsw_veb_post_format_convert(ctx, proc_ctx);
/* Tears down a VEBOX context: GPE sub-context, the three lazily
 * created staging surfaces, every frame-store slot, the four state
 * table BOs and the batch buffer. Safe to call with unused members
 * (NULL bos, VA_INVALID_ID surfaces are skipped/no-ops). */
1779 void gen75_vebox_context_destroy(VADriverContextP ctx,
1780 struct intel_vebox_context *proc_ctx)
1784 if(proc_ctx->vpp_gpe_ctx){
1785 vpp_gpe_context_destroy(ctx,proc_ctx->vpp_gpe_ctx);
1786 proc_ctx->vpp_gpe_ctx = NULL;
1789 if(proc_ctx->surface_input_vebox != VA_INVALID_ID){
1790 i965_DestroySurfaces(ctx, &proc_ctx->surface_input_vebox, 1);
1791 proc_ctx->surface_input_vebox = VA_INVALID_ID;
1792 proc_ctx->surface_input_vebox_object = NULL;
1795 if(proc_ctx->surface_output_vebox != VA_INVALID_ID){
1796 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_vebox, 1);
1797 proc_ctx->surface_output_vebox = VA_INVALID_ID;
1798 proc_ctx->surface_output_vebox_object = NULL;
1801 if(proc_ctx->surface_output_scaled != VA_INVALID_ID){
1802 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_scaled, 1);
1803 proc_ctx->surface_output_scaled = VA_INVALID_ID;
1804 proc_ctx->surface_output_scaled_object = NULL;
/* Releases scratch surfaces owned by the frame store; user surfaces
   are only detached. */
1807 for (i = 0; i < ARRAY_ELEMS(proc_ctx->frame_store); i++)
1808 frame_store_clear(&proc_ctx->frame_store[i], ctx);
1810 /* dndi state table */
1811 drm_intel_bo_unreference(proc_ctx->dndi_state_table.bo);
1812 proc_ctx->dndi_state_table.bo = NULL;
1814 /* iecp state table */
1815 drm_intel_bo_unreference(proc_ctx->iecp_state_table.bo);
1816 proc_ctx->iecp_state_table.bo = NULL;
1818 /* gamut state table */
1819 drm_intel_bo_unreference(proc_ctx->gamut_state_table.bo);
1820 proc_ctx->gamut_state_table.bo = NULL;
1822 /* vertex state table */
1823 drm_intel_bo_unreference(proc_ctx->vertex_state_table.bo);
1824 proc_ctx->vertex_state_table.bo = NULL;
1826 intel_batchbuffer_free(proc_ctx->batch);
/* Allocates and initializes a VEBOX processing context bound to the
 * VEBOX ring (I915_EXEC_VEBOX). Returns the new context; caller owns
 * it and must free with gen75_vebox_context_destroy().
 * NOTE(review): allocation failure is only checked via assert(); in a
 * release build a NULL calloc result would crash below — consider an
 * explicit NULL return. */
1831 struct intel_vebox_context * gen75_vebox_context_init(VADriverContextP ctx)
1833 struct intel_driver_data *intel = intel_driver_data(ctx);
1834 struct intel_vebox_context *proc_context = calloc(1, sizeof(struct intel_vebox_context));
1837 assert(proc_context);
1838 proc_context->batch = intel_batchbuffer_new(intel, I915_EXEC_VEBOX, 0);
/* Frame-store slots start unbound. */
1840 for (i = 0; i < ARRAY_ELEMS(proc_context->frame_store); i++)
1841 proc_context->frame_store[i].surface_id = VA_INVALID_ID;
/* Most of these are redundant after calloc() (already zero/NULL) and
   filters_mask is even cleared twice — kept for explicitness. */
1843 proc_context->filters_mask = 0;
1844 proc_context->surface_output_object = NULL;
1845 proc_context->surface_input_object = NULL;
1846 proc_context->surface_input_vebox = VA_INVALID_ID;
1847 proc_context->surface_input_vebox_object = NULL;
1848 proc_context->surface_output_vebox = VA_INVALID_ID;
1849 proc_context->surface_output_vebox_object = NULL;
1850 proc_context->surface_output_scaled = VA_INVALID_ID;
1851 proc_context->surface_output_scaled_object = NULL;
1852 proc_context->filters_mask = 0;
1853 proc_context->format_convert_flags = 0;
1854 proc_context->vpp_gpe_ctx = NULL;
1856 return proc_context;
/* Emits the BDW VEB_STATE command (12 dwords): the pipeline enable
 * bits plus relocations to the four indirect state tables. Enable
 * bits mirror the flags computed in gen75_vebox_init_filter_params(). */
1859 void bdw_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
1861 struct intel_batchbuffer *batch = proc_ctx->batch;
1863 BEGIN_VEB_BATCH(batch, 0xc);
1864 OUT_VEB_BATCH(batch, VEB_STATE | (0xc - 2));
1865 OUT_VEB_BATCH(batch,
1866 0 << 25 | // state surface control bits
1867 0 << 23 | // reserved.
1868 0 << 22 | // gamut expansion position
1869 0 << 15 | // reserved.
1870 0 << 14 | // single slice vebox enable
1871 0 << 13 | // hot pixel filter enable
1872 0 << 12 | // alpha plane enable
1873 0 << 11 | // vignette enable
1874 0 << 10 | // demosaic enable
1875 proc_ctx->current_output_type << 8 | // DI output frame
1876 1 << 7 | // 444->422 downsample method
1877 1 << 6 | // 422->420 downsample method
1878 proc_ctx->is_first_frame << 5 | // DN/DI first frame
1879 proc_ctx->is_di_enabled << 4 | // DI enable
1880 proc_ctx->is_dn_enabled << 3 | // DN enable
1881 proc_ctx->is_iecp_enabled << 2 | // global IECP enabled
1882 0 << 1 | // ColorGamutCompressionEnable
1883 0 ) ; // ColorGamutExpansionEnable.
/* Relocations: DNDI, IECP, gamut and vertex state table BOs, each read
   from the instruction domain. */
1886 proc_ctx->dndi_state_table.bo,
1887 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1890 proc_ctx->iecp_state_table.bo,
1891 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1894 proc_ctx->gamut_state_table.bo,
1895 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1898 proc_ctx->vertex_state_table.bo,
1899 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1902 OUT_VEB_BATCH(batch, 0);/*capture pipe state pointer*/
1903 OUT_VEB_BATCH(batch, 0);
1905 ADVANCE_VEB_BATCH(batch);
/* Emits the BDW VEB_DNDI_IECP_STATE command (20 dwords): the processed
 * width plus relocations for every frame-store surface the VEBOX reads
 * (current/previous input, STMM in) and writes (STMM out, DN out,
 * current/previous out, statistics). */
1908 void bdw_veb_dndi_iecp_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
1910 struct intel_batchbuffer *batch = proc_ctx->batch;
1911 unsigned char frame_ctrl_bits = 0;
1912 struct object_surface *obj_surface = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
/* Hardware wants a 64-pixel-aligned width, clamped to the surface. */
1913 unsigned int width64 = ALIGN(proc_ctx->width_input, 64);
1915 assert(obj_surface);
1916 if (width64 > obj_surface->orig_width)
1917 width64 = obj_surface->orig_width;
1919 BEGIN_VEB_BATCH(batch, 0x14);
1920 OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (0x14 - 2));//DWord 0
1921 OUT_VEB_BATCH(batch, (width64 - 1));
/* Inputs: render-domain reads only. */
1924 proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface->bo,
1925 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 2
1928 proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface->bo,
1929 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 4
1932 proc_ctx->frame_store[FRAME_IN_STMM].obj_surface->bo,
1933 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 6
/* Outputs: render-domain read/write. */
1936 proc_ctx->frame_store[FRAME_OUT_STMM].obj_surface->bo,
1937 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 8
1940 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface->bo,
1941 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 10
1944 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface->bo,
1945 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 12
1948 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface->bo,
1949 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 14
1952 proc_ctx->frame_store[FRAME_OUT_STATISTIC].obj_surface->bo,
1953 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 16
1955 OUT_VEB_BATCH(batch,0);//DWord 18
1956 OUT_VEB_BATCH(batch,0);//DWord 19
1958 ADVANCE_VEB_BATCH(batch);
/* Top-level BDW (gen8) entry point for one VPP frame. Mirrors
 * gen75_vebox_process_picture() but emits the BDW-specific VEB_STATE
 * and VEB_DNDI_IECP_STATE commands; the setup/convert helpers are
 * shared with the HSW path. */
1962 gen8_vebox_process_picture(VADriverContextP ctx,
1963 struct intel_vebox_context *proc_ctx)
1967 status = gen75_vebox_init_pipe_params(ctx, proc_ctx);
1968 if (status != VA_STATUS_SUCCESS)
1971 status = gen75_vebox_init_filter_params(ctx, proc_ctx);
1972 if (status != VA_STATUS_SUCCESS)
1975 status = hsw_veb_pre_format_convert(ctx, proc_ctx);
1976 if (status != VA_STATUS_SUCCESS)
1979 status = gen75_vebox_ensure_surfaces(ctx, proc_ctx);
1980 if (status != VA_STATUS_SUCCESS)
1983 status = gen75_vebox_ensure_surfaces_storage(ctx, proc_ctx);
1984 if (status != VA_STATUS_SUCCESS)
/* Sharpening runs on the GPE path, not the VEBOX engine. */
1987 if (proc_ctx->filters_mask & VPP_SHARP_MASK) {
1988 vpp_sharpness_filtering(ctx, proc_ctx);
1989 } else if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
1990 assert(proc_ctx->is_second_field);
1991 /* directly copy the saved frame in the second call */
/* Normal path: build and submit the VEBOX batch (BDW variants). */
1993 intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
1994 intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
1995 hsw_veb_state_table_setup(ctx, proc_ctx);
1996 bdw_veb_state_command(ctx, proc_ctx);
1997 hsw_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
1998 hsw_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
1999 bdw_veb_dndi_iecp_command(ctx, proc_ctx);
2000 intel_batchbuffer_end_atomic(proc_ctx->batch);
2001 intel_batchbuffer_flush(proc_ctx->batch);
2004 status = hsw_veb_post_format_convert(ctx, proc_ctx);
/* Fills the SKL DNDI (denoise/deinterlace) indirect state table through
 * the mapped pointer in proc_ctx->dndi_state_table.ptr. Values are the
 * driver's tuned defaults; only progressive_dn, dndi_top_first and
 * is_mcdi_enabled vary with the active filters.
 * NOTE(review): elided view — the table BO must already be mapped by
 * the caller (skl_veb_state_table_setup). */
2011 skl_veb_dndi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
2013 unsigned int* p_table ;
2014 unsigned int progressive_dn = 1;
2015 unsigned int dndi_top_first = 0;
2016 unsigned int is_mcdi_enabled = 0;
2018 if (proc_ctx->is_di_enabled) {
2019 const VAProcFilterParameterBufferDeinterlacing * const deint_params =
2020 proc_ctx->filter_di;
2024 /* If we are in "First Frame" mode, i.e. past frames are not
2025 available for motion measure, then don't use the TFF flag */
2026 dndi_top_first = !(deint_params->flags & (proc_ctx->is_first_frame ?
2027 VA_DEINTERLACING_BOTTOM_FIELD :
2028 VA_DEINTERLACING_BOTTOM_FIELD_FIRST));
/* MCDI bit only for the motion-compensated algorithm. */
2031 (deint_params->algorithm == VAProcDeinterlacingMotionCompensated);
2035 VAProcFilterParameterBufferDeinterlacing *di_param =
2036 (VAProcFilterParameterBufferDeinterlacing *) proc_ctx->filter_di;
2038 VAProcFilterParameterBuffer * dn_param =
2039 (VAProcFilterParameterBuffer *) proc_ctx->filter_dn;
2041 p_table = (unsigned int *)proc_ctx->dndi_state_table.ptr;
2043 *p_table ++ = ( 140 << 20 | // denoise stad threshold . w1
2044 192 << 12 | // dnmh_history_max
2045 7 << 8 | // dnmh_delta[3:0]
2046 1 ); // denoise moving pixel threshold
2048 *p_table ++ = ( 38 << 20 | // denoise asd threshold
2049 0 << 10 | // temporal diff th
2050 0 ); // low temporal diff th
2052 *p_table ++ = ( progressive_dn << 28 | // progressive dn
2053 38 << 16 | // denoise th for sum of complexity measure
2054 32 << 10 | // dnmh_history_init[5:0]
2057 *p_table ++ = ( 0 << 28 | // hot pixel count
2058 0 << 20 | // hot pixel threshold
2059 1 << 12 | // block noise estimate edge threshold
2060 20 ); // block noise estimate noise threshold
2062 *p_table ++ = ( 140<< 16 | // chroma denoise stad threshold
2063 0 << 13 | // reserved
2064 1 << 12 | // chroma denoise enable
2065 13 << 6 | // chr temp diff th
2066 7 ); // chr temp diff low
2068 *p_table ++ = 0; // weight
2070 *p_table ++ = ( 0 << 16 | // dn_thmax
2073 *p_table ++ = ( 0 << 16 | // dn_prt5
2074 0 ); // dn_dyn_thmin
2076 *p_table ++ = ( 0 << 16 | // dn_prt4
2079 *p_table ++ = ( 0 << 16 | // dn_prt2
2082 *p_table ++ = ( 0 << 16 | // dn_prt0
2083 0 << 10 | // dn_wd22
2087 *p_table ++ = ( 0 << 25 | // dn_wd12
2088 0 << 20 | // dn_wd11
2089 0 << 15 | // dn_wd10
2090 0 << 10 | // dn_wd02
2094 *p_table ++ = ( 2 << 10 | // stmm c2
2095 9 << 6 | // cat slope minus 1
2096 5 << 2 | // sad tight threshold
2097 0 ); // smooth mv th
2099 *p_table ++ = ( 0 << 31 | // stmm blending constant select
2100 64 << 24 | // stmm trc1
2101 125<< 16 | // stmm trc2
2102 0 << 14 | // reserved
2103 30 << 8 | // multiplier for vecm
2104 150 ); // maximum stmm
2106 *p_table ++ = ( 118<< 24 | // minimum stmm
2107 0 << 22 | // stmm shift down
2108 1 << 20 | // stmm shift up
2109 5 << 16 | // stmm output shift
2110 100 << 8 | // sdi threshold
2113 *p_table ++ = ( 50 << 24 | // sdi fallback mode 1 t1 constant
2114 100 << 16 | // sdi fallback mode 1 t2 constant
2115 37 << 8 | // sdi fallback mode 2 constant(angle2x1)
2116 175 ); // fmd temporal difference threshold
2118 *p_table ++ = ( 16 << 24 | // fmd #1 vertical difference th . w7
2119 100<< 16 | // fmd #2 vertical difference th
2120 0 << 14 | // cat threshold
2121 2 << 8 | // fmd tear threshold
2122 is_mcdi_enabled << 7 | // mcdi enable, use motion compensated deinterlace algorithm
2123 dndi_top_first << 3 | // dn/di top first
2126 *p_table ++ = ( 10 << 19 | // neighbor pixel threshold
2127 0 << 16 | // fmd for 2nd field of previous frame
2128 25 << 10 | // mc pixel consistency threshold
2129 0 << 8 | // fmd for 1st field for current frame
2130 10 << 4 | // sad thb
/* Fills the SKL IECP CSC transform sub-table (12 dwords at byte offset
 * 220 of the mapped IECP state table): a 3x3 BT.601 matrix in s2.16
 * fixed point plus per-channel input (v) / output (u) offsets, scaled
 * by 4 for the 10-bit pipeline. Zeroed when no CSC transform applies. */
2134 void skl_veb_iecp_csc_transform_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
2136 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 220);
/* Default to identity with zero offsets. */
2137 float tran_coef[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
2138 float v_coef[3] = {0.0, 0.0, 0.0};
2139 float u_coef[3] = {0.0, 0.0, 0.0};
2140 int is_transform_enabled = 0;
2142 if(!(proc_ctx->filters_mask & VPP_IECP_CSC_TRANSFORM)){
2143 memset(p_table, 0, 12 * 4);
/* RGB -> YUV (BT.601 limited range). */
2147 if(proc_ctx->fourcc_input == VA_FOURCC_RGBA &&
2148 (proc_ctx->fourcc_output == VA_FOURCC_NV12 ||
2149 proc_ctx->fourcc_output == VA_FOURCC_YV12 ||
/* NOTE(review): VA_FOURCC_YVY2 looks like a typo for VA_FOURCC_YUY2 —
   the input-side check below uses YUY2; confirm against libva fourccs. */
2150 proc_ctx->fourcc_output == VA_FOURCC_YVY2 ||
2151 proc_ctx->fourcc_output == VA_FOURCC_AYUV)) {
2153 tran_coef[0] = 0.257;
2154 tran_coef[1] = 0.504;
2155 tran_coef[2] = 0.098;
2156 tran_coef[3] = -0.148;
2157 tran_coef[4] = -0.291;
2158 tran_coef[5] = 0.439;
2159 tran_coef[6] = 0.439;
2160 tran_coef[7] = -0.368;
2161 tran_coef[8] = -0.071;
/* Output offsets: +128 on chroma (x4 for 10-bit precision). */
2164 u_coef[1] = 128 * 4;
2165 u_coef[2] = 128 * 4;
2167 is_transform_enabled = 1;
/* YUV -> RGB (BT.601 limited range). */
2168 }else if((proc_ctx->fourcc_input == VA_FOURCC_NV12 ||
2169 proc_ctx->fourcc_input == VA_FOURCC_YV12 ||
2170 proc_ctx->fourcc_input == VA_FOURCC_YUY2 ||
2171 proc_ctx->fourcc_input == VA_FOURCC_AYUV) &&
2172 proc_ctx->fourcc_output == VA_FOURCC_RGBA) {
2173 tran_coef[0] = 1.164;
2174 tran_coef[1] = 0.000;
2175 tran_coef[2] = 1.569;
2176 tran_coef[3] = 1.164;
2177 tran_coef[4] = -0.813;
2178 tran_coef[5] = -0.392;
2179 tran_coef[6] = 1.164;
2180 tran_coef[7] = 2.017;
2181 tran_coef[8] = 0.000;
/* Input offsets: -16 on luma, -128 on chroma (x4 for 10-bit). */
2183 v_coef[0] = -16 * 4;
2184 v_coef[1] = -128 * 4;
2185 v_coef[2] = -128 * 4;
2187 is_transform_enabled = 1;
2188 }else if(proc_ctx->fourcc_input != proc_ctx->fourcc_output){
2189 //enable when input and output format are different.
/* Identity matrix with transform enabled: pure resampling CSC. */
2190 is_transform_enabled = 1;
2193 if(is_transform_enabled == 0){
2194 memset(p_table, 0, 12 * 4);
2196 *p_table ++ = ( is_transform_enabled << 31 |
2197 0 << 29 | // yuv_channel swap
2198 intel_format_convert(tran_coef[0], 2, 16, 1)); //c0, s2.16 format
2200 *p_table ++ = ( 0 << 19 | //reserved
2201 intel_format_convert(tran_coef[1], 2, 16, 1)); //c1, s2.16 format
2203 *p_table ++ = ( 0 << 19 | //reserved
2204 intel_format_convert(tran_coef[2], 2, 16, 1)); //c2, s2.16 format
2206 *p_table ++ = ( 0 << 19 | //reserved
2207 intel_format_convert(tran_coef[3], 2, 16, 1)); //c3, s2.16 format
2209 *p_table ++ = ( 0 << 19 | //reserved
2210 intel_format_convert(tran_coef[4], 2, 16, 1)); //c4, s2.16 format
2212 *p_table ++ = ( 0 << 19 | //reserved
2213 intel_format_convert(tran_coef[5], 2, 16, 1)); //c5, s2.16 format
2215 *p_table ++ = ( 0 << 19 | //reserved
2216 intel_format_convert(tran_coef[6], 2, 16, 1)); //c6, s2.16 format
2218 *p_table ++ = ( 0 << 19 | //reserved
2219 intel_format_convert(tran_coef[7], 2, 16, 1)); //c7, s2.16 format
2221 *p_table ++ = ( 0 << 19 | //reserved
2222 intel_format_convert(tran_coef[8], 2, 16, 1)); //c8, s2.16 format
/* Offset dwords: u (output) in the high half, v (input) in the low. */
2224 *p_table ++ = ( intel_format_convert(u_coef[0], 16, 0, 1) << 16 |
2225 intel_format_convert(v_coef[0], 16, 0, 1));
2227 *p_table ++ = ( intel_format_convert(u_coef[1], 16, 0, 1) << 16 |
2228 intel_format_convert(v_coef[1], 16, 0, 1));
2230 *p_table ++ = ( intel_format_convert(u_coef[2], 16, 0, 1) << 16 |
2231 intel_format_convert(v_coef[2], 16, 0, 1));
2235 void skl_veb_iecp_aoi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
2237 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 27 * sizeof(unsigned int));
2239 if (!(proc_ctx->filters_mask & VPP_IECP_AOI)) {
2240 memset(p_table, 0, 3 * 4);
2242 *p_table ++ = 0x00000000;
2243 *p_table ++ = 0x00030000;
2244 *p_table ++ = 0x00030000;
2248 void skl_veb_state_table_setup(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
2250 if(proc_ctx->filters_mask & VPP_DNDI_MASK) {
2251 dri_bo *dndi_bo = proc_ctx->dndi_state_table.bo;
2252 dri_bo_map(dndi_bo, 1);
2253 proc_ctx->dndi_state_table.ptr = dndi_bo->virtual;
2255 skl_veb_dndi_table(ctx, proc_ctx);
2257 dri_bo_unmap(dndi_bo);
2260 if(proc_ctx->filters_mask & VPP_IECP_MASK) {
2261 dri_bo *iecp_bo = proc_ctx->iecp_state_table.bo;
2262 dri_bo_map(iecp_bo, 1);
2263 proc_ctx->iecp_state_table.ptr = iecp_bo->virtual;
2264 memset(proc_ctx->iecp_state_table.ptr, 0, 90 * 4);
2266 hsw_veb_iecp_std_table(ctx, proc_ctx);
2267 hsw_veb_iecp_ace_table(ctx, proc_ctx);
2268 hsw_veb_iecp_tcc_table(ctx, proc_ctx);
2269 hsw_veb_iecp_pro_amp_table(ctx, proc_ctx);
2270 skl_veb_iecp_csc_transform_table(ctx, proc_ctx);
2271 skl_veb_iecp_aoi_table(ctx, proc_ctx);
2273 dri_bo_unmap(iecp_bo);
/*
 * Emit the VEB_STATE command (0x10 dwords): global pipeline enables for
 * this VEBOX pass plus pointers to the DN/DI, IECP, gamut and vertex
 * indirect state buffers.  Each pointer is an address dword pair — the
 * relocated lower 32 bits followed by an explicit zero upper dword.
 */
void
skl_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = proc_ctx->batch;

    BEGIN_VEB_BATCH(batch, 0x10);
    OUT_VEB_BATCH(batch, VEB_STATE | (0x10 - 2));
    OUT_VEB_BATCH(batch,
                  ((i965->intel.mocs_state) << 25) | // state surface control bits
                  0 << 23 |                     // reserved.
                  0 << 22 |                     // gamut expansion position
                  0 << 15 |                     // reserved.
                  0 << 14 |                     // single slice vebox enable
                  0 << 13 |                     // hot pixel filter enable
                  0 << 12 |                     // alpha plane enable
                  0 << 11 |                     // vignette enable
                  0 << 10 |                     // demosaic enable
                  proc_ctx->current_output_type << 8 | // DI output frame
                  1 << 7 |                      // 444->422 downsample method
                  1 << 6 |                      // 422->420 downsample method
                  proc_ctx->is_first_frame << 5 |  // DN/DI first frame
                  proc_ctx->is_di_enabled << 4 |   // DI enable
                  proc_ctx->is_dn_enabled << 3 |   // DN enable
                  proc_ctx->is_iecp_enabled << 2 | // global IECP enabled
                  0 << 1 |                      // ColorGamutCompressionEnable
                  0 ) ;                         // ColorGamutExpansionEnable.

    /* DN/DI state table address (low dword reloc + zero upper dword). */
    OUT_RELOC(batch,
              proc_ctx->dndi_state_table.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0, 0);

    OUT_VEB_BATCH(batch, 0);

    /* IECP state table address. */
    OUT_RELOC(batch,
              proc_ctx->iecp_state_table.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0, 0);

    OUT_VEB_BATCH(batch, 0);

    /* Gamut state table address. */
    OUT_RELOC(batch,
              proc_ctx->gamut_state_table.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0, 0);

    OUT_VEB_BATCH(batch, 0);

    /* Vertex state table address. */
    OUT_RELOC(batch,
              proc_ctx->vertex_state_table.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0, 0);

    OUT_VEB_BATCH(batch, 0);

    OUT_VEB_BATCH(batch, 0);/*capture pipe state pointer — unused, zero */
    OUT_VEB_BATCH(batch, 0);

    OUT_VEB_BATCH(batch, 0);/*lace lut table state pointer — unused, zero */
    OUT_VEB_BATCH(batch, 0);

    OUT_VEB_BATCH(batch, 0);/*gamma correction values address — unused, zero */
    OUT_VEB_BATCH(batch, 0);

    ADVANCE_VEB_BATCH(batch);
}
/*
 * Emit a VEB_SURFACE_STATE command (9 dwords) describing either the input
 * (is_output == 0, FRAME_IN_CURRENT) or output (is_output != 0,
 * FRAME_OUT_CURRENT) surface of the current VEBOX pass: format, size,
 * pitch, tiling, and the Y offsets of the chroma planes.
 */
void skl_veb_surface_state(VADriverContextP ctx, struct intel_vebox_context *proc_ctx, unsigned int is_output)
{
    struct intel_batchbuffer *batch = proc_ctx->batch;
    unsigned int u_offset_y = 0, v_offset_y = 0;
    unsigned int is_uv_interleaved = 0, tiling = 0, swizzle = 0;
    unsigned int surface_format = PLANAR_420_8;
    struct object_surface* obj_surf = NULL;
    unsigned int surface_pitch = 0;
    unsigned int half_pitch_chroma = 0;
    unsigned int derived_pitch;

    if (is_output) {
        obj_surf = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
    } else {
        obj_surf = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
    }

    /* Only these fourccs are handled by the format switch below. */
    assert(obj_surf->fourcc == VA_FOURCC_NV12 ||
           obj_surf->fourcc == VA_FOURCC_YUY2 ||
           obj_surf->fourcc == VA_FOURCC_AYUV ||
           obj_surf->fourcc == VA_FOURCC_RGBA ||
           obj_surf->fourcc == VA_FOURCC_P010);

    /* Map fourcc to hardware surface format; pitch is in bytes, derived
     * from obj_surf->width (the pitch-aligned allocation width) times the
     * bytes per pixel of the packed formats. */
    if (obj_surf->fourcc == VA_FOURCC_NV12) {
        surface_format = PLANAR_420_8;
        surface_pitch = obj_surf->width;
        is_uv_interleaved = 1;
        half_pitch_chroma = 0;
    } else if (obj_surf->fourcc == VA_FOURCC_YUY2) {
        surface_format = YCRCB_NORMAL;
        surface_pitch = obj_surf->width * 2;
        is_uv_interleaved = 0;
        half_pitch_chroma = 0;
    } else if (obj_surf->fourcc == VA_FOURCC_AYUV) {
        surface_format = PACKED_444A_8;
        surface_pitch = obj_surf->width * 4;
        is_uv_interleaved = 0;
        half_pitch_chroma = 0;
    } else if (obj_surf->fourcc == VA_FOURCC_RGBA) {
        surface_format = R8G8B8A8_UNORM_SRGB;
        surface_pitch = obj_surf->width * 4;
        is_uv_interleaved = 0;
        half_pitch_chroma = 0;
    } else if (obj_surf->fourcc == VA_FOURCC_P010) {
        surface_format = PLANAR_420_16;
        surface_pitch = obj_surf->width;
        is_uv_interleaved = 1;
        half_pitch_chroma = 0;
    }

    derived_pitch = surface_pitch;

    u_offset_y = obj_surf->y_cb_offset;
    v_offset_y = obj_surf->y_cr_offset;

    dri_bo_get_tiling(obj_surf->bo, &tiling, &swizzle);

    BEGIN_VEB_BATCH(batch, 9);
    OUT_VEB_BATCH(batch, VEB_SURFACE_STATE | (9 - 2));
    OUT_VEB_BATCH(batch,
                  0 << 1 |         // reserved
                  is_output);      // surface identification: 0 = input, 1 = output

    OUT_VEB_BATCH(batch,
                  (obj_surf->orig_height - 1) << 18 |  // height . w3
                  (obj_surf->orig_width - 1) << 4 |    // width
                  0);                                  // reserved

    OUT_VEB_BATCH(batch,
                  surface_format << 28 |      // surface format, e.g. YCbCr420. w4
                  is_uv_interleaved << 27 |   // interleaved chroma (1) vs. two separate planes (0)
                  0 << 20 |                   // reserved
                  (surface_pitch - 1) << 3 |  // surface pitch, 64-byte aligned
                  half_pitch_chroma << 2 |    // half pitch for chroma
                  !!tiling << 1 |             // tiled (1) or linear (0) surface
                  (tiling == I915_TILING_Y)); // tile walk, ignored for linear surfaces

    OUT_VEB_BATCH(batch,
                  0 << 16 |      // X offset for U(Cb)
                  u_offset_y);   // Y offset for U(Cb)

    OUT_VEB_BATCH(batch,
                  0 << 16 |      // X offset for V(Cr)
                  v_offset_y );  // Y offset for V(Cr)

    OUT_VEB_BATCH(batch, 0);

    OUT_VEB_BATCH(batch, derived_pitch - 1);  // derived surface pitch

    OUT_VEB_BATCH(batch, 0);

    ADVANCE_VEB_BATCH(batch);
}
2436 gen9_vebox_process_picture(VADriverContextP ctx,
2437 struct intel_vebox_context *proc_ctx)
2441 status = gen75_vebox_init_pipe_params(ctx, proc_ctx);
2442 if (status != VA_STATUS_SUCCESS)
2445 status = gen75_vebox_init_filter_params(ctx, proc_ctx);
2446 if (status != VA_STATUS_SUCCESS)
2449 status = hsw_veb_pre_format_convert(ctx, proc_ctx);
2450 if (status != VA_STATUS_SUCCESS)
2453 status = gen75_vebox_ensure_surfaces(ctx, proc_ctx);
2454 if (status != VA_STATUS_SUCCESS)
2457 status = gen75_vebox_ensure_surfaces_storage(ctx, proc_ctx);
2458 if (status != VA_STATUS_SUCCESS)
2461 if (proc_ctx->filters_mask & VPP_SHARP_MASK) {
2462 vpp_sharpness_filtering(ctx, proc_ctx);
2463 } else if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
2464 assert(proc_ctx->is_second_field);
2465 /* directly copy the saved frame in the second call */
2467 intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
2468 intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
2469 skl_veb_state_table_setup(ctx, proc_ctx);
2470 skl_veb_state_command(ctx, proc_ctx);
2471 skl_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
2472 skl_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
2473 bdw_veb_dndi_iecp_command(ctx, proc_ctx);
2474 intel_batchbuffer_end_atomic(proc_ctx->batch);
2475 intel_batchbuffer_flush(proc_ctx->batch);
2478 status = hsw_veb_post_format_convert(ctx, proc_ctx);