2 * Copyright © 2011 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Li Xiaowei <xiaowei.a.li@intel.com>
26 * Li Zhong <zhong.li@intel.com>
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "gen75_vpp_vebox.h"
40 #include "intel_media.h"
42 #include "i965_post_processing.h"
47 i965_MapBuffer(VADriverContextP ctx, VABufferID buf_id, void **);
50 i965_UnmapBuffer(VADriverContextP ctx, VABufferID buf_id);
53 i965_DeriveImage(VADriverContextP ctx, VABufferID surface, VAImage *out_image);
56 i965_DestroyImage(VADriverContextP ctx, VAImageID image);
/* Convert the pixel data of src_obj_surf into dst_obj_surf through the
 * i965 image-processing path, using full-frame source/destination
 * rectangles of the (asserted) identical surface size.
 * NOTE(review): this extract appears truncated — the opening brace, the
 * remainder of the i965_image_processing() argument list and the return
 * are not visible here. */
59 vpp_surface_convert(VADriverContextP ctx, struct object_surface *src_obj_surf,
60 struct object_surface *dst_obj_surf)
62 VAStatus va_status = VA_STATUS_SUCCESS;
/* Conversion only — both surfaces must share the same original dimensions. */
64 assert(src_obj_surf->orig_width == dst_obj_surf->orig_width);
65 assert(src_obj_surf->orig_height == dst_obj_surf->orig_height);
/* Full-frame rectangles: origin (0,0), size taken from the source surface. */
67 VARectangle src_rect, dst_rect;
68 src_rect.x = dst_rect.x = 0;
69 src_rect.y = dst_rect.y = 0;
70 src_rect.width = dst_rect.width = src_obj_surf->orig_width;
71 src_rect.height = dst_rect.height = src_obj_surf->orig_height;
/* Wrap both object_surfaces as progressive-frame i965_surface descriptors. */
73 struct i965_surface src_surface, dst_surface;
74 src_surface.base = (struct object_base *)src_obj_surf;
75 src_surface.type = I965_SURFACE_TYPE_SURFACE;
76 src_surface.flags = I965_SURFACE_FLAG_FRAME;
78 dst_surface.base = (struct object_base *)dst_obj_surf;
79 dst_surface.type = I965_SURFACE_TYPE_SURFACE;
80 dst_surface.flags = I965_SURFACE_FLAG_FRAME;
82 va_status = i965_image_processing(ctx,
/* Scale src_obj_surf into dst_obj_surf (both asserted to be NV12) through
 * the i965 scaling path; src/dst rectangles span each surface's own
 * original size, so differing sizes produce a scale.
 * NOTE(review): extract appears truncated — rectangle x/y initialisation,
 * the rest of the i965_scaling_processing() call and the return are not
 * visible here. */
91 vpp_surface_scaling(VADriverContextP ctx, struct object_surface *src_obj_surf,
92 struct object_surface *dst_obj_surf, uint32_t flags)
94 VAStatus va_status = VA_STATUS_SUCCESS;
/* This path only supports NV12 on both ends. */
96 assert(src_obj_surf->fourcc == VA_FOURCC_NV12);
97 assert(dst_obj_surf->fourcc == VA_FOURCC_NV12);
99 VARectangle src_rect, dst_rect;
102 src_rect.width = src_obj_surf->orig_width;
103 src_rect.height = src_obj_surf->orig_height;
107 dst_rect.width = dst_obj_surf->orig_width;
108 dst_rect.height = dst_obj_surf->orig_height;
109 va_status = i965_scaling_processing(ctx,
/* Run the sharpness filter via the VPP GPE (GPGPU) context: lazily create
 * the GPE context on first use, wire the current input/output frame-store
 * surfaces and pipeline parameters into it, then process one picture.
 * NOTE(review): extract appears truncated — the error check after
 * vpp_gpe_context_init() and the function's return are not visible. */
121 vpp_sharpness_filtering(VADriverContextP ctx,
122                         struct intel_vebox_context *proc_ctx)
124 VAStatus va_status = VA_STATUS_SUCCESS;
/* Lazy one-time initialisation of the GPE context. */
126 if (proc_ctx->vpp_gpe_ctx == NULL) {
127 proc_ctx->vpp_gpe_ctx = vpp_gpe_context_init(ctx);
130 proc_ctx->vpp_gpe_ctx->pipeline_param = proc_ctx->pipeline_param;
131 proc_ctx->vpp_gpe_ctx->surface_pipeline_input_object = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
132 proc_ctx->vpp_gpe_ctx->surface_output_object = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
134 va_status = vpp_gpe_process_picture(ctx, proc_ctx->vpp_gpe_ctx);
/* Fill the DN/DI (denoise / deinterlace) hardware state table pointed to
 * by proc_ctx->dndi_state_table.ptr with packed 32-bit words.  The word
 * layout differs per generation: Haswell gets a leading reserved word,
 * while GEN8/9/10 get extra trailing words (hot-pixel parameters).
 * NOTE(review): extract appears truncated — several dwords and low-order
 * bit fields (e.g. the tail of W5, W7 and W8) are missing from view. */
139 void hsw_veb_dndi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
141 struct i965_driver_data *i965 = i965_driver_data(ctx);
142 unsigned int* p_table ;
/* Defaults used when deinterlacing is disabled / first frame. */
143 unsigned int progressive_dn = 1;
144 unsigned int dndi_top_first = 0;
145 unsigned int is_mcdi_enabled = 0;
147 if (proc_ctx->is_di_enabled) {
148 const VAProcFilterParameterBufferDeinterlacing * const deint_params =
153 /* If we are in "First Frame" mode, i.e. past frames are not
154    available for motion measure, then don't use the TFF flag */
155 dndi_top_first = !(deint_params->flags & (proc_ctx->is_first_frame ?
156                                           VA_DEINTERLACING_BOTTOM_FIELD :
157                                           VA_DEINTERLACING_BOTTOM_FIELD_FIRST));
/* Motion-compensated DI is a distinct hardware mode selected by algorithm. */
160 (deint_params->algorithm == VAProcDeinterlacingMotionCompensated);
164 VAProcFilterParameterBufferDeinterlacing *di_param =
165     (VAProcFilterParameterBufferDeinterlacing *) proc_ctx->filter_di;
167 VAProcFilterParameterBuffer * dn_param =
168     (VAProcFilterParameterBuffer *) proc_ctx->filter_dn;
170 p_table = (unsigned int *)proc_ctx->dndi_state_table.ptr;
/* Haswell layout starts with one reserved dword that later gens omit. */
172 if (IS_HASWELL(i965->intel.device_info))
173 *p_table ++ = 0; // reserved . w0
175 *p_table ++ = (140 << 24 |   // denoise STAD threshold . w1
176                192 << 16 |   // dnmh_history_max
177                0 << 12 |     // reserved
178                7 << 8 |      // dnmh_delta[3:0]
179                38);          // denoise ASD threshold
181 *p_table ++ = (0 << 30 |     // reserved . w2
182                0 << 24 |     // temporal diff th
183                0 << 22 |     // reserved.
184                0 << 16 |     // low temporal diff th
186                1 << 8 |      // denoise moving pixel th
187                38);          // denoise th for sum of complexity measure
189 *p_table ++ = (0 << 30 |     // reserved . w3
190                12 << 24 |    // good neighbor th[5:0]
191                9 << 20 |     // CAT slope minus 1
192                5 << 16 |     // SAD Tight in
193                0 << 14 |     // smooth mv th
194                0 << 12 |     // reserved
195                1 << 8 |      // bne_edge_th[3:0]
196                20);          // block noise estimate noise th
198 *p_table ++ = (0 << 31 |     // STMM blending constant select. w4
199                64 << 24 |    // STMM trc1
200                125 << 16 |   // STMM trc2
201                0 << 14 |     // reserved
202                30 << 8 |     // VECM_mul
203                150);         // maximum STMM
205 *p_table ++ = (118 << 24 |   // minumum STMM . W5
206                0 << 22 |     // STMM shift down
207                1 << 20 |     // STMM shift up
208                5 << 16 |     // STMM output shift
209                100 << 8 |    // SDI threshold
212 *p_table ++ = (50 << 24 |    // SDI fallback mode 1 T1 constant . W6
213                100 << 16 |   // SDI fallback mode 1 T2 constant
214                37 << 8 |     // SDI fallback mode 2 constant(angle2x1)
215                175);         // FMD temporal difference threshold
217 *p_table ++ = (16 << 24 |    // FMD #1 vertical difference th . w7
218                100 << 16 |   // FMD #2 vertical difference th
220                2 << 8 |      // FMD tear threshold
221                is_mcdi_enabled << 7 | // MCDI Enable, use motion compensated deinterlace algorithm
222                progressive_dn << 6 |  // progressive DN
224                dndi_top_first << 3 |  // DN/DI Top First
227 *p_table ++ = (0 << 29 |     // reserved . W8
228                32 << 23 |    // dnmh_history_init[5:0]
229                10 << 19 |    // neighborPixel th
230                0 << 18 |     // reserved
231                0 << 16 |     // FMD for 2nd field of previous frame
232                25 << 10 |    // MC pixel consistency th
233                0 << 8 |      // FMD for 1st field for current frame
237 *p_table ++ = (0 << 24 |     // reserved
238                140 << 16 |   // chr_dnmh_stad_th
239                0 << 13 |     // reserved
240                1 << 12 |     // chrome denoise enable
241                13 << 6 |     // chr temp diff th
242                7);           // chr temp diff low
/* GEN8+ append an extra dword for hot-pixel handling. */
244 if (IS_GEN8(i965->intel.device_info) ||
245     IS_GEN9(i965->intel.device_info) ||
246     IS_GEN10(i965->intel.device_info))
247 *p_table ++ = 0; // parameters for hot pixel,
250 //Set default values for STDE
/* Write the default STD/STE (skin-tone detection/enhancement) saturation
 * and hue PWL coefficient dwords (DW15..DW28 of the IECP state table)
 * into p_table.  Values with 0x3F8 / 0x7A style encodings are negative
 * numbers in the narrow fixed-width fields (see inline comments).
 * NOTE(review): extract appears truncated — many dwords show only their
 * leading fields; trailing fields of each pack are missing from view. */
251 void set_std_table_default(struct intel_vebox_context *proc_ctx, unsigned int *p_table)
255 *p_table ++ = (0 << 31 |       // Reserved
256                0x3F8 << 21 |   // SATB1 (10 bits, default 8, optimized value -8)
259                0x7A);          // SATP1 (7 bits, default 6, optimized value -6)
262 *p_table ++ = (0 << 31 |       // Reserved
268 *p_table ++ = (0 << 22 |       // Reserved
273 *p_table ++ = (14 << 25 |      // HUEP3
275                0x7A << 11 |    // HUEP1 (7 bits, default value -6 = 7Ah)
279 *p_table ++ = (0 << 30 |       // Reserved
282                0x3F8);         // HUEB1 (10 bits, default value 8, optimized value -8)
285 *p_table ++ = (0 << 22 |       // Reserved
290 *p_table ++ = (0 << 22 |       // Reserved
/* "_DARK" variants: separate PWL coefficients applied to dark regions. */
295 *p_table ++ = (0 << 31 |       // Reserved
296                0 << 21 |       // SATB1_DARK
297                31 << 14 |      // SATP3_DARK
298                31 << 7 |       // SATP2_DARK
299                0x7B);          // SATP1_DARK (7 bits, default value -11 = FF5h, optimized value -5)
302 *p_table ++ = (0 << 31 |       // Reserved
303                305 << 20 |     // SATS0_DARK
304                124 << 10 |     // SATB3_DARK
308 *p_table ++ = (0 << 22 |       // Reserved
309                256 << 11 |     // SATS2_DARK
313 *p_table ++ = (14 << 25 |      // HUEP3_DARK
314                14 << 18 |      // HUEP2_DARK
315                14 << 11 |      // HUEP1_DARK
319 *p_table ++ = (0 << 30 |       // Reserved
320                56 << 20 |      // HUEB3_DARK
321                56 << 10 |      // HUEB2_DARK
325 *p_table ++ = (0 << 22 |       // Reserved
326                256 << 11 |     // HUES1_DARK
330 *p_table ++ = (0 << 22 |       // Reserved
331                256 << 11 |     // HUES3_DARK
335 //Set values for STDE factor 3
/* Same dword layout as set_std_table_default(), but with coefficients
 * tuned for an STD enhancement factor of 3 (selected by the caller from
 * the filter's stde_factor value).
 * NOTE(review): extract appears truncated — several packed dwords show
 * only their leading fields. */
336 void set_std_table_3(struct intel_vebox_context *proc_ctx, unsigned int *p_table)
340 *p_table ++ = (0 << 31 |      // Reserved
341                1016 << 21 |   // SATB1 (10 bits, default 8, optimized value 1016)
344                122);          // SATP1 (7 bits, default 6, optimized value 122)
347 *p_table ++ = (0 << 31 |      // Reserved
353 *p_table ++ = (0 << 22 |      // Reserved
358 *p_table ++ = (14 << 25 |     // HUEP3
360                122 << 11 |    // HUEP1 (7 bits, default value -6 = 7Ah, optimized 122)
364 *p_table ++ = (0 << 30 |      // Reserved
365                56 << 20 |     // HUEB3 (default 256, optimized 56)
367                1016);         // HUEB1 (10 bits, default value 8, optimized value 1016)
370 *p_table ++ = (0 << 22 |      // Reserved
375 *p_table ++ = (0 << 22 |      // Reserved
/* Dark-region PWL coefficients (same values as the default table here). */
380 *p_table ++ = (0 << 31 |      // Reserved
381                0 << 21 |      // SATB1_DARK
382                31 << 14 |     // SATP3_DARK
383                31 << 7 |      // SATP2_DARK
384                123);          // SATP1_DARK (7 bits, default value -11 = FF5h, optimized value 123)
387 *p_table ++ = (0 << 31 |      // Reserved
388                305 << 20 |    // SATS0_DARK
389                124 << 10 |    // SATB3_DARK
393 *p_table ++ = (0 << 22 |      // Reserved
394                256 << 11 |    // SATS2_DARK
398 *p_table ++ = (14 << 25 |     // HUEP3_DARK
399                14 << 18 |     // HUEP2_DARK
400                14 << 11 |     // HUEP1_DARK
404 *p_table ++ = (0 << 30 |      // Reserved
405                56 << 20 |     // HUEB3_DARK
406                56 << 10 |     // HUEB2_DARK
410 *p_table ++ = (0 << 22 |      // Reserved
411                256 << 11 |    // HUES1_DARK
415 *p_table ++ = (0 << 22 |      // Reserved
416                256 << 11 |    // HUES3_DARK
420 //Set values for STDE factor 6
/* Same dword layout as set_std_table_default(), with coefficients tuned
 * for an STD enhancement factor of 6.
 * NOTE(review): extract appears truncated — several packed dwords show
 * only their leading fields. */
421 void set_std_table_6(struct intel_vebox_context *proc_ctx, unsigned int *p_table)
425 *p_table ++ = (0 << 31 |      // Reserved
426                0 << 21 |      // SATB1 (10 bits, default 8, optimized value 0)
428                31 << 7 |      // SATP2 (default 6, optimized 31)
429                114);          // SATP1 (7 bits, default 6, optimized value 114)
432 *p_table ++ = (0 << 31 |      // Reserved
433                467 << 20 |    // SATS0 (default 297, optimized 467)
438 *p_table ++ = (0 << 22 |      // Reserved
439                256 << 11 |    // SATS2 (default 297, optimized 256)
443 *p_table ++ = (14 << 25 |     // HUEP3
445                14 << 11 |     // HUEP1 (7 bits, default value -6 = 7Ah, optimized value 14)
449 *p_table ++ = (0 << 30 |      // Reserved
452                56);           // HUEB1 (10 bits, default value 8, optimized value 56)
455 *p_table ++ = (0 << 22 |      // Reserved
460 *p_table ++ = (0 << 22 |      // Reserved
/* Dark-region PWL coefficients (identical to the factor-3 table). */
465 *p_table ++ = (0 << 31 |      // Reserved
466                0 << 21 |      // SATB1_DARK
467                31 << 14 |     // SATP3_DARK
468                31 << 7 |      // SATP2_DARK
469                123);          // SATP1_DARK (7 bits, default value -11 = FF5h, optimized value 123)
472 *p_table ++ = (0 << 31 |      // Reserved
473                305 << 20 |    // SATS0_DARK
474                124 << 10 |    // SATB3_DARK
478 *p_table ++ = (0 << 22 |      // Reserved
479                256 << 11 |    // SATS2_DARK
483 *p_table ++ = (14 << 25 |     // HUEP3_DARK
484                14 << 18 |     // HUEP2_DARK
485                14 << 11 |     // HUEP1_DARK
489 *p_table ++ = (0 << 30 |      // Reserved
490                56 << 20 |     // HUEB3_DARK
491                56 << 10 |     // HUEB2_DARK
495 *p_table ++ = (0 << 22 |      // Reserved
496                256 << 11 |    // HUES1_DARK
500 *p_table ++ = (0 << 22 |      // Reserved
501                256 << 11 |    // HUES3_DARK
505 //Set values for STDE factor 9
/* Same dword layout as set_std_table_default(), with coefficients tuned
 * for an STD enhancement factor of 9 (strongest of the tuned tables).
 * NOTE(review): extract appears truncated — several packed dwords show
 * only their leading fields. */
506 void set_std_table_9(struct intel_vebox_context *proc_ctx, unsigned int *p_table)
510 *p_table ++ = (0 << 31 |      // Reserved
511                0 << 21 |      // SATB1 (10 bits, default 8, optimized value 0)
513                31 << 7 |      // SATP2 (default 6, optimized 31)
514                108);          // SATP1 (7 bits, default 6, optimized value 108)
517 *p_table ++ = (0 << 31 |      // Reserved
518                721 << 20 |    // SATS0 (default 297, optimized 721)
523 *p_table ++ = (0 << 22 |      // Reserved
524                256 << 11 |    // SATS2 (default 297, optimized 256)
525                156);          // SATS1 (default 176, optimized 156)
528 *p_table ++ = (14 << 25 |     // HUEP3
530                14 << 11 |     // HUEP1 (7 bits, default value -6 = 7Ah, optimized value 14)
534 *p_table ++ = (0 << 30 |      // Reserved
537                56);           // HUEB1 (10 bits, default value 8, optimized value 56)
540 *p_table ++ = (0 << 22 |      // Reserved
545 *p_table ++ = (0 << 22 |      // Reserved
/* Dark-region PWL coefficients (identical to the factor-3/6 tables). */
550 *p_table ++ = (0 << 31 |      // Reserved
551                0 << 21 |      // SATB1_DARK
552                31 << 14 |     // SATP3_DARK
553                31 << 7 |      // SATP2_DARK
554                123);          // SATP1_DARK (7 bits, default value -11 = FF5h, optimized value 123)
557 *p_table ++ = (0 << 31 |      // Reserved
558                305 << 20 |    // SATS0_DARK
559                124 << 10 |    // SATB3_DARK
563 *p_table ++ = (0 << 22 |      // Reserved
564                256 << 11 |    // SATS2_DARK
568 *p_table ++ = (14 << 25 |     // HUEP3_DARK
569                14 << 18 |     // HUEP2_DARK
570                14 << 11 |     // HUEP1_DARK
574 *p_table ++ = (0 << 30 |      // Reserved
575                56 << 20 |     // HUEB3_DARK
576                56 << 10 |     // HUEB2_DARK
580 *p_table ++ = (0 << 22 |      // Reserved
581                256 << 11 |    // HUES1_DARK
585 *p_table ++ = (0 << 22 |      // Reserved
586                256 << 11 |    // HUES3_DARK
/* Fill the STD/STE (skin-tone detection & enhancement) section of the
 * IECP state table.  When the filter is not requested the 29-dword region
 * is zeroed; otherwise DW0..DW14 are written inline and DW15..DW28 are
 * delegated to set_std_table_{3,6,9,default}() based on the requested
 * stde_factor from the filter parameter buffer.
 * NOTE(review): extract appears truncated — else branches, several dword
 * tails, and the switch case labels are missing from view.  Also note the
 * set_std_table_* helpers receive p_table as advanced by the preceding
 * *p_table++ writes. */
591 void hsw_veb_iecp_std_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
593 unsigned int *p_table = (unsigned int *)proc_ctx->iecp_state_table.ptr;
/* Filter not requested: zero the whole 29-dword STD/STE region. */
595 if (!(proc_ctx->filters_mask & VPP_IECP_STD_STE)) {
596 memset(p_table, 0, 29 * 4);
598 int stde_factor = 0; //default value
599 VAProcFilterParameterBuffer * std_param = (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_std;
600 stde_factor = std_param->value;
603 *p_table ++ = (154 << 24 |  // V_Mid
605                14 << 10 |   // Hue_Max
608                0 << 2 |     // Output Control is set to output the 1=STD score /0=Output Pixels
609                1 << 1 |     // Set STE Enable
610                1);          // Set STD Enable
613 *p_table ++ = (0 << 31 |    // Reserved
614                4 << 28 |    // Diamond Margin
615                0 << 21 |    // Diamond_du
616                3 << 18 |    // HS_Margin
617                79 << 10 |   // Cos(alpha)
622 *p_table ++ = (0 << 21 |    // Reserved
623                100 << 13 |  // Diamond_alpha
624                35 << 7 |    // Diamond_Th
/* Luma (Y) piecewise-linear detection points/slopes. */
628 *p_table ++ = (254 << 24 |  // Y_point_3
629                47 << 16 |   // Y_point_2
630                46 << 8 |    // Y_point_1
631                1 << 7 |     // VY_STD_Enable
635 *p_table ++ = (0 << 18 |    // Reserved
636                31 << 13 |   // Y_slope_2
637                31 << 8 |    // Y_slope_1
641 *p_table ++ = (400 << 16 |  // INV_Skin_types_margin = 20* Skin_Type_margin => 20*20
642                3300);       // INV_Margin_VYL => 1/Margin_VYL
645 *p_table ++ = (216 << 24 |  // P1L
647                1600);       // INV_Margin_VYU
650 *p_table ++ = (130 << 24 |  // B1L
656 *p_table ++ = (0 << 27 |    // Reserved
657                0x7FB << 16 | // S0L (11 bits, Default value: -5 = FBh, pad it with 1s to make it 11bits)
662 *p_table ++ = (0 << 22 |    // Reserved
667 *p_table ++ = (0 << 27 |    // Reserved
673 *p_table ++ = (163 << 24 |  // B1U
679 *p_table ++ = (0 << 27 |    // Reserved
685 *p_table ++ = (0 << 22 |    // Reserved
686                0x74D << 11 | // S2U (11 bits, Default value -179 = F4Dh)
690 *p_table ++ = (0 << 28 |    // Reserved
691                20 << 20 |   // Skin_types_margin
692                120 << 12 |  // Skin_types_thresh
693                1 << 11 |    // Skin_Types_Enable
696 //Set DWord 15 through DWord 28 in their respective methods.
697 switch (stde_factor) {
699 set_std_table_3(proc_ctx, p_table);
703 set_std_table_6(proc_ctx, p_table);
707 set_std_table_9(proc_ctx, p_table);
711 set_std_table_default(proc_ctx, p_table);
/* Fill the ACE (automatic contrast enhancement) section of the IECP state
 * table at byte offset 116: zeroed when the filter is off, otherwise a
 * fixed 13-dword coefficient table (hard-coded histogram/LUT values).
 * NOTE(review): extract appears truncated — the else branch structure and
 * closing braces are missing from view. */
717 void hsw_veb_iecp_ace_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
719 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 116);
721 if (!(proc_ctx->filters_mask & VPP_IECP_ACE)) {
722 memset(p_table, 0, 13 * 4);
724 *p_table ++ = 0x00000068;
725 *p_table ++ = 0x4c382410;
726 *p_table ++ = 0x9c887460;
727 *p_table ++ = 0xebd8c4b0;
728 *p_table ++ = 0x604c3824;
730 *p_table ++ = 0xb09c8874;
731 *p_table ++ = 0x0000d8c4;
732 *p_table ++ = 0x00000000;
733 *p_table ++ = 0x00000000;
734 *p_table ++ = 0x00000000;
736 *p_table ++ = 0x00000000;
737 *p_table ++ = 0x00000000;
738 *p_table ++ = 0x00000000;
/* Fill the TCC (total color control) section of the IECP state table at
 * byte offset 168: zeroed when the filter is off, otherwise a fixed
 * 11-dword coefficient table.  The TCC filter parameter buffer is not
 * consulted here (see the commented-out cast below).
 * NOTE(review): extract appears truncated — the else branch structure and
 * closing braces are missing from view. */
742 void hsw_veb_iecp_tcc_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
744 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 168);
745 // VAProcFilterParameterBuffer * tcc_param =
746 //     (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
748 if (!(proc_ctx->filters_mask & VPP_IECP_TCC)) {
749 memset(p_table, 0, 11 * 4);
751 *p_table ++ = 0x00000000;
752 *p_table ++ = 0x00000000;
753 *p_table ++ = 0x1e34cc91;
754 *p_table ++ = 0x3e3cce91;
755 *p_table ++ = 0x02e80195;
757 *p_table ++ = 0x0197046b;
758 *p_table ++ = 0x01790174;
759 *p_table ++ = 0x00000000;
760 *p_table ++ = 0x00000000;
761 *p_table ++ = 0x03030000;
763 *p_table ++ = 0x009201c0;
/* Fill the ProcAmp (color balance: hue/saturation/brightness/contrast)
 * section of the IECP state table at byte offset 212 (2 dwords).  The
 * VAProcColorBalance filter elements are scanned and converted to the
 * hardware fixed-point encodings via intel_format_convert(); hue and
 * saturation are folded into cos/sin terms scaled by contrast.
 * NOTE(review): extract appears truncated — declarations of `i` and
 * `src_hue`, some loop/branch closers, and parts of the final dword pack
 * are missing from view. */
767 void hsw_veb_iecp_pro_amp_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
/* Hardware defaults: unity contrast (U4.7), zero brightness, identity
 * hue/saturation rotation (cos=256 in S7.8, sin=0). */
769 unsigned int contrast = 0x80; //default
770 int brightness = 0x00; //default
771 int cos_c_s = 256 ; //default
772 int sin_c_s = 0; //default
773 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 212);
775 if (!(proc_ctx->filters_mask & VPP_IECP_PRO_AMP)) {
776 memset(p_table, 0, 2 * 4);
778 float src_saturation = 1.0;
780 float src_contrast = 1.0;
781 float src_brightness = 0.0;
782 float tmp_value = 0.0;
785 VAProcFilterParameterBufferColorBalance * amp_params =
786     (VAProcFilterParameterBufferColorBalance *) proc_ctx->filter_iecp_amp;
/* Each element selects one attribute; the comments give the VA-API range. */
788 for (i = 0; i < proc_ctx->filter_iecp_amp_num_elements; i++) {
789 VAProcColorBalanceType attrib = amp_params[i].attrib;
791 if (attrib == VAProcColorBalanceHue) {
792 src_hue = amp_params[i].value; //(-180.0, 180.0)
793 } else if (attrib == VAProcColorBalanceSaturation) {
794 src_saturation = amp_params[i].value; //(0.0, 10.0)
795 } else if (attrib == VAProcColorBalanceBrightness) {
796 src_brightness = amp_params[i].value; // (-100.0, 100.0)
797 brightness = intel_format_convert(src_brightness, 7, 4, 1);
798 } else if (attrib == VAProcColorBalanceContrast) {
799 src_contrast = amp_params[i].value; // (0.0, 10.0)
800 contrast = intel_format_convert(src_contrast, 4, 7, 0);
/* Combined hue rotation scaled by contrast*saturation, in S7.8 format. */
804 tmp_value = cos(src_hue / 180 * PI) * src_contrast * src_saturation;
805 cos_c_s = intel_format_convert(tmp_value, 7, 8, 1);
807 tmp_value = sin(src_hue / 180 * PI) * src_contrast * src_saturation;
808 sin_c_s = intel_format_convert(tmp_value, 7, 8, 1);
810 *p_table ++ = (0 << 28 |          //reserved
811                contrast << 17 |   //contrast value (U4.7 format)
813                brightness << 1 |  // S7.4 format
816 *p_table ++ = (cos_c_s << 16 |    // cos(h) * contrast * saturation
817                sin_c_s);          // sin(h) * contrast * saturation
/* Fill the CSC (color-space conversion) section of the IECP state table
 * at byte offset 220 (8 dwords).  Chooses a 3x3 coefficient matrix plus
 * per-channel offsets based on the input/output fourcc pair:
 *   RGB -> YUV uses BT.601-style RGB-to-YCbCr coefficients,
 *   YUV -> RGB uses the inverse, and any other differing pair merely
 * enables the transform with the identity matrix.  Coefficients are
 * encoded with intel_format_convert() into S2.10 / S10 fixed point.
 * NOTE(review): extract appears truncated — offset-coefficient
 * assignments for the RGB->YUV branch, some closing braces, and parts of
 * the first dword are missing from view.  Note the first branch compares
 * against VA_FOURCC_YVY2 while the reverse branch uses VA_FOURCC_YUY2 —
 * possibly a typo in the original; verify against the full source. */
823 void hsw_veb_iecp_csc_transform_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
825 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 220);
/* Identity transform and zero offsets by default. */
826 float tran_coef[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
827 float v_coef[3] = {0.0, 0.0, 0.0};
828 float u_coef[3] = {0.0, 0.0, 0.0};
829 int is_transform_enabled = 0;
831 if (!(proc_ctx->filters_mask & VPP_IECP_CSC_TRANSFORM)) {
832 memset(p_table, 0, 8 * 4);
/* RGB input to a YUV output: RGB -> YCbCr matrix. */
836 if (proc_ctx->fourcc_input == VA_FOURCC_RGBA &&
837     (proc_ctx->fourcc_output == VA_FOURCC_NV12 ||
838      proc_ctx->fourcc_output == VA_FOURCC_YV12 ||
839      proc_ctx->fourcc_output == VA_FOURCC_YVY2 ||
840      proc_ctx->fourcc_output == VA_FOURCC_AYUV)) {
842 tran_coef[0] = 0.257;
843 tran_coef[1] = 0.504;
844 tran_coef[2] = 0.098;
845 tran_coef[3] = -0.148;
846 tran_coef[4] = -0.291;
847 tran_coef[5] = 0.439;
848 tran_coef[6] = 0.439;
849 tran_coef[7] = -0.368;
850 tran_coef[8] = -0.071;
856 is_transform_enabled = 1;
/* YUV input to RGBA output: YCbCr -> RGB matrix with chroma offsets. */
857 } else if ((proc_ctx->fourcc_input == VA_FOURCC_NV12 ||
858             proc_ctx->fourcc_input == VA_FOURCC_YV12 ||
859             proc_ctx->fourcc_input == VA_FOURCC_YUY2 ||
860             proc_ctx->fourcc_input == VA_FOURCC_AYUV) &&
861            proc_ctx->fourcc_output == VA_FOURCC_RGBA) {
862 tran_coef[0] = 1.164;
863 tran_coef[1] = 0.000;
864 tran_coef[2] = 1.569;
865 tran_coef[3] = 1.164;
866 tran_coef[4] = -0.813;
867 tran_coef[5] = -0.392;
868 tran_coef[6] = 1.164;
869 tran_coef[7] = 2.017;
870 tran_coef[8] = 0.000;
873 v_coef[1] = -128 * 4;
874 v_coef[2] = -128 * 4;
876 is_transform_enabled = 1;
877 } else if (proc_ctx->fourcc_input != proc_ctx->fourcc_output) {
878 //enable when input and output format are different.
879 is_transform_enabled = 1;
882 if (is_transform_enabled == 0) {
883 memset(p_table, 0, 8 * 4);
/* Pack the matrix (S2.10) and offsets (S10) into the 8 state dwords. */
885 *p_table ++ = (0 << 29 | //reserved
886                intel_format_convert(tran_coef[1], 2, 10, 1) << 16 | //c1, s2.10 format
887                intel_format_convert(tran_coef[0], 2, 10, 1) << 3 |  //c0, s2.10 format
889                0 << 1 | // yuv_channel swap
890                is_transform_enabled);
892 *p_table ++ = (0 << 26 | //reserved
893                intel_format_convert(tran_coef[3], 2, 10, 1) << 13 |
894                intel_format_convert(tran_coef[2], 2, 10, 1));
896 *p_table ++ = (0 << 26 | //reserved
897                intel_format_convert(tran_coef[5], 2, 10, 1) << 13 |
898                intel_format_convert(tran_coef[4], 2, 10, 1));
900 *p_table ++ = (0 << 26 | //reserved
901                intel_format_convert(tran_coef[7], 2, 10, 1) << 13 |
902                intel_format_convert(tran_coef[6], 2, 10, 1));
904 *p_table ++ = (0 << 13 | //reserved
905                intel_format_convert(tran_coef[8], 2, 10, 1));
907 *p_table ++ = (0 << 22 | //reserved
908                intel_format_convert(u_coef[0], 10, 0, 1) << 11 |
909                intel_format_convert(v_coef[0], 10, 0, 1));
911 *p_table ++ = (0 << 22 | //reserved
912                intel_format_convert(u_coef[1], 10, 0, 1) << 11 |
913                intel_format_convert(v_coef[1], 10, 0, 1));
915 *p_table ++ = (0 << 22 | //reserved
916                intel_format_convert(u_coef[2], 10, 0, 1) << 11 |
917                intel_format_convert(v_coef[2], 10, 0, 1));
/* Fill the AOI (area of interest) section of the IECP state table at byte
 * offset 252 (3 dwords): zeroed when the filter is off, otherwise a fixed
 * default rectangle encoding.
 * NOTE(review): extract appears truncated — the else branch structure and
 * closing braces are missing from view. */
921 void hsw_veb_iecp_aoi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
923 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 252);
924 // VAProcFilterParameterBuffer * tcc_param =
925 //     (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
927 if (!(proc_ctx->filters_mask & VPP_IECP_AOI)) {
928 memset(p_table, 0, 3 * 4);
930 *p_table ++ = 0x00000000;
931 *p_table ++ = 0x00030000;
932 *p_table ++ = 0x00030000;
/* Map the DNDI and IECP state-table buffer objects and populate them via
 * the per-feature table writers above, unmapping each buffer afterwards.
 * Only tables whose filter groups are enabled in filters_mask are built.
 * NOTE(review): extract appears truncated — closing braces are missing
 * from view. */
936 void hsw_veb_state_table_setup(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
938 if (proc_ctx->filters_mask & VPP_DNDI_MASK) {
939 dri_bo *dndi_bo = proc_ctx->dndi_state_table.bo;
/* Map writable (second arg 1) so the CPU can fill the table. */
940 dri_bo_map(dndi_bo, 1);
941 proc_ctx->dndi_state_table.ptr = dndi_bo->virtual;
943 hsw_veb_dndi_table(ctx, proc_ctx);
945 dri_bo_unmap(dndi_bo);
948 if (proc_ctx->filters_mask & VPP_IECP_MASK) {
949 dri_bo *iecp_bo = proc_ctx->iecp_state_table.bo;
950 dri_bo_map(iecp_bo, 1);
951 proc_ctx->iecp_state_table.ptr = iecp_bo->virtual;
/* Zero the whole 97-dword IECP table before the section writers run. */
952 memset(proc_ctx->iecp_state_table.ptr, 0, 97 * 4);
954 hsw_veb_iecp_std_table(ctx, proc_ctx);
955 hsw_veb_iecp_ace_table(ctx, proc_ctx);
956 hsw_veb_iecp_tcc_table(ctx, proc_ctx);
957 hsw_veb_iecp_pro_amp_table(ctx, proc_ctx);
958 hsw_veb_iecp_csc_transform_table(ctx, proc_ctx);
959 hsw_veb_iecp_aoi_table(ctx, proc_ctx);
961 dri_bo_unmap(iecp_bo);
/* Emit the VEB_STATE command into the VEBOX batch: one control dword with
 * the per-frame enables (DN/DI/IECP, first-frame, output type, downsample
 * methods) followed by relocations to the four state-table buffers.
 * NOTE(review): extract appears truncated — the OUT_VEB_BATCH/OUT_RELOC
 * macro invocations around each relocation are partially missing from
 * view. */
965 void hsw_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
967 struct intel_batchbuffer *batch = proc_ctx->batch;
/* 6-dword command; length field encodes total dwords minus 2. */
969 BEGIN_VEB_BATCH(batch, 6);
970 OUT_VEB_BATCH(batch, VEB_STATE | (6 - 2));
972 0 << 26 |       // state surface control bits
973 0 << 11 |       // reserved.
974 0 << 10 |       // pipe sync disable
975 proc_ctx->current_output_type << 8 |  // DI output frame
976 1 << 7 |        // 444->422 downsample method
977 1 << 6 |        // 422->420 downsample method
978 proc_ctx->is_first_frame << 5 |   // DN/DI first frame
979 proc_ctx->is_di_enabled << 4 |    // DI enable
980 proc_ctx->is_dn_enabled << 3 |    // DN enable
981 proc_ctx->is_iecp_enabled << 2 |  // global IECP enabled
982 0 << 1 |        // ColorGamutCompressionEnable
983 0) ;            // ColorGamutExpansionEnable.
/* State-table buffer relocations (read-only from the GPU's view). */
986 proc_ctx->dndi_state_table.bo,
987 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
990 proc_ctx->iecp_state_table.bo,
991 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
994 proc_ctx->gamut_state_table.bo,
995 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
998 proc_ctx->vertex_state_table.bo,
999 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1001 ADVANCE_VEB_BATCH(batch);
/* Emit a VEB_SURFACE_STATE command describing either the current input
 * (is_output == 0) or current output (is_output != 0) frame-store
 * surface: format, pitch, UV interleave, tiling mode and the chroma
 * plane Y offsets.  Supports NV12, YUY2, AYUV and RGBA surfaces.
 * NOTE(review): extract appears truncated — the is_output branch around
 * the obj_surf selection and some batch dword tails are missing from
 * view. */
1004 void hsw_veb_surface_state(VADriverContextP ctx, struct intel_vebox_context *proc_ctx, unsigned int is_output)
1006 struct intel_batchbuffer *batch = proc_ctx->batch;
1007 unsigned int u_offset_y = 0, v_offset_y = 0;
1008 unsigned int is_uv_interleaved = 0, tiling = 0, swizzle = 0;
1009 unsigned int surface_format = PLANAR_420_8;
1010 struct object_surface* obj_surf = NULL;
1011 unsigned int surface_pitch = 0;
1012 unsigned int half_pitch_chroma = 0;
/* Pick the surface to describe: output vs input frame-store slot. */
1015 obj_surf = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
1017 obj_surf = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
1020 assert(obj_surf->fourcc == VA_FOURCC_NV12 ||
1021        obj_surf->fourcc == VA_FOURCC_YUY2 ||
1022        obj_surf->fourcc == VA_FOURCC_AYUV ||
1023        obj_surf->fourcc == VA_FOURCC_RGBA);
/* Derive hardware format and pitch (bytes per row) from the fourcc. */
1025 if (obj_surf->fourcc == VA_FOURCC_NV12) {
1026 surface_format = PLANAR_420_8;
1027 surface_pitch = obj_surf->width;
1028 is_uv_interleaved = 1;
1029 half_pitch_chroma = 0;
1030 } else if (obj_surf->fourcc == VA_FOURCC_YUY2) {
1031 surface_format = YCRCB_NORMAL;
1032 surface_pitch = obj_surf->width * 2;
1033 is_uv_interleaved = 0;
1034 half_pitch_chroma = 0;
1035 } else if (obj_surf->fourcc == VA_FOURCC_AYUV) {
1036 surface_format = PACKED_444A_8;
1037 surface_pitch = obj_surf->width * 4;
1038 is_uv_interleaved = 0;
1039 half_pitch_chroma = 0;
1040 } else if (obj_surf->fourcc == VA_FOURCC_RGBA) {
1041 surface_format = R8G8B8A8_UNORM_SRGB;
1042 surface_pitch = obj_surf->width * 4;
1043 is_uv_interleaved = 0;
1044 half_pitch_chroma = 0;
1047 u_offset_y = obj_surf->y_cb_offset;
1048 v_offset_y = obj_surf->y_cr_offset;
1050 dri_bo_get_tiling(obj_surf->bo, &tiling, &swizzle);
1052 BEGIN_VEB_BATCH(batch, 6);
1053 OUT_VEB_BATCH(batch, VEB_SURFACE_STATE | (6 - 2));
1054 OUT_VEB_BATCH(batch,
1055               0 << 1 |     // reserved
1056               is_output);  // surface indentification.
1058 OUT_VEB_BATCH(batch,
1059               (obj_surf->orig_height - 1) << 18 |  // height . w3
1060               (obj_surf->orig_width - 1) << 4 |    // width
1063 OUT_VEB_BATCH(batch,
1064               surface_format << 28 |       // surface format, YCbCr420. w4
1065               is_uv_interleaved << 27 |    // interleave chrome , two seperate palar
1066               0 << 20 |                    // reserved
1067               (surface_pitch - 1) << 3 |   // surface pitch, 64 align
1068               half_pitch_chroma << 2 |     // half pitch for chrome
1069               !!tiling << 1 |              // tiled surface, linear surface used
1070               (tiling == I915_TILING_Y));  // tiled walk, ignored when liner surface
1072 OUT_VEB_BATCH(batch,
1073               0 << 29 |     // reserved . w5
1074               0 << 16 |     // X offset for V(Cb)
1075               0 << 15 |     // reserved
1076               u_offset_y);  // Y offset for V(Cb)
1078 OUT_VEB_BATCH(batch,
1079               0 << 29 |     // reserved . w6
1080               0 << 16 |     // X offset for V(Cr)
1081               0 << 15 |     // reserved
1082               v_offset_y);  // Y offset for V(Cr)
1084 ADVANCE_VEB_BATCH(batch);
/* Emit the VEB_DNDI_IECP_STATE command (10 dwords): the processing width
 * (aligned to 64, clamped to the input surface width) followed by
 * relocations for all eight frame-store surfaces — inputs are
 * GPU-read-only, outputs are GPU-writable (write domain set).
 * NOTE(review): extract appears truncated — the OUT_RELOC macro lines
 * preceding each bo argument are missing from view; the large commented
 * region documents a previously-used frame rotation scheme. */
1087 void hsw_veb_dndi_iecp_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
1089 struct intel_batchbuffer *batch = proc_ctx->batch;
1090 unsigned char frame_ctrl_bits = 0;
1091 struct object_surface *obj_surface = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
1092 unsigned int width64 = ALIGN(proc_ctx->width_input, 64);
1094 assert(obj_surface);
/* Never process beyond the actual surface width. */
1095 if (width64 > obj_surface->orig_width)
1096 width64 = obj_surface->orig_width;
1098 /* s1:update the previous and current input */
1099 /* tempFrame = proc_ctx->frame_store[FRAME_IN_PREVIOUS];
1100    proc_ctx->frame_store[FRAME_IN_PREVIOUS] = proc_ctx->frame_store[FRAME_IN_CURRENT]; ;
1101    proc_ctx->frame_store[FRAME_IN_CURRENT] = tempFrame;
1103 if(proc_ctx->surface_input_vebox != -1){
1104     vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
1105                      proc_ctx->surface_input_vebox);
1107     vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
1108                      proc_ctx->surface_input);
1111 /*s2: update the STMM input and output */
1112 /* tempFrame = proc_ctx->frame_store[FRAME_IN_STMM];
1113    proc_ctx->frame_store[FRAME_IN_STMM] = proc_ctx->frame_store[FRAME_OUT_STMM]; ;
1114    proc_ctx->frame_store[FRAME_OUT_STMM] = tempFrame;
1116 /*s3:set reloc buffer address */
1117 BEGIN_VEB_BATCH(batch, 10);
1118 OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (10 - 2));
1119 OUT_VEB_BATCH(batch, (width64 - 1));
/* Read-only input surfaces. */
1121 proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface->bo,
1122 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
1124 proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface->bo,
1125 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
1127 proc_ctx->frame_store[FRAME_IN_STMM].obj_surface->bo,
1128 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
/* Writable output surfaces (write domain = RENDER). */
1130 proc_ctx->frame_store[FRAME_OUT_STMM].obj_surface->bo,
1131 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
1133 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface->bo,
1134 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
1136 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface->bo,
1137 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
1139 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface->bo,
1140 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
1142 proc_ctx->frame_store[FRAME_OUT_STATISTIC].obj_surface->bo,
1143 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
1145 ADVANCE_VEB_BATCH(batch);
/* Reset a frame-store slot to the empty state: no surface attached, an
 * invalid surface id, and both ownership flags cleared.
 * NOTE(review): extract appears truncated — return type and braces are
 * missing from view. */
1149 frame_store_reset(VEBFrameStore *fs)
1151     fs->obj_surface = NULL;
1152     fs->surface_id = VA_INVALID_ID;
1153     fs->is_internal_surface = 0;
1154     fs->is_scratch_surface = 0;
/* Release a frame-store slot: destroy the surface only when this slot
 * owns it (scratch surface allocated internally), then reset the slot.
 * NOTE(review): extract appears truncated — return type and braces are
 * missing from view. */
1158 frame_store_clear(VEBFrameStore *fs, VADriverContextP ctx)
1160     if (fs->obj_surface && fs->is_scratch_surface) {
1161         VASurfaceID surface_id = fs->obj_surface->base.id;
1162         i965_DestroySurfaces(ctx, &surface_id, 1);
1164     frame_store_reset(fs);
1168 gen75_vebox_ensure_surfaces_storage(VADriverContextP ctx,
1169 struct intel_vebox_context *proc_ctx)
1171 struct i965_driver_data * const i965 = i965_driver_data(ctx);
1172 struct object_surface *input_obj_surface, *output_obj_surface;
1173 unsigned int input_fourcc, output_fourcc;
1174 unsigned int input_sampling, output_sampling;
1175 unsigned int input_tiling, output_tiling;
1176 unsigned int i, swizzle;
1180 /* Determine input surface info. Use native VEBOX format whenever
1181 possible. i.e. when the input surface format is not supported
1182 by the VEBOX engine, then allocate a temporary surface (live
1183 during the whole VPP pipeline lifetime)
1185 XXX: derive an actual surface format compatible with the input
1186 surface chroma format */
1187 input_obj_surface = proc_ctx->surface_input_vebox_object ?
1188 proc_ctx->surface_input_vebox_object : proc_ctx->surface_input_object;
1189 if (input_obj_surface->bo) {
1190 input_fourcc = input_obj_surface->fourcc;
1191 input_sampling = input_obj_surface->subsampling;
1192 dri_bo_get_tiling(input_obj_surface->bo, &input_tiling, &swizzle);
1193 input_tiling = !!input_tiling;
1195 input_fourcc = VA_FOURCC_NV12;
1196 input_sampling = SUBSAMPLE_YUV420;
1198 status = i965_check_alloc_surface_bo(ctx, input_obj_surface,
1199 input_tiling, input_fourcc, input_sampling);
1200 if (status != VA_STATUS_SUCCESS)
1204 /* Determine output surface info.
1206 XXX: derive an actual surface format compatible with the input
1207 surface chroma format */
1208 output_obj_surface = proc_ctx->surface_output_vebox_object ?
1209 proc_ctx->surface_output_vebox_object : proc_ctx->surface_output_object;
1210 if (output_obj_surface->bo) {
1211 output_fourcc = output_obj_surface->fourcc;
1212 output_sampling = output_obj_surface->subsampling;
1213 dri_bo_get_tiling(output_obj_surface->bo, &output_tiling, &swizzle);
1214 output_tiling = !!output_tiling;
1216 output_fourcc = VA_FOURCC_NV12;
1217 output_sampling = SUBSAMPLE_YUV420;
1219 status = i965_check_alloc_surface_bo(ctx, output_obj_surface,
1220 output_tiling, output_fourcc, output_sampling);
1221 if (status != VA_STATUS_SUCCESS)
1225 /* Update VEBOX pipeline formats */
1226 proc_ctx->fourcc_input = input_fourcc;
1227 proc_ctx->fourcc_output = output_fourcc;
1228 if (input_fourcc != output_fourcc) {
1229 proc_ctx->filters_mask |= VPP_IECP_CSC;
1231 if (input_fourcc == VA_FOURCC_RGBA &&
1232 (output_fourcc == VA_FOURCC_NV12 ||
1233 output_fourcc == VA_FOURCC_P010)) {
1234 proc_ctx->filters_mask |= VPP_IECP_CSC_TRANSFORM;
1235 } else if (output_fourcc == VA_FOURCC_RGBA &&
1236 (input_fourcc == VA_FOURCC_NV12 ||
1237 input_fourcc == VA_FOURCC_P010)) {
1238 proc_ctx->filters_mask |= VPP_IECP_CSC_TRANSFORM;
1242 proc_ctx->is_iecp_enabled = (proc_ctx->filters_mask & VPP_IECP_MASK) != 0;
1244 /* Create pipeline surfaces */
1245 for (i = 0; i < ARRAY_ELEMS(proc_ctx->frame_store); i ++) {
1246 struct object_surface *obj_surface;
1247 VASurfaceID new_surface;
1249 if (proc_ctx->frame_store[i].obj_surface)
1250 continue; // user allocated surface, not VEBOX internal
1252 status = i965_CreateSurfaces(ctx, proc_ctx->width_input,
1253 proc_ctx->height_input, VA_RT_FORMAT_YUV420, 1, &new_surface);
1254 if (status != VA_STATUS_SUCCESS)
1257 obj_surface = SURFACE(new_surface);
1258 assert(obj_surface != NULL);
1260 if (i <= FRAME_IN_PREVIOUS || i == FRAME_OUT_CURRENT_DN) {
1261 status = i965_check_alloc_surface_bo(ctx, obj_surface,
1262 input_tiling, input_fourcc, input_sampling);
1263 } else if (i == FRAME_IN_STMM || i == FRAME_OUT_STMM) {
1264 status = i965_check_alloc_surface_bo(ctx, obj_surface,
1265 1, input_fourcc, input_sampling);
1266 } else if (i >= FRAME_OUT_CURRENT) {
1267 status = i965_check_alloc_surface_bo(ctx, obj_surface,
1268 output_tiling, output_fourcc, output_sampling);
1270 if (status != VA_STATUS_SUCCESS)
1273 proc_ctx->frame_store[i].obj_surface = obj_surface;
1274 proc_ctx->frame_store[i].is_internal_surface = 1;
1275 proc_ctx->frame_store[i].is_scratch_surface = 1;
1278 /* Allocate DNDI state table */
1279 drm_intel_bo_unreference(proc_ctx->dndi_state_table.bo);
1280 bo = drm_intel_bo_alloc(i965->intel.bufmgr, "vebox: dndi state Buffer",
1282 proc_ctx->dndi_state_table.bo = bo;
1284 return VA_STATUS_ERROR_ALLOCATION_FAILED;
1286 /* Allocate IECP state table */
1287 drm_intel_bo_unreference(proc_ctx->iecp_state_table.bo);
1288 bo = drm_intel_bo_alloc(i965->intel.bufmgr, "vebox: iecp state Buffer",
1290 proc_ctx->iecp_state_table.bo = bo;
1292 return VA_STATUS_ERROR_ALLOCATION_FAILED;
1294 /* Allocate Gamut state table */
1295 drm_intel_bo_unreference(proc_ctx->gamut_state_table.bo);
1296 bo = drm_intel_bo_alloc(i965->intel.bufmgr, "vebox: gamut state Buffer",
1298 proc_ctx->gamut_state_table.bo = bo;
1300 return VA_STATUS_ERROR_ALLOCATION_FAILED;
1302 /* Allocate vertex state table */
1303 drm_intel_bo_unreference(proc_ctx->vertex_state_table.bo);
1304 bo = drm_intel_bo_alloc(i965->intel.bufmgr, "vebox: vertex state Buffer",
1306 proc_ctx->vertex_state_table.bo = bo;
1308 return VA_STATUS_ERROR_ALLOCATION_FAILED;
1310 return VA_STATUS_SUCCESS;
/* Bind surfaces into the VEBOX frame store for the current pass:
   forward-reference (previous input), current input, STMM ping-pong pair
   and the selected output slot.  Entries flagged is_scratch_surface are
   VEBOX-internal and preserved; user surfaces are (re)bound each call.
   Returns VA_STATUS_SUCCESS on success.
   NOTE(review): this listing is elided -- some braces/else branches of the
   visible conditionals are not shown here. */
1314 gen75_vebox_ensure_surfaces(VADriverContextP ctx,
1315 struct intel_vebox_context *proc_ctx)
1317 struct i965_driver_data * const i965 = i965_driver_data(ctx);
1318 struct object_surface *obj_surface;
1319 VEBFrameStore *ifs, *ofs;
1320 bool is_new_frame = 0;
1323 /* Update the previous input surface */
1324 obj_surface = proc_ctx->surface_input_object;
/* A new frame is detected when the incoming surface id differs from the
   one currently bound as FRAME_IN_CURRENT. */
1326 is_new_frame = proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id !=
1327 obj_surface->base.id;
1329 ifs = &proc_ctx->frame_store[FRAME_IN_PREVIOUS];
/* When DN is enabled, the denoised output of the previous pass acts as
   the previous-input reference; otherwise the raw previous input does. */
1330 ofs = &proc_ctx->frame_store[proc_ctx->is_dn_enabled ?
1331 FRAME_OUT_CURRENT_DN : FRAME_IN_CURRENT];
1333 const VAProcPipelineParameterBuffer * const pipe =
1334 proc_ctx->pipeline_param;
/* Only honour an explicitly supplied, valid forward reference. */
1336 if (pipe->num_forward_references < 1)
1338 if (pipe->forward_references[0] == VA_INVALID_ID)
1341 obj_surface = SURFACE(pipe->forward_references[0]);
1342 if (!obj_surface || obj_surface->base.id == ifs->surface_id)
1345 frame_store_clear(ifs, ctx);
/* The user-supplied reference supersedes the internally tracked copy. */
1346 if (obj_surface->base.id == ofs->surface_id) {
1348 frame_store_reset(ofs);
1350 ifs->obj_surface = obj_surface;
1351 ifs->surface_id = obj_surface->base.id;
1352 ifs->is_internal_surface = 0;
1353 ifs->is_scratch_surface = 0;
1358 /* Update the input surface */
1359 obj_surface = proc_ctx->surface_input_vebox_object ?
1360 proc_ctx->surface_input_vebox_object : proc_ctx->surface_input_object;
1362 ifs = &proc_ctx->frame_store[FRAME_IN_CURRENT];
1363 frame_store_clear(ifs, ctx);
1364 ifs->obj_surface = obj_surface;
/* Record the *user* surface id even when a temporary VEBOX-format surface
   is bound, so new-frame detection keys off the user surface. */
1365 ifs->surface_id = proc_ctx->surface_input_object->base.id;
1366 ifs->is_internal_surface = proc_ctx->surface_input_vebox_object != NULL;
1367 ifs->is_scratch_surface = 0;
1369 /* Update the Spatial Temporal Motion Measure (STMM) surfaces */
/* Swap the STMM input/output slots (ping-pong between passes). */
1371 const VEBFrameStore tmpfs = proc_ctx->frame_store[FRAME_IN_STMM];
1372 proc_ctx->frame_store[FRAME_IN_STMM] =
1373 proc_ctx->frame_store[FRAME_OUT_STMM];
1374 proc_ctx->frame_store[FRAME_OUT_STMM] = tmpfs;
1377 /* Reset the output surfaces to defaults. i.e. clean from user surfaces */
1378 for (i = FRAME_OUT_CURRENT_DN; i <= FRAME_OUT_PREVIOUS; i++) {
1379 ofs = &proc_ctx->frame_store[i];
1380 if (!ofs->is_scratch_surface)
1381 ofs->obj_surface = NULL;
1382 ofs->surface_id = proc_ctx->surface_input_object->base.id;
1385 /* Update the output surfaces */
1386 obj_surface = proc_ctx->surface_output_vebox_object ?
1387 proc_ctx->surface_output_vebox_object : proc_ctx->surface_output_object;
/* current_output_type feeds the VEB_STATE "DI output frame" field
   (see bdw_veb_state_command); NOTE(review): confirm the 0/2 encoding
   against the hardware PRM. */
1389 proc_ctx->current_output_type = 2;
1390 if (proc_ctx->filters_mask == VPP_DNDI_DN && !proc_ctx->is_iecp_enabled)
1391 proc_ctx->current_output = FRAME_OUT_CURRENT_DN;
1392 else if (proc_ctx->is_di_adv_enabled && !proc_ctx->is_first_frame) {
1393 proc_ctx->current_output_type = 0;
/* Advanced DI emits one field per call: second field -> current frame,
   first field -> previous frame. */
1394 proc_ctx->current_output = proc_ctx->is_second_field ?
1395 FRAME_OUT_CURRENT : FRAME_OUT_PREVIOUS;
1397 proc_ctx->current_output = FRAME_OUT_CURRENT;
1398 ofs = &proc_ctx->frame_store[proc_ctx->current_output];
1399 frame_store_clear(ofs, ctx);
1400 ofs->obj_surface = obj_surface;
1401 ofs->surface_id = proc_ctx->surface_input_object->base.id;
1402 ofs->is_internal_surface = proc_ctx->surface_output_vebox_object != NULL;
1403 ofs->is_scratch_surface = 0;
1405 return VA_STATUS_SUCCESS;
/* Prepare input/output for the VEBOX pipeline: record processing
   dimensions, decide which pre/post conversions are needed
   (PRE_FORMAT_CONVERT / POST_FORMAT_CONVERT / POST_SCALING_CONVERT in
   proc_ctx->format_convert_flags) and lazily create the temporary NV12
   surfaces used for those conversions.
   Returns VA_STATUS_SUCCESS, VA_STATUS_ERROR_INVALID_PARAMETER on missing
   surfaces, or VA_STATUS_ERROR_UNIMPLEMENTED for unsupported formats. */
1408 VAStatus hsw_veb_pre_format_convert(VADriverContextP ctx,
1409 struct intel_vebox_context *proc_ctx)
1412 struct i965_driver_data *i965 = i965_driver_data(ctx);
1413 struct object_surface* obj_surf_input = proc_ctx->surface_input_object;
1414 struct object_surface* obj_surf_output = proc_ctx->surface_output_object;
1415 struct object_surface* obj_surf_input_vebox;
1416 struct object_surface* obj_surf_output_vebox;
1418 proc_ctx->format_convert_flags = 0;
1420 if ((obj_surf_input == NULL) || (obj_surf_output == NULL)) {
1421 ASSERT_RET(0, VA_STATUS_ERROR_INVALID_PARAMETER);
/* Input dimensions: surface_region if given, else the full surface. */
1424 if (proc_ctx->pipeline_param->surface_region) {
1425 proc_ctx->width_input = proc_ctx->pipeline_param->surface_region->width;
1426 proc_ctx->height_input = proc_ctx->pipeline_param->surface_region->height;
1428 proc_ctx->width_input = obj_surf_input->orig_width;
1429 proc_ctx->height_input = obj_surf_input->orig_height;
/* Output dimensions: output_region if given, else the full surface. */
1432 if (proc_ctx->pipeline_param->output_region) {
1433 proc_ctx->width_output = proc_ctx->pipeline_param->output_region->width;
1434 proc_ctx->height_output = proc_ctx->pipeline_param->output_region->height;
1436 proc_ctx->width_output = obj_surf_output->orig_width;
1437 proc_ctx->height_output = obj_surf_output->orig_height;
1440 /* processing only a partial frame is not supported */
1442 assert(proc_ctx->width_input == proc_ctx->pipeline_param->surface_region->width);
1443 assert(proc_ctx->height_input == proc_ctx->pipeline_param->surface_region->height);
1444 assert(proc_ctx->width_output == proc_ctx->pipeline_param->output_region->width);
1445 assert(proc_ctx->height_output == proc_ctx->pipeline_param->output_region->height);
/* Any size mismatch requires a scaling pass after the VEBOX run. */
1448 if (proc_ctx->width_output != proc_ctx->width_input ||
1449 proc_ctx->height_output != proc_ctx->height_input) {
1450 proc_ctx->format_convert_flags |= POST_SCALING_CONVERT;
1453 /* convert the following format to NV12 format */
1454 if (obj_surf_input->fourcc == VA_FOURCC_YV12 ||
1455 obj_surf_input->fourcc == VA_FOURCC_I420 ||
1456 obj_surf_input->fourcc == VA_FOURCC_IMC1 ||
1457 obj_surf_input->fourcc == VA_FOURCC_IMC3 ||
1458 obj_surf_input->fourcc == VA_FOURCC_RGBA ||
1459 obj_surf_input->fourcc == VA_FOURCC_BGRA) {
1461 proc_ctx->format_convert_flags |= PRE_FORMAT_CONVERT;
/* These formats are consumed by the VEBOX engine directly. */
1463 } else if (obj_surf_input->fourcc == VA_FOURCC_AYUV ||
1464 obj_surf_input->fourcc == VA_FOURCC_YUY2 ||
1465 obj_surf_input->fourcc == VA_FOURCC_NV12 ||
1466 obj_surf_input->fourcc == VA_FOURCC_P010) {
1468 // nothing to do here
1470 /* not support other format as input */
1471 ASSERT_RET(0, VA_STATUS_ERROR_UNIMPLEMENTED);
/* Lazily create the temporary NV12 input surface and convert into it. */
1474 if (proc_ctx->format_convert_flags & PRE_FORMAT_CONVERT) {
1475 if (proc_ctx->surface_input_vebox_object == NULL) {
1476 va_status = i965_CreateSurfaces(ctx,
1477 proc_ctx->width_input,
1478 proc_ctx->height_input,
1479 VA_RT_FORMAT_YUV420,
1481 &(proc_ctx->surface_input_vebox));
1482 assert(va_status == VA_STATUS_SUCCESS);
1483 obj_surf_input_vebox = SURFACE(proc_ctx->surface_input_vebox);
1484 assert(obj_surf_input_vebox);
1486 if (obj_surf_input_vebox) {
1487 proc_ctx->surface_input_vebox_object = obj_surf_input_vebox;
1488 i965_check_alloc_surface_bo(ctx, obj_surf_input_vebox, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
1492 vpp_surface_convert(ctx, proc_ctx->surface_input_object, proc_ctx->surface_input_vebox_object);
1495 /* create one temporary NV12 surface for conversion */
1496 if (obj_surf_output->fourcc == VA_FOURCC_YV12 ||
1497 obj_surf_output->fourcc == VA_FOURCC_I420 ||
1498 obj_surf_output->fourcc == VA_FOURCC_IMC1 ||
1499 obj_surf_output->fourcc == VA_FOURCC_IMC3 ||
1500 obj_surf_output->fourcc == VA_FOURCC_RGBA ||
1501 obj_surf_output->fourcc == VA_FOURCC_BGRA) {
1503 proc_ctx->format_convert_flags |= POST_FORMAT_CONVERT;
1504 } else if (obj_surf_output->fourcc == VA_FOURCC_AYUV ||
1505 obj_surf_output->fourcc == VA_FOURCC_YUY2 ||
1506 obj_surf_output->fourcc == VA_FOURCC_NV12 ||
1507 obj_surf_output->fourcc == VA_FOURCC_P010) {
1509 /* Nothing to do here */
1511 /* not support other format as input */
1512 ASSERT_RET(0, VA_STATUS_ERROR_UNIMPLEMENTED);
/* Lazily create the temporary NV12 output surface when the VEBOX result
   must be converted and/or scaled afterwards. */
1515 if (proc_ctx->format_convert_flags & POST_FORMAT_CONVERT ||
1516 proc_ctx->format_convert_flags & POST_SCALING_CONVERT) {
1517 if (proc_ctx->surface_output_vebox_object == NULL) {
1518 va_status = i965_CreateSurfaces(ctx,
1519 proc_ctx->width_input,
1520 proc_ctx->height_input,
1521 VA_RT_FORMAT_YUV420,
1523 &(proc_ctx->surface_output_vebox));
1524 assert(va_status == VA_STATUS_SUCCESS);
1525 obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_vebox);
1526 assert(obj_surf_output_vebox);
1528 if (obj_surf_output_vebox) {
1529 proc_ctx->surface_output_vebox_object = obj_surf_output_vebox;
1530 i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
/* Scaled-output surface, sized to the *output* dimensions. */
1535 if (proc_ctx->format_convert_flags & POST_SCALING_CONVERT) {
1536 if (proc_ctx->surface_output_scaled_object == NULL) {
1537 va_status = i965_CreateSurfaces(ctx,
1538 proc_ctx->width_output,
1539 proc_ctx->height_output,
1540 VA_RT_FORMAT_YUV420,
1542 &(proc_ctx->surface_output_scaled));
1543 assert(va_status == VA_STATUS_SUCCESS);
1544 obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_scaled);
1545 assert(obj_surf_output_vebox);
1547 if (obj_surf_output_vebox) {
1548 proc_ctx->surface_output_scaled_object = obj_surf_output_vebox;
1549 i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
1554 return VA_STATUS_SUCCESS;
/* Copy/convert the VEBOX result into the user's output surface, according
   to the flags computed by hsw_veb_pre_format_convert():
   - POST_COPY_CONVERT: copy the frame saved on a previous call,
   - no flags: result already landed in the output surface,
   - POST_FORMAT_CONVERT only: one conversion pass,
   - POST_SCALING_CONVERT: scale first, then convert into the output. */
1558 hsw_veb_post_format_convert(VADriverContextP ctx,
1559 struct intel_vebox_context *proc_ctx)
1561 struct object_surface *obj_surface = NULL;
1562 VAStatus va_status = VA_STATUS_SUCCESS;
1564 obj_surface = proc_ctx->frame_store[proc_ctx->current_output].obj_surface;
1566 if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
1567 /* copy the saved frame in the second call */
1568 va_status = vpp_surface_convert(ctx, obj_surface, proc_ctx->surface_output_object);
1569 } else if (!(proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
1570 !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)) {
1571 /* Output surface format is covered by vebox pipeline and
1572 * processed picture is already stored in output surface
1573 * so nothing will be done here */
1574 } else if ((proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
1575 !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)) {
1576 /* convert and copy NV12 to YV12/IMC3/IMC2/RGBA output*/
1577 va_status = vpp_surface_convert(ctx, obj_surface, proc_ctx->surface_output_object);
1579 } else if (proc_ctx->format_convert_flags & POST_SCALING_CONVERT) {
1580 VAProcPipelineParameterBuffer * const pipe = proc_ctx->pipeline_param;
1581 /* scaling, convert and copy NV12 to YV12/IMC3/IMC2/RGBA output*/
1582 assert(obj_surface->fourcc == VA_FOURCC_NV12);
1584 /* first step: surface scaling */
1585 vpp_surface_scaling(ctx, obj_surface,
1586 proc_ctx->surface_output_scaled_object, pipe->filter_flags);
1588 /* second step: color format convert and copy to output */
1589 obj_surface = proc_ctx->surface_output_object;
1591 va_status = vpp_surface_convert(ctx, proc_ctx->surface_output_scaled_object, obj_surface);
/* Translate the VA-API filter list of the pipeline parameter buffer into
   proc_ctx->filters_mask and stash per-filter parameter pointers.
   Returns VA_STATUS_ERROR_INVALID_PARAMETER on a bad filter buffer and
   VA_STATUS_ERROR_UNSUPPORTED_FILTER for unknown filter types.
   NOTE(review): elided listing -- the break statements between switch
   cases are not visible here. */
1598 gen75_vebox_init_pipe_params(VADriverContextP ctx,
1599 struct intel_vebox_context *proc_ctx)
1601 struct i965_driver_data * const i965 = i965_driver_data(ctx);
1602 const VAProcPipelineParameterBuffer * const pipe = proc_ctx->pipeline_param;
1603 VAProcFilterParameterBuffer *filter;
1606 proc_ctx->filters_mask = 0;
1607 for (i = 0; i < pipe->num_filters; i++) {
1608 struct object_buffer * const obj_buffer = BUFFER(pipe->filters[i]);
1610 assert(obj_buffer && obj_buffer->buffer_store);
1611 if (!obj_buffer || !obj_buffer->buffer_store)
1612 return VA_STATUS_ERROR_INVALID_PARAMETER;
1614 filter = (VAProcFilterParameterBuffer *)
1615 obj_buffer->buffer_store->buffer;
1616 switch (filter->type) {
1617 case VAProcFilterNoiseReduction:
1618 proc_ctx->filters_mask |= VPP_DNDI_DN;
1619 proc_ctx->filter_dn = filter;
1621 case VAProcFilterDeinterlacing:
1622 proc_ctx->filters_mask |= VPP_DNDI_DI;
1623 proc_ctx->filter_di = filter;
1625 case VAProcFilterColorBalance:
1626 proc_ctx->filters_mask |= VPP_IECP_PRO_AMP;
1627 proc_ctx->filter_iecp_amp = filter;
/* Color balance carries one element per attribute (hue, etc.). */
1628 proc_ctx->filter_iecp_amp_num_elements = obj_buffer->num_elements;
1630 case VAProcFilterSkinToneEnhancement:
1631 proc_ctx->filters_mask |= VPP_IECP_STD_STE;
1632 proc_ctx->filter_iecp_std = filter;
1634 case VAProcFilterSharpening:
1635 proc_ctx->filters_mask |= VPP_SHARP;
1638 WARN_ONCE("unsupported filter (type: %d)\n", filter->type);
1639 return VA_STATUS_ERROR_UNSUPPORTED_FILTER;
/* With no explicit filter, still run the pipeline as a plain CSC pass. */
1643 if (proc_ctx->filters_mask == 0)
1644 proc_ctx->filters_mask |= VPP_IECP_CSC;
1646 return VA_STATUS_SUCCESS;
/* Derive per-pass filter state (is_iecp/dn/di_enabled, advanced-DI,
   first-frame and second-field flags) from filters_mask and the
   deinterlacing parameters.  Returns VA_STATUS_SUCCESS, or an error when
   the DI configuration or references are invalid. */
1650 gen75_vebox_init_filter_params(VADriverContextP ctx,
1651 struct intel_vebox_context *proc_ctx)
1653 proc_ctx->format_convert_flags = 0; /* initialized in hsw_veb_pre_format_convert() */
1655 proc_ctx->is_iecp_enabled = (proc_ctx->filters_mask & VPP_IECP_MASK) != 0;
1656 proc_ctx->is_dn_enabled = (proc_ctx->filters_mask & VPP_DNDI_DN) != 0;
1657 proc_ctx->is_di_enabled = (proc_ctx->filters_mask & VPP_DNDI_DI) != 0;
1658 proc_ctx->is_di_adv_enabled = 0;
1659 proc_ctx->is_first_frame = 0;
1660 proc_ctx->is_second_field = 0;
1662 /* Check whether we are deinterlacing the second field */
1663 if (proc_ctx->is_di_enabled) {
1664 const VAProcFilterParameterBufferDeinterlacing * const deint_params =
1665 proc_ctx->filter_di;
1667 const unsigned int tff =
1668 !(deint_params->flags & VA_DEINTERLACING_BOTTOM_FIELD_FIRST);
1669 const unsigned int is_top_field =
1670 !(deint_params->flags & VA_DEINTERLACING_BOTTOM_FIELD);
/* The second field in display order is the one whose parity differs from
   the field-first parity (tff XOR top-field). */
1672 if ((tff ^ is_top_field) != 0) {
1673 struct object_surface * const obj_surface =
1674 proc_ctx->surface_input_object;
/* The second field must come from the same surface as the first one. */
1676 if (proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id != obj_surface->base.id) {
1677 WARN_ONCE("invalid surface provided for second field\n");
1678 return VA_STATUS_ERROR_INVALID_PARAMETER;
1680 proc_ctx->is_second_field = 1;
1684 /* Check whether we are deinterlacing the first frame */
1685 if (proc_ctx->is_di_enabled) {
1686 const VAProcFilterParameterBufferDeinterlacing * const deint_params =
1687 proc_ctx->filter_di;
1689 switch (deint_params->algorithm) {
1690 case VAProcDeinterlacingBob:
/* Bob has no temporal state: every frame is a "first frame". */
1691 proc_ctx->is_first_frame = 1;
1693 case VAProcDeinterlacingMotionAdaptive:
1694 case VAProcDeinterlacingMotionCompensated:
1695 if (proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id == VA_INVALID_ID)
1696 proc_ctx->is_first_frame = 1;
1697 else if (proc_ctx->is_second_field) {
1698 /* At this stage, we have already deinterlaced the
1699 first field successfully. So, the first frame flag
1700 is triggered if the previous field was deinterlaced
1701 without reference frame */
1702 if (proc_ctx->frame_store[FRAME_IN_PREVIOUS].surface_id == VA_INVALID_ID)
1703 proc_ctx->is_first_frame = 1;
/* Motion adaptive/compensated DI requires a forward reference. */
1705 const VAProcPipelineParameterBuffer * const pipe =
1706 proc_ctx->pipeline_param;
1708 if (pipe->num_forward_references < 1 ||
1709 pipe->forward_references[0] == VA_INVALID_ID) {
1710 WARN_ONCE("A forward temporal reference is needed for Motion adaptive/compensated deinterlacing !!!\n");
1711 return VA_STATUS_ERROR_INVALID_PARAMETER;
1714 proc_ctx->is_di_adv_enabled = 1;
1717 WARN_ONCE("unsupported deinterlacing algorithm (%d)\n",
1718 deint_params->algorithm);
1719 return VA_STATUS_ERROR_UNSUPPORTED_FILTER;
1722 return VA_STATUS_SUCCESS;
/* Haswell (gen7.5) entry point for one VEBOX processing pass.
   Sequence: parse filters, derive filter state, pre-convert input,
   (re)bind frame-store surfaces, ensure their storage, then either run
   the GPE sharpness kernel, short-circuit on POST_COPY_CONVERT, or emit
   the VEBOX batch, and finally post-convert into the user output. */
1726 gen75_vebox_process_picture(VADriverContextP ctx,
1727 struct intel_vebox_context *proc_ctx)
1731 status = gen75_vebox_init_pipe_params(ctx, proc_ctx);
1732 if (status != VA_STATUS_SUCCESS)
1735 status = gen75_vebox_init_filter_params(ctx, proc_ctx);
1736 if (status != VA_STATUS_SUCCESS)
1739 status = hsw_veb_pre_format_convert(ctx, proc_ctx);
1740 if (status != VA_STATUS_SUCCESS)
1743 status = gen75_vebox_ensure_surfaces(ctx, proc_ctx);
1744 if (status != VA_STATUS_SUCCESS)
1747 status = gen75_vebox_ensure_surfaces_storage(ctx, proc_ctx);
1748 if (status != VA_STATUS_SUCCESS)
/* Sharpening bypasses the VEBOX engine and runs on the GPE pipeline. */
1751 if (proc_ctx->filters_mask & VPP_SHARP_MASK) {
1752 vpp_sharpness_filtering(ctx, proc_ctx);
1753 } else if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
1754 assert(proc_ctx->is_second_field);
1755 /* directly copy the saved frame in the second call */
/* Build and submit the VEBOX batch: state tables, VEB_STATE, surface
   states for input/output, then the DNDI/IECP command. */
1757 intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
1758 intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
1759 hsw_veb_state_table_setup(ctx, proc_ctx);
1760 hsw_veb_state_command(ctx, proc_ctx);
1761 hsw_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
1762 hsw_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
1763 hsw_veb_dndi_iecp_command(ctx, proc_ctx);
1764 intel_batchbuffer_end_atomic(proc_ctx->batch);
1765 intel_batchbuffer_flush(proc_ctx->batch);
1768 status = hsw_veb_post_format_convert(ctx, proc_ctx);
/* Tear down a VEBOX context: release the GPE sharpness context, the
   temporary conversion/scaling surfaces, every frame-store entry, the
   four VEBOX state-table BOs and the batch buffer.
   Counterpart of gen75_vebox_context_init(). */
1773 void gen75_vebox_context_destroy(VADriverContextP ctx,
1774 struct intel_vebox_context *proc_ctx)
1778 if (proc_ctx->vpp_gpe_ctx) {
1779 vpp_gpe_context_destroy(ctx, proc_ctx->vpp_gpe_ctx);
1780 proc_ctx->vpp_gpe_ctx = NULL;
/* Temporary surfaces created lazily by hsw_veb_pre_format_convert(). */
1783 if (proc_ctx->surface_input_vebox != VA_INVALID_ID) {
1784 i965_DestroySurfaces(ctx, &proc_ctx->surface_input_vebox, 1);
1785 proc_ctx->surface_input_vebox = VA_INVALID_ID;
1786 proc_ctx->surface_input_vebox_object = NULL;
1789 if (proc_ctx->surface_output_vebox != VA_INVALID_ID) {
1790 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_vebox, 1);
1791 proc_ctx->surface_output_vebox = VA_INVALID_ID;
1792 proc_ctx->surface_output_vebox_object = NULL;
1795 if (proc_ctx->surface_output_scaled != VA_INVALID_ID) {
1796 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_scaled, 1);
1797 proc_ctx->surface_output_scaled = VA_INVALID_ID;
1798 proc_ctx->surface_output_scaled_object = NULL;
1801 for (i = 0; i < ARRAY_ELEMS(proc_ctx->frame_store); i++)
1802 frame_store_clear(&proc_ctx->frame_store[i], ctx);
1804 /* dndi state table */
1805 drm_intel_bo_unreference(proc_ctx->dndi_state_table.bo);
1806 proc_ctx->dndi_state_table.bo = NULL;
1808 /* iecp state table */
1809 drm_intel_bo_unreference(proc_ctx->iecp_state_table.bo);
1810 proc_ctx->iecp_state_table.bo = NULL;
1812 /* gamut state table */
1813 drm_intel_bo_unreference(proc_ctx->gamut_state_table.bo);
1814 proc_ctx->gamut_state_table.bo = NULL;
1816 /* vertex state table */
1817 drm_intel_bo_unreference(proc_ctx->vertex_state_table.bo);
1818 proc_ctx->vertex_state_table.bo = NULL;
1820 intel_batchbuffer_free(proc_ctx->batch);
/* Allocate and initialize a VEBOX context: a zeroed intel_vebox_context
   with a VEBOX-ring batch buffer, all frame-store slots marked invalid
   and all temporary surface handles cleared.
   Caller owns the result and frees it via gen75_vebox_context_destroy(). */
1825 struct intel_vebox_context * gen75_vebox_context_init(VADriverContextP ctx)
1827 struct intel_driver_data *intel = intel_driver_data(ctx);
1828 struct intel_vebox_context *proc_context = calloc(1, sizeof(struct intel_vebox_context));
1831 assert(proc_context);
/* Commands for this context execute on the VEBOX ring. */
1832 proc_context->batch = intel_batchbuffer_new(intel, I915_EXEC_VEBOX, 0);
1834 for (i = 0; i < ARRAY_ELEMS(proc_context->frame_store); i++)
1835 proc_context->frame_store[i].surface_id = VA_INVALID_ID;
1837 proc_context->filters_mask = 0;
1838 proc_context->surface_output_object = NULL;
1839 proc_context->surface_input_object = NULL;
1840 proc_context->surface_input_vebox = VA_INVALID_ID;
1841 proc_context->surface_input_vebox_object = NULL;
1842 proc_context->surface_output_vebox = VA_INVALID_ID;
1843 proc_context->surface_output_vebox_object = NULL;
1844 proc_context->surface_output_scaled = VA_INVALID_ID;
1845 proc_context->surface_output_scaled_object = NULL;
/* NOTE(review): filters_mask is assigned twice (also on line 1837). */
1846 proc_context->filters_mask = 0;
1847 proc_context->format_convert_flags = 0;
1848 proc_context->vpp_gpe_ctx = NULL;
1850 return proc_context;
/* Emit the Broadwell VEB_STATE command (12 dwords): the pipeline enable
   bits in DW1 followed by relocations to the DNDI, IECP, gamut and vertex
   state tables set up by the *_state_table_setup() helpers. */
1853 void bdw_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
1855 struct intel_batchbuffer *batch = proc_ctx->batch;
1857 BEGIN_VEB_BATCH(batch, 0xc);
1858 OUT_VEB_BATCH(batch, VEB_STATE | (0xc - 2));
1859 OUT_VEB_BATCH(batch,
1860 0 << 25 | // state surface control bits
1861 0 << 23 | // reserved.
1862 0 << 22 | // gamut expansion position
1863 0 << 15 | // reserved.
1864 0 << 14 | // single slice vebox enable
1865 0 << 13 | // hot pixel filter enable
1866 0 << 12 | // alpha plane enable
1867 0 << 11 | // vignette enable
1868 0 << 10 | // demosaic enable
1869 proc_ctx->current_output_type << 8 | // DI output frame
1870 1 << 7 | // 444->422 downsample method
1871 1 << 6 | // 422->420 downsample method
1872 proc_ctx->is_first_frame << 5 | // DN/DI first frame
1873 proc_ctx->is_di_enabled << 4 | // DI enable
1874 proc_ctx->is_dn_enabled << 3 | // DN enable
1875 proc_ctx->is_iecp_enabled << 2 | // global IECP enabled
1876 0 << 1 | // ColorGamutCompressionEnable
1877 0) ; // ColorGamutExpansionEnable.
/* State-table pointers (GPU relocations, instruction domain, read-only). */
1880 proc_ctx->dndi_state_table.bo,
1881 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1884 proc_ctx->iecp_state_table.bo,
1885 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1888 proc_ctx->gamut_state_table.bo,
1889 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1892 proc_ctx->vertex_state_table.bo,
1893 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1896 OUT_VEB_BATCH(batch, 0);/*capture pipe state pointer*/
1897 OUT_VEB_BATCH(batch, 0);
1899 ADVANCE_VEB_BATCH(batch);
/* Emit the Broadwell VEB_DNDI_IECP_STATE command (20 dwords): the
   processing width followed by relocations for every frame-store surface
   (inputs read-only, outputs read/write in the render domain). */
1902 void bdw_veb_dndi_iecp_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
1904 struct intel_batchbuffer *batch = proc_ctx->batch;
1905 unsigned char frame_ctrl_bits = 0;
1906 struct object_surface *obj_surface = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
/* The hardware wants a 64-pixel-aligned width, clamped to the surface. */
1907 unsigned int width64 = ALIGN(proc_ctx->width_input, 64);
1909 assert(obj_surface);
1910 if (width64 > obj_surface->orig_width)
1911 width64 = obj_surface->orig_width;
1913 BEGIN_VEB_BATCH(batch, 0x14);
1914 OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (0x14 - 2));//DWord 0
1915 OUT_VEB_BATCH(batch, (width64 - 1));
1918 proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface->bo,
1919 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 2
1922 proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface->bo,
1923 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 4
1926 proc_ctx->frame_store[FRAME_IN_STMM].obj_surface->bo,
1927 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 6
1930 proc_ctx->frame_store[FRAME_OUT_STMM].obj_surface->bo,
1931 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 8
1934 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface->bo,
1935 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 10
1938 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface->bo,
1939 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 12
1942 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface->bo,
1943 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 14
1946 proc_ctx->frame_store[FRAME_OUT_STATISTIC].obj_surface->bo,
1947 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 16
1949 OUT_VEB_BATCH(batch, 0); //DWord 18
1950 OUT_VEB_BATCH(batch, 0); //DWord 19
1952 ADVANCE_VEB_BATCH(batch);
/* Broadwell (gen8) entry point for one VEBOX processing pass.  Same flow
   as gen75_vebox_process_picture(), but emits the gen8 VEB_STATE and
   VEB_DNDI_IECP_STATE command variants (bdw_veb_*). */
1956 gen8_vebox_process_picture(VADriverContextP ctx,
1957 struct intel_vebox_context *proc_ctx)
1961 status = gen75_vebox_init_pipe_params(ctx, proc_ctx);
1962 if (status != VA_STATUS_SUCCESS)
1965 status = gen75_vebox_init_filter_params(ctx, proc_ctx);
1966 if (status != VA_STATUS_SUCCESS)
1969 status = hsw_veb_pre_format_convert(ctx, proc_ctx);
1970 if (status != VA_STATUS_SUCCESS)
1973 status = gen75_vebox_ensure_surfaces(ctx, proc_ctx);
1974 if (status != VA_STATUS_SUCCESS)
1977 status = gen75_vebox_ensure_surfaces_storage(ctx, proc_ctx);
1978 if (status != VA_STATUS_SUCCESS)
/* Sharpening bypasses the VEBOX engine and runs on the GPE pipeline. */
1981 if (proc_ctx->filters_mask & VPP_SHARP_MASK) {
1982 vpp_sharpness_filtering(ctx, proc_ctx);
1983 } else if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
1984 assert(proc_ctx->is_second_field);
1985 /* directly copy the saved frame in the second call */
/* Build and submit the gen8 VEBOX batch. */
1987 intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
1988 intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
1989 hsw_veb_state_table_setup(ctx, proc_ctx);
1990 bdw_veb_state_command(ctx, proc_ctx);
1991 hsw_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
1992 hsw_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
1993 bdw_veb_dndi_iecp_command(ctx, proc_ctx);
1994 intel_batchbuffer_end_atomic(proc_ctx->batch);
1995 intel_batchbuffer_flush(proc_ctx->batch);
1998 status = hsw_veb_post_format_convert(ctx, proc_ctx);
/* Fill the Skylake DN/DI (denoise/deinterlace) state table, mapped at
   proc_ctx->dndi_state_table.ptr, with mostly fixed tuning constants.
   Only dndi_top_first and is_mcdi_enabled vary with the deinterlacing
   parameters; progressive_dn is currently always 1.
   NOTE(review): elided listing -- several low dword fields and the table
   tail are not visible here. */
2005 skl_veb_dndi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
2007 unsigned int* p_table ;
2008 unsigned int progressive_dn = 1;
2009 unsigned int dndi_top_first = 0;
2010 unsigned int is_mcdi_enabled = 0;
2012 if (proc_ctx->is_di_enabled) {
2013 const VAProcFilterParameterBufferDeinterlacing * const deint_params =
2014 proc_ctx->filter_di;
2018 /* If we are in "First Frame" mode, i.e. past frames are not
2019 available for motion measure, then don't use the TFF flag */
2020 dndi_top_first = !(deint_params->flags & (proc_ctx->is_first_frame ?
2021 VA_DEINTERLACING_BOTTOM_FIELD :
2022 VA_DEINTERLACING_BOTTOM_FIELD_FIRST));
2025 (deint_params->algorithm == VAProcDeinterlacingMotionCompensated);
2029 VAProcFilterParameterBufferDeinterlacing *di_param =
2030 (VAProcFilterParameterBufferDeinterlacing *) proc_ctx->filter_di;
2032 VAProcFilterParameterBuffer * dn_param =
2033 (VAProcFilterParameterBuffer *) proc_ctx->filter_dn;
2035 p_table = (unsigned int *)proc_ctx->dndi_state_table.ptr;
2037 *p_table ++ = (140 << 20 | // denoise stad threshold . w1
2038 192 << 12 | // dnmh_history_max
2039 7 << 8 | // dnmh_delta[3:0]
2040 1); // denoise moving pixel threshold
2042 *p_table ++ = (38 << 20 | // denoise asd threshold
2043 0 << 10 | // temporal diff th
2044 0); // low temporal diff th
2046 *p_table ++ = (progressive_dn << 28 | // progressive dn
2047 38 << 16 | // denoise th for sum of complexity measure
2048 32 << 10 | // dnmh_history_init[5:0]
2051 *p_table ++ = (0 << 28 | // hot pixel count
2052 0 << 20 | // hot pixel threshold
2053 1 << 12 | // block noise estimate edge threshold
2054 20); // block noise estimate noise threshold
2056 *p_table ++ = (140 << 16 | // chroma denoise stad threshold
2057 0 << 13 | // reserved
2058 1 << 12 | // chroma denoise enable
2059 13 << 6 | // chr temp diff th
2060 7); // chr temp diff low
2062 *p_table ++ = 0; // weight
2064 *p_table ++ = (0 << 16 | // dn_thmax
2067 *p_table ++ = (0 << 16 | // dn_prt5
2070 *p_table ++ = (0 << 16 | // dn_prt4
2073 *p_table ++ = (0 << 16 | // dn_prt2
2076 *p_table ++ = (0 << 16 | // dn_prt0
2077 0 << 10 | // dn_wd22
2081 *p_table ++ = (0 << 25 | // dn_wd12
2082 0 << 20 | // dn_wd11
2083 0 << 15 | // dn_wd10
2084 0 << 10 | // dn_wd02
2088 *p_table ++ = (2 << 10 | // stmm c2
2089 9 << 6 | // cat slope minus 1
2090 5 << 2 | // sad tight threshold
2093 *p_table ++ = (0 << 31 | // stmm blending constant select
2094 64 << 24 | // stmm trc1
2095 125 << 16 | // stmm trc2
2096 0 << 14 | // reserved
2097 30 << 8 | // multiplier for vecm
2098 150); // maximum stmm
2100 *p_table ++ = (118 << 24 | // minimum stmm
2101 0 << 22 | // stmm shift down
2102 1 << 20 | // stmm shift up
2103 5 << 16 | // stmm output shift
2104 100 << 8 | // sdi threshold
2107 *p_table ++ = (50 << 24 | // sdi fallback mode 1 t1 constant
2108 100 << 16 | // sdi fallback mode 1 t2 constant
2109 37 << 8 | // sdi fallback mode 2 constant(angle2x1)
2110 175); // fmd temporal difference threshold
2112 *p_table ++ = (16 << 24 | // fmd #1 vertical difference th . w7
2113 100 << 16 | // fmd #2 vertical difference th
2114 0 << 14 | // cat threshold
2115 2 << 8 | // fmd tear threshold
2116 is_mcdi_enabled << 7 | // mcdi enable, use motion compensated deinterlace algorithm
2117 dndi_top_first << 3 | // dn/di top first
2120 *p_table ++ = (10 << 19 | // neighbor pixel threshold
2121 0 << 16 | // fmd for 2nd field of previous frame
2122 25 << 10 | // mc pixel consistency threshold
2123 0 << 8 | // fmd for 1st field for current frame
2124 10 << 4 | // sad thb
/* Fill the Skylake IECP CSC transform sub-table (12 dwords at byte offset
   220 of the mapped IECP state table): a 3x3 coefficient matrix in s2.16
   fixed point plus per-channel input/output offsets, selected from the
   input/output fourcc pair.  When CSC is not requested or not needed the
   sub-table is zeroed (transform disabled). */
2128 void skl_veb_iecp_csc_transform_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
2130 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 220);
/* Identity matrix / zero offsets by default. */
2131 float tran_coef[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
2132 float v_coef[3] = {0.0, 0.0, 0.0};
2133 float u_coef[3] = {0.0, 0.0, 0.0};
2134 int is_transform_enabled = 0;
2136 if (!(proc_ctx->filters_mask & VPP_IECP_CSC_TRANSFORM)) {
2137 memset(p_table, 0, 12 * 4);
/* RGB -> limited-range YUV (BT.601-style coefficients).
   NOTE(review): VA_FOURCC_YVY2 below looks like a typo for YUY2 --
   no YVY2 fourcc is visible elsewhere in this file; verify. */
2141 if (proc_ctx->fourcc_input == VA_FOURCC_RGBA &&
2142 (proc_ctx->fourcc_output == VA_FOURCC_NV12 ||
2143 proc_ctx->fourcc_output == VA_FOURCC_YV12 ||
2144 proc_ctx->fourcc_output == VA_FOURCC_YVY2 ||
2145 proc_ctx->fourcc_output == VA_FOURCC_AYUV)) {
2147 tran_coef[0] = 0.257;
2148 tran_coef[1] = 0.504;
2149 tran_coef[2] = 0.098;
2150 tran_coef[3] = -0.148;
2151 tran_coef[4] = -0.291;
2152 tran_coef[5] = 0.439;
2153 tran_coef[6] = 0.439;
2154 tran_coef[7] = -0.368;
2155 tran_coef[8] = -0.071;
/* Chroma offsets (values are pre-scaled by 4 for the 10-bit pipe). */
2158 u_coef[1] = 128 * 4;
2159 u_coef[2] = 128 * 4;
2161 is_transform_enabled = 1;
/* Limited-range YUV -> RGB. */
2162 } else if ((proc_ctx->fourcc_input == VA_FOURCC_NV12 ||
2163 proc_ctx->fourcc_input == VA_FOURCC_YV12 ||
2164 proc_ctx->fourcc_input == VA_FOURCC_YUY2 ||
2165 proc_ctx->fourcc_input == VA_FOURCC_AYUV) &&
2166 proc_ctx->fourcc_output == VA_FOURCC_RGBA) {
2167 tran_coef[0] = 1.164;
2168 tran_coef[1] = 0.000;
2169 tran_coef[2] = 1.569;
2170 tran_coef[3] = 1.164;
2171 tran_coef[4] = -0.813;
2172 tran_coef[5] = -0.392;
2173 tran_coef[6] = 1.164;
2174 tran_coef[7] = 2.017;
2175 tran_coef[8] = 0.000;
2177 v_coef[0] = -16 * 4;
2178 v_coef[1] = -128 * 4;
2179 v_coef[2] = -128 * 4;
2181 is_transform_enabled = 1;
2182 } else if (proc_ctx->fourcc_input != proc_ctx->fourcc_output) {
2183 //enable when input and output format are different.
2184 is_transform_enabled = 1;
2187 if (is_transform_enabled == 0) {
2188 memset(p_table, 0, 12 * 4);
2190 *p_table ++ = (is_transform_enabled << 31 |
2191 0 << 29 | // yuv_channel swap
2192 intel_format_convert(tran_coef[0], 2, 16, 1)); //c0, s2.16 format
2194 *p_table ++ = (0 << 19 | //reserved
2195 intel_format_convert(tran_coef[1], 2, 16, 1)); //c1, s2.16 format
2197 *p_table ++ = (0 << 19 | //reserved
2198 intel_format_convert(tran_coef[2], 2, 16, 1)); //c2, s2.16 format
2200 *p_table ++ = (0 << 19 | //reserved
2201 intel_format_convert(tran_coef[3], 2, 16, 1)); //c3, s2.16 format
2203 *p_table ++ = (0 << 19 | //reserved
2204 intel_format_convert(tran_coef[4], 2, 16, 1)); //c4, s2.16 format
2206 *p_table ++ = (0 << 19 | //reserved
2207 intel_format_convert(tran_coef[5], 2, 16, 1)); //c5, s2.16 format
2209 *p_table ++ = (0 << 19 | //reserved
2210 intel_format_convert(tran_coef[6], 2, 16, 1)); //c6, s2.16 format
2212 *p_table ++ = (0 << 19 | //reserved
2213 intel_format_convert(tran_coef[7], 2, 16, 1)); //c7, s2.16 format
2215 *p_table ++ = (0 << 19 | //reserved
2216 intel_format_convert(tran_coef[8], 2, 16, 1)); //c8, s2.16 format
/* Per-channel offsets packed as u16:u16 (u in high half, v in low). */
2218 *p_table ++ = (intel_format_convert(u_coef[0], 16, 0, 1) << 16 |
2219 intel_format_convert(v_coef[0], 16, 0, 1));
2221 *p_table ++ = (intel_format_convert(u_coef[1], 16, 0, 1) << 16 |
2222 intel_format_convert(v_coef[1], 16, 0, 1));
2224 *p_table ++ = (intel_format_convert(u_coef[2], 16, 0, 1) << 16 |
2225 intel_format_convert(v_coef[2], 16, 0, 1));
/* Program the 3-DWord Area-Of-Interest (AOI) section of the IECP state
 * table, located 27 DWords into iecp_state_table.  Zeroed when the AOI
 * filter is not requested; otherwise filled with the default values below. */
2229 void skl_veb_iecp_aoi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
2231 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 27 * sizeof(unsigned int));
2233 if (!(proc_ctx->filters_mask & VPP_IECP_AOI)) {
2234 memset(p_table, 0, 3 * 4);
/* Default AOI window: DW0 zero, DW1/DW2 = 0x00030000. */
2236 *p_table ++ = 0x00000000;
2237 *p_table ++ = 0x00030000;
2238 *p_table ++ = 0x00030000;
/* Populate the CPU-visible VEBOX state tables (DN/DI and IECP) for SKL.
 * Each table's bo is mapped for write, filled by the per-feature helpers,
 * then unmapped; only tables selected by filters_mask are touched. */
2242 void skl_veb_state_table_setup(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
2244 if (proc_ctx->filters_mask & VPP_DNDI_MASK) {
2245 dri_bo *dndi_bo = proc_ctx->dndi_state_table.bo;
2246 dri_bo_map(dndi_bo, 1);
2247 proc_ctx->dndi_state_table.ptr = dndi_bo->virtual;
2249 skl_veb_dndi_table(ctx, proc_ctx);
2251 dri_bo_unmap(dndi_bo);
2254 if (proc_ctx->filters_mask & VPP_IECP_MASK) {
2255 dri_bo *iecp_bo = proc_ctx->iecp_state_table.bo;
2256 dri_bo_map(iecp_bo, 1);
2257 proc_ctx->iecp_state_table.ptr = iecp_bo->virtual;
2258 memset(proc_ctx->iecp_state_table.ptr, 0, 2048); // Change the size to 2048 in case a large table used in the future
/* IECP sub-tables are written at fixed offsets inside the same buffer;
 * the HSW helpers are reused where the layout is unchanged on SKL. */
2260 hsw_veb_iecp_std_table(ctx, proc_ctx);
2261 hsw_veb_iecp_ace_table(ctx, proc_ctx);
2262 hsw_veb_iecp_tcc_table(ctx, proc_ctx);
2263 hsw_veb_iecp_pro_amp_table(ctx, proc_ctx);
2264 skl_veb_iecp_csc_transform_table(ctx, proc_ctx);
2265 skl_veb_iecp_aoi_table(ctx, proc_ctx);
2267 dri_bo_unmap(iecp_bo);
/* Emit the 16-DWord VEB_STATE command for SKL: DW1 packs the pipeline
 * enables, DW2..9 carry relocated state-table addresses, and the
 * capture-pipe / LACE-LUT / gamma pointers are left null. */
2272 skl_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
2274 struct i965_driver_data *i965 = i965_driver_data(ctx);
2275 struct intel_batchbuffer *batch = proc_ctx->batch;
2277 BEGIN_VEB_BATCH(batch, 0x10);
2278 OUT_VEB_BATCH(batch, VEB_STATE | (0x10 - 2));
2279 OUT_VEB_BATCH(batch,
2280 ((i965->intel.mocs_state) << 25) | // state surface control bits
2281 0 << 23 | // reserved.
2282 0 << 22 | // gamut expansion position
2283 0 << 15 | // reserved.
2284 0 << 14 | // single slice vebox enable
2285 0 << 13 | // hot pixel filter enable
2286 0 << 12 | // alpha plane enable
2287 0 << 11 | // vignette enable
2288 0 << 10 | // demosaic enable
2289 proc_ctx->current_output_type << 8 | // DI output frame
2290 1 << 7 | // 444->422 downsample method
2291 1 << 6 | // 422->420 downsample method
2292 proc_ctx->is_first_frame << 5 | // DN/DI first frame
2293 proc_ctx->is_di_enabled << 4 | // DI enable
2294 proc_ctx->is_dn_enabled << 3 | // DN enable
2295 proc_ctx->is_iecp_enabled << 2 | // global IECP enabled
2296 0 << 1 | // ColorGamutCompressionEnable
2297 0) ; // ColorGamutExpansionEnable.
/* DN/DI state table address (relocation). */
2300 proc_ctx->dndi_state_table.bo,
2301 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
2303 OUT_VEB_BATCH(batch, 0);
/* IECP state table address (relocation). */
2306 proc_ctx->iecp_state_table.bo,
2307 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
2309 OUT_VEB_BATCH(batch, 0);
/* Gamut state table address (relocation). */
2312 proc_ctx->gamut_state_table.bo,
2313 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
2315 OUT_VEB_BATCH(batch, 0);
/* Vertex state table address (relocation). */
2318 proc_ctx->vertex_state_table.bo,
2319 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
2321 OUT_VEB_BATCH(batch, 0);
2323 OUT_VEB_BATCH(batch, 0);/*capture pipe state pointer*/
2324 OUT_VEB_BATCH(batch, 0);
2326 OUT_VEB_BATCH(batch, 0);/*lace lut table state pointer*/
2327 OUT_VEB_BATCH(batch, 0);
2329 OUT_VEB_BATCH(batch, 0);/*gamma correction values address*/
2330 OUT_VEB_BATCH(batch, 0);
2332 ADVANCE_VEB_BATCH(batch);
/* Emit a 9-DWord VEB_SURFACE_STATE for either the current input or the
 * current output frame (selected by is_output).  Format, pitch and the
 * chroma-plane Y offsets are derived from the surface fourcc; tiling is
 * queried from the bo itself. */
2335 void skl_veb_surface_state(VADriverContextP ctx, struct intel_vebox_context *proc_ctx, unsigned int is_output)
2337 struct intel_batchbuffer *batch = proc_ctx->batch;
2338 unsigned int u_offset_y = 0, v_offset_y = 0;
2339 unsigned int is_uv_interleaved = 0, tiling = 0, swizzle = 0;
2340 unsigned int surface_format = PLANAR_420_8;
2341 struct object_surface* obj_surf = NULL;
2342 unsigned int surface_pitch = 0;
2343 unsigned int half_pitch_chroma = 0;
2344 unsigned int derived_pitch;
/* Pick the frame-store slot that matches the requested direction. */
2347 obj_surf = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
2349 obj_surf = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
/* Only these fourccs are supported by this path. */
2352 assert(obj_surf->fourcc == VA_FOURCC_NV12 ||
2353 obj_surf->fourcc == VA_FOURCC_YUY2 ||
2354 obj_surf->fourcc == VA_FOURCC_AYUV ||
2355 obj_surf->fourcc == VA_FOURCC_RGBA ||
2356 obj_surf->fourcc == VA_FOURCC_P010);
/* Map fourcc to hardware surface format, byte pitch and chroma layout.
 * obj_surf->width is in pixels; packed formats scale it by bytes/pixel. */
2358 if (obj_surf->fourcc == VA_FOURCC_NV12) {
2359 surface_format = PLANAR_420_8;
2360 surface_pitch = obj_surf->width;
2361 is_uv_interleaved = 1;
2362 half_pitch_chroma = 0;
2363 } else if (obj_surf->fourcc == VA_FOURCC_YUY2) {
2364 surface_format = YCRCB_NORMAL;
2365 surface_pitch = obj_surf->width * 2;
2366 is_uv_interleaved = 0;
2367 half_pitch_chroma = 0;
2368 } else if (obj_surf->fourcc == VA_FOURCC_AYUV) {
2369 surface_format = PACKED_444A_8;
2370 surface_pitch = obj_surf->width * 4;
2371 is_uv_interleaved = 0;
2372 half_pitch_chroma = 0;
2373 } else if (obj_surf->fourcc == VA_FOURCC_RGBA) {
2374 surface_format = R8G8B8A8_UNORM_SRGB;
2375 surface_pitch = obj_surf->width * 4;
2376 is_uv_interleaved = 0;
2377 half_pitch_chroma = 0;
2378 } else if (obj_surf->fourcc == VA_FOURCC_P010) {
2379 surface_format = PLANAR_420_16;
2380 surface_pitch = obj_surf->width;
2381 is_uv_interleaved = 1;
2382 half_pitch_chroma = 0;
2385 derived_pitch = surface_pitch;
2387 u_offset_y = obj_surf->y_cb_offset;
2388 v_offset_y = obj_surf->y_cr_offset;
2390 dri_bo_get_tiling(obj_surf->bo, &tiling, &swizzle);
2392 BEGIN_VEB_BATCH(batch, 9);
2393 OUT_VEB_BATCH(batch, VEB_SURFACE_STATE | (9 - 2));
2394 OUT_VEB_BATCH(batch,
2395 0 << 1 | // reserved
2396 is_output); // surface identification.
2398 OUT_VEB_BATCH(batch,
2399 (obj_surf->orig_height - 1) << 18 | // height . w3
2400 (obj_surf->orig_width - 1) << 4 | // width
2403 OUT_VEB_BATCH(batch,
2404 surface_format << 28 | // surface format, YCbCr420. w4
2405 is_uv_interleaved << 27 | // interleaved chroma vs. separate planes
2406 0 << 20 | // reserved
2407 (surface_pitch - 1) << 3 | // surface pitch, 64 align
2408 half_pitch_chroma << 2 | // half pitch for chroma
2409 !!tiling << 1 | // 1 = tiled surface, 0 = linear
2410 (tiling == I915_TILING_Y)); // tile walk, ignored for linear surfaces
2412 OUT_VEB_BATCH(batch,
2413 0 << 16 | // X offset for V(Cb)
2414 u_offset_y); // Y offset for V(Cb)
2416 OUT_VEB_BATCH(batch,
2417 0 << 16 | // X offset for V(Cr)
2418 v_offset_y); // Y offset for V(Cr)
2420 OUT_VEB_BATCH(batch, 0);
2422 OUT_VEB_BATCH(batch, derived_pitch - 1);
2424 OUT_VEB_BATCH(batch, 0);
2426 ADVANCE_VEB_BATCH(batch);
/* Gen9 (SKL) VEBOX picture-processing entry point: validate parameters,
 * run any needed pre-conversion, ensure surfaces/storage, then either run
 * the sharpness filter, skip rendering for the POST_COPY second field, or
 * build and flush the VEBOX batch (state + surfaces + DNDI/IECP). */
2430 gen9_vebox_process_picture(VADriverContextP ctx,
2431 struct intel_vebox_context *proc_ctx)
/* Each init/ensure step bails out early on failure. */
2435 status = gen75_vebox_init_pipe_params(ctx, proc_ctx);
2436 if (status != VA_STATUS_SUCCESS)
2439 status = gen75_vebox_init_filter_params(ctx, proc_ctx);
2440 if (status != VA_STATUS_SUCCESS)
2443 status = hsw_veb_pre_format_convert(ctx, proc_ctx);
2444 if (status != VA_STATUS_SUCCESS)
2447 status = gen75_vebox_ensure_surfaces(ctx, proc_ctx);
2448 if (status != VA_STATUS_SUCCESS)
2451 status = gen75_vebox_ensure_surfaces_storage(ctx, proc_ctx);
2452 if (status != VA_STATUS_SUCCESS)
2455 if (proc_ctx->filters_mask & VPP_SHARP_MASK) {
2456 vpp_sharpness_filtering(ctx, proc_ctx);
2457 } else if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
2458 assert(proc_ctx->is_second_field);
2459 /* directly copy the saved frame in the second call */
/* Normal path: build the VEBOX batch atomically and submit it. */
2461 intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
2462 intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
2463 skl_veb_state_table_setup(ctx, proc_ctx);
2464 skl_veb_state_command(ctx, proc_ctx);
2465 skl_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
2466 skl_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
2467 bdw_veb_dndi_iecp_command(ctx, proc_ctx);
2468 intel_batchbuffer_end_atomic(proc_ctx->batch);
2469 intel_batchbuffer_flush(proc_ctx->batch);
2472 status = hsw_veb_post_format_convert(ctx, proc_ctx);
/* Emit the 19-DWord VEB_STATE command for CNL (gen10).  Same DW1 layout
 * as the SKL variant but a longer command (0x13 DWords) with three extra
 * trailing zero DWords; the state surface control bits in DW1 are left 0
 * here rather than taken from mocs_state as on SKL. */
2478 cnl_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
2480 struct intel_batchbuffer *batch = proc_ctx->batch;
2482 BEGIN_VEB_BATCH(batch, 0x13);
2483 OUT_VEB_BATCH(batch, VEB_STATE | (0x13 - 2));
2484 OUT_VEB_BATCH(batch,
2485 0 << 25 | // state surface control bits
2486 0 << 23 | // reserved.
2487 0 << 22 | // gamut expansion position
2488 0 << 15 | // reserved.
2489 0 << 14 | // single slice vebox enable
2490 0 << 13 | // hot pixel filter enable
2491 0 << 12 | // alpha plane enable
2492 0 << 11 | // vignette enable
2493 0 << 10 | // demosaic enable
2494 proc_ctx->current_output_type << 8 | // DI output frame
2495 1 << 7 | // 444->422 downsample method
2496 1 << 6 | // 422->420 downsample method
2497 proc_ctx->is_first_frame << 5 | // DN/DI first frame
2498 proc_ctx->is_di_enabled << 4 | // DI enable
2499 proc_ctx->is_dn_enabled << 3 | // DN enable
2500 proc_ctx->is_iecp_enabled << 2 | // global IECP enabled
2501 0 << 1 | // ColorGamutCompressionEnable
2502 0) ; // ColorGamutExpansionEnable.
/* Relocated state-table addresses follow. */
2505 proc_ctx->dndi_state_table.bo,
2506 I915_GEM_DOMAIN_INSTRUCTION, 0, 0); // DW 2-3
2509 proc_ctx->iecp_state_table.bo,
2510 I915_GEM_DOMAIN_INSTRUCTION, 0, 0); // DW 4-5
2513 proc_ctx->gamut_state_table.bo,
2514 I915_GEM_DOMAIN_INSTRUCTION, 0, 0); // DW 6-7
2517 proc_ctx->vertex_state_table.bo,
2518 I915_GEM_DOMAIN_INSTRUCTION, 0, 0); // DW 8-9
2520 OUT_VEB_BATCH(batch, 0);/*capture pipe state pointer*/
2521 OUT_VEB_BATCH(batch, 0);
2523 OUT_VEB_BATCH(batch, 0);/*lace lut table state pointer*/
2524 OUT_VEB_BATCH(batch, 0);
2526 OUT_VEB_BATCH(batch, 0);/*gamma correction values address*/
2527 OUT_VEB_BATCH(batch, 0);
/* DW 16-18: reserved/unused on this path, written as zero. */
2529 OUT_VEB_BATCH(batch, 0);
2530 OUT_VEB_BATCH(batch, 0);
2531 OUT_VEB_BATCH(batch, 0);
2534 ADVANCE_VEB_BATCH(batch);
/* Emit the 24-DWord VEB_DNDI_IECP_STATE command for CNL: the processing
 * width (64-aligned, clamped to the surface width) plus relocated
 * addresses for every frame-store surface the DN/DI/IECP pipeline reads
 * or writes. */
2537 void cnl_veb_dndi_iecp_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
2539 struct intel_batchbuffer *batch = proc_ctx->batch;
2540 unsigned char frame_ctrl_bits = 0;
2541 struct object_surface *obj_surface = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
2542 unsigned int width64 = ALIGN(proc_ctx->width_input, 64);
2544 assert(obj_surface);
/* Never process beyond the actual surface width. */
2545 if (width64 > obj_surface->orig_width)
2546 width64 = obj_surface->orig_width;
2548 BEGIN_VEB_BATCH(batch, 0x18);
2549 OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (0x18 - 2));//DWord 0
2550 OUT_VEB_BATCH(batch, (width64 - 1));
/* Input surfaces: read-only relocations. */
2553 proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface->bo,
2554 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 2-3
2557 proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface->bo,
2558 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 4-5
2561 proc_ctx->frame_store[FRAME_IN_STMM].obj_surface->bo,
2562 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 6-7
/* Output surfaces: read/write relocations. */
2565 proc_ctx->frame_store[FRAME_OUT_STMM].obj_surface->bo,
2566 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 8-9
2569 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface->bo,
2570 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 10-11
2573 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface->bo,
2574 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 12-13
2577 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface->bo,
2578 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 14-15
2581 proc_ctx->frame_store[FRAME_OUT_STATISTIC].obj_surface->bo,
2582 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 16-17
2584 OUT_VEB_BATCH(batch, 0); //DWord 18
2585 OUT_VEB_BATCH(batch, 0); //DWord 19
2587 OUT_VEB_BATCH(batch, 0); //DWord 20
2588 OUT_VEB_BATCH(batch, 0); //DWord 21
2589 OUT_VEB_BATCH(batch, 0); //DWord 22
2590 OUT_VEB_BATCH(batch, 0); //DWord 23
2592 ADVANCE_VEB_BATCH(batch);
/* Emit a 9-DWord VEB_SURFACE_STATE for CNL.  Same fourcc-to-format
 * mapping as the SKL variant, but the DW4 bit layout differs:
 * surface_format sits at bit 27 and is_uv_interleaved at bit 20
 * (SKL uses bits 28 and 27 respectively). */
2595 void cnl_veb_surface_state(VADriverContextP ctx, struct intel_vebox_context *proc_ctx, unsigned int is_output)
2597 struct intel_batchbuffer *batch = proc_ctx->batch;
2598 unsigned int u_offset_y = 0, v_offset_y = 0;
2599 unsigned int is_uv_interleaved = 0, tiling = 0, swizzle = 0;
2600 unsigned int surface_format = PLANAR_420_8;
2601 struct object_surface* obj_surf = NULL;
2602 unsigned int surface_pitch = 0;
2603 unsigned int half_pitch_chroma = 0;
2604 unsigned int derived_pitch;
/* Pick the frame-store slot that matches the requested direction. */
2607 obj_surf = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
2609 obj_surf = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
/* Only these fourccs are supported by this path. */
2612 assert(obj_surf->fourcc == VA_FOURCC_NV12 ||
2613 obj_surf->fourcc == VA_FOURCC_YUY2 ||
2614 obj_surf->fourcc == VA_FOURCC_AYUV ||
2615 obj_surf->fourcc == VA_FOURCC_RGBA ||
2616 obj_surf->fourcc == VA_FOURCC_P010);
/* Map fourcc to hardware surface format, byte pitch and chroma layout. */
2618 if (obj_surf->fourcc == VA_FOURCC_NV12) {
2619 surface_format = PLANAR_420_8;
2620 surface_pitch = obj_surf->width;
2621 is_uv_interleaved = 1;
2622 half_pitch_chroma = 0;
2623 } else if (obj_surf->fourcc == VA_FOURCC_YUY2) {
2624 surface_format = YCRCB_NORMAL;
2625 surface_pitch = obj_surf->width * 2;
2626 is_uv_interleaved = 0;
2627 half_pitch_chroma = 0;
2628 } else if (obj_surf->fourcc == VA_FOURCC_AYUV) {
2629 surface_format = PACKED_444A_8;
2630 surface_pitch = obj_surf->width * 4;
2631 is_uv_interleaved = 0;
2632 half_pitch_chroma = 0;
2633 } else if (obj_surf->fourcc == VA_FOURCC_RGBA) {
2634 surface_format = R8G8B8A8_UNORM_SRGB;
2635 surface_pitch = obj_surf->width * 4;
2636 is_uv_interleaved = 0;
2637 half_pitch_chroma = 0;
2638 } else if (obj_surf->fourcc == VA_FOURCC_P010) {
2639 surface_format = PLANAR_420_16;
2640 surface_pitch = obj_surf->width;
2641 is_uv_interleaved = 1;
2642 half_pitch_chroma = 0;
2645 derived_pitch = surface_pitch;
2647 u_offset_y = obj_surf->y_cb_offset;
2648 v_offset_y = obj_surf->y_cr_offset;
2650 dri_bo_get_tiling(obj_surf->bo, &tiling, &swizzle);
2652 BEGIN_VEB_BATCH(batch, 9);
2653 OUT_VEB_BATCH(batch, VEB_SURFACE_STATE | (9 - 2));
2654 OUT_VEB_BATCH(batch,
2655 0 << 1 | // reserved
2656 is_output); // surface identification.
2658 OUT_VEB_BATCH(batch,
2659 (obj_surf->orig_height - 1) << 18 | // height . w3
2660 (obj_surf->orig_width - 1) << 4 | // width
2663 OUT_VEB_BATCH(batch,
2664 surface_format << 27 | // surface format, YCbCr420. w4
2665 is_uv_interleaved << 20 | // interleaved chroma vs. separate planes
2666 (surface_pitch - 1) << 3 | // surface pitch, 64 align
2667 half_pitch_chroma << 2 | // half pitch for chroma
2668 !!tiling << 1 | // 1 = tiled surface, 0 = linear
2669 (tiling == I915_TILING_Y)); // tile walk, ignored for linear surfaces
2671 OUT_VEB_BATCH(batch,
2672 0 << 16 | // X offset for V(Cb)
2673 u_offset_y); // Y offset for V(Cb)
2675 OUT_VEB_BATCH(batch,
2676 0 << 16 | // X offset for V(Cr)
2677 v_offset_y); // Y offset for V(Cr)
2679 OUT_VEB_BATCH(batch, 0);
2681 OUT_VEB_BATCH(batch, derived_pitch - 1);
2683 OUT_VEB_BATCH(batch, 0);
2685 ADVANCE_VEB_BATCH(batch);
/* Gen10 (CNL) VEBOX picture-processing entry point.  Mirrors the gen9
 * flow exactly but emits the CNL command variants; the state tables are
 * still built by skl_veb_state_table_setup. */
2689 gen10_vebox_process_picture(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
/* Each init/ensure step bails out early on failure. */
2693 status = gen75_vebox_init_pipe_params(ctx, proc_ctx);
2694 if (status != VA_STATUS_SUCCESS)
2697 status = gen75_vebox_init_filter_params(ctx, proc_ctx);
2698 if (status != VA_STATUS_SUCCESS)
2701 status = hsw_veb_pre_format_convert(ctx, proc_ctx);
2702 if (status != VA_STATUS_SUCCESS)
2705 status = gen75_vebox_ensure_surfaces(ctx, proc_ctx);
2706 if (status != VA_STATUS_SUCCESS)
2709 status = gen75_vebox_ensure_surfaces_storage(ctx, proc_ctx);
2710 if (status != VA_STATUS_SUCCESS)
2713 if (proc_ctx->filters_mask & VPP_SHARP_MASK) {
2714 vpp_sharpness_filtering(ctx, proc_ctx);
2715 } else if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
2716 assert(proc_ctx->is_second_field);
2717 /* directly copy the saved frame in the second call */
/* Normal path: build the VEBOX batch atomically and submit it. */
2719 intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
2720 intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
2721 skl_veb_state_table_setup(ctx, proc_ctx);
2722 cnl_veb_state_command(ctx, proc_ctx);
2723 cnl_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
2724 cnl_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
2725 cnl_veb_dndi_iecp_command(ctx, proc_ctx);
2726 intel_batchbuffer_end_atomic(proc_ctx->batch);
2727 intel_batchbuffer_flush(proc_ctx->batch);
2730 status = hsw_veb_post_format_convert(ctx, proc_ctx);