2 * Copyright © 2011 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Li Xiaowei <xiaowei.a.li@intel.com>
26 * Li Zhong <zhong.li@intel.com>
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "gen75_vpp_vebox.h"
40 #include "intel_media.h"
45 i965_MapBuffer(VADriverContextP ctx, VABufferID buf_id, void **);
48 i965_UnmapBuffer(VADriverContextP ctx, VABufferID buf_id);
51 i965_DeriveImage(VADriverContextP ctx, VABufferID surface, VAImage *out_image);
54 i965_DestroyImage(VADriverContextP ctx, VAImageID image);
/* Full-frame format/color conversion between two surfaces of identical
 * dimensions, delegated to the i965 image-processing (render) path.
 * NOTE(review): this excerpt is missing the tail of the
 * i965_image_processing() call and the return statement (orig lines 82-88). */
57 VAStatus vpp_surface_convert(VADriverContextP ctx,
58                              struct object_surface *src_obj_surf,
59                              struct object_surface *dst_obj_surf)
61     VAStatus va_status = VA_STATUS_SUCCESS;
/* Conversion only — no scaling allowed, so sizes must match exactly. */
63     assert(src_obj_surf->orig_width == dst_obj_surf->orig_width);
64     assert(src_obj_surf->orig_height == dst_obj_surf->orig_height);
66     VARectangle src_rect, dst_rect;
67     src_rect.x = dst_rect.x = 0;
68     src_rect.y = dst_rect.y = 0;
69     src_rect.width = dst_rect.width = src_obj_surf->orig_width;
/* NOTE(review): height is taken from dst while width comes from src;
 * harmless given the asserts above, but inconsistent — confirm intent. */
70     src_rect.height = dst_rect.height = dst_obj_surf->orig_height;
72     struct i965_surface src_surface, dst_surface;
73     src_surface.base = (struct object_base *)src_obj_surf;
74     src_surface.type = I965_SURFACE_TYPE_SURFACE;
75     src_surface.flags = I965_SURFACE_FLAG_FRAME;
77     dst_surface.base = (struct object_base *)dst_obj_surf;
78     dst_surface.type = I965_SURFACE_TYPE_SURFACE;
79     dst_surface.flags = I965_SURFACE_FLAG_FRAME;
81     va_status = i965_image_processing(ctx,
/* Scale src_obj_surf into dst_obj_surf using the AVS (adaptive video
 * scaler) pipeline. Both surfaces must be NV12 (asserted).
 * CAUTION: parameter order is (dst, src) — the reverse of
 * vpp_surface_convert() above.
 * NOTE(review): rectangle x/y initialization and the tail of the
 * i965_scaling_processing() call are missing from this excerpt. */
89 VAStatus vpp_surface_scaling(VADriverContextP ctx,
90                              struct object_surface *dst_obj_surf,
91                              struct object_surface *src_obj_surf)
93     VAStatus va_status = VA_STATUS_SUCCESS;
94     int flags = I965_PP_FLAG_AVS;
96     assert(src_obj_surf->fourcc == VA_FOURCC_NV12);
97     assert(dst_obj_surf->fourcc == VA_FOURCC_NV12);
99     VARectangle src_rect, dst_rect;
102     src_rect.width = src_obj_surf->orig_width;
103     src_rect.height = src_obj_surf->orig_height;
107     dst_rect.width = dst_obj_surf->orig_width;
108     dst_rect.height = dst_obj_surf->orig_height;
110     va_status = i965_scaling_processing(ctx,
/* Fill the VEBOX DN/DI (denoise / deinterlace) state table pointed to by
 * proc_ctx->dndi_state_table.ptr with hard-coded tuning values, plus a few
 * fields derived from the user's deinterlacing filter parameters
 * (top-field-first and motion-compensated-DI flags).
 * The table layout is the hardware's DNDI_STATE word sequence (w0..w8+);
 * Haswell takes an extra leading reserved dword, Gen8 an extra trailing one.
 * NOTE(review): several source lines are missing from this excerpt
 * (closing braces, some packed-word tails), so the visible text is not the
 * complete function. */
120 void hsw_veb_dndi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
122     struct i965_driver_data *i965 = i965_driver_data(ctx);
124     unsigned int* p_table ;
125     int progressive_dn = 1;
126     int dndi_top_first = 0;
127     int motion_compensated_enable = 0;
/* Derive DI-related bits only when the DI filter is actually enabled. */
128     if (proc_ctx->filters_mask & VPP_DNDI_DI) {
129         VAProcFilterParameterBufferDeinterlacing *di_param =
130             (VAProcFilterParameterBufferDeinterlacing *)proc_ctx->filter_di;
134         dndi_top_first = !(di_param->flags & VA_DEINTERLACING_BOTTOM_FIELD_FIRST);
135         motion_compensated_enable = (di_param->algorithm == VAProcDeinterlacingMotionCompensated);
/* NOTE(review): these two locals appear unused in the visible lines —
 * possibly consumed by code in the missing lines; confirm. */
139     VAProcFilterParameterBufferDeinterlacing *di_param =
140         (VAProcFilterParameterBufferDeinterlacing *) proc_ctx->filter_di;
142     VAProcFilterParameterBuffer * dn_param =
143         (VAProcFilterParameterBuffer *) proc_ctx->filter_dn;
145     p_table = (unsigned int *)proc_ctx->dndi_state_table.ptr;
/* HSW-only leading reserved dword (w0). */
147     if (IS_HASWELL(i965->intel.device_info))
148         *p_table ++ = 0; // reserved . w0
150     *p_table ++ = ( 140 << 24 |    // denoise STAD threshold . w1
151                     192 << 16 |    // dnmh_history_max
152                     0   << 12 |    // reserved
153                     7   << 8  |    // dnmh_delta[3:0]
154                     38 );          // denoise ASD threshold
156     *p_table ++ = ( 0  << 30 |    // reserved . w2
157                     0  << 24 |    // temporal diff th
158                     0  << 22 |    // reserved.
159                     0  << 16 |    // low temporal diff th
161                     1  << 8  |    // denoise moving pixel th
162                     38 );         // denoise th for sum of complexity measure
164     *p_table ++ = ( 0 << 30  |   // reserved . w3
165                     12<< 24  |   // good neighbor th[5:0]
166                     9 << 20  |   // CAT slope minus 1
167                     5 << 16  |   // SAD Tight in
168                     0 << 14  |   // smooth mv th
169                     0 << 12  |   // reserved
170                     1 << 8   |   // bne_edge_th[3:0]
171                     20 );        // block noise estimate noise th
173     *p_table ++ = ( 0  << 31  |  // STMM blending constant select. w4
174                     64 << 24  |  // STMM trc1
175                     125<< 16  |  // STMM trc2
176                     0  << 14  |  // reserved
177                     30 << 8   |  // VECM_mul
178                     150 );       // maximum STMM
180     *p_table ++ = ( 118<< 24  |  // minumum STMM  . W5
181                     0  << 22  |  // STMM shift down
182                     1  << 20  |  // STMM shift up
183                     5  << 16  |  // STMM output shift
184                     100 << 8  |  // SDI threshold
187     *p_table ++ = ( 50 << 24  |  // SDI fallback mode 1 T1 constant . W6
188                     100 << 16 |  // SDI fallback mode 1 T2 constant
189                     37 << 8   |  // SDI fallback mode 2 constant(angle2x1)
190                     175 );       // FMD temporal difference threshold
192     *p_table ++ = ( 16 << 24  |  // FMD #1 vertical difference th . w7
193                     100<< 16  |  // FMD #2 vertical difference th
195                     2 << 8    |  // FMD tear threshold
/* Bits 7/6/3 are the only runtime-variable fields in the whole table. */
196                     motion_compensated_enable << 7 |    // MCDI Enable, use motion compensated deinterlace algorithm
197                     progressive_dn << 6  |  // progressive DN
199                     dndi_top_first << 3  |  // DN/DI Top First
202     *p_table ++ = ( 0 << 29  |   // reserved . W8
203                     32 << 23 |   // dnmh_history_init[5:0]
204                     10 << 19 |   // neighborPixel th
205                     0 << 18  |   // reserved
206                     0 << 16  |   // FMD for 2nd field of previous frame
207                     25 << 10 |   // MC pixel consistency th
208                     0 << 8   |   // FMD for 1st field for current frame
212     *p_table ++ = ( 0 << 24  |   // reserved
213                     140<< 16 |   // chr_dnmh_stad_th
214                     0 << 13  |   // reserved
215                     1 << 12  |   // chrome denoise enable
216                     13 << 6  |   // chr temp diff th
217                     7 );         // chr temp diff low
/* Gen8-only trailing dword (hot-pixel parameters left at defaults). */
219     if (IS_GEN8(i965->intel.device_info))
220         *p_table ++ = 0;         // parameters for hot pixel,
/* Fill the STD/STE (skin-tone detection / enhancement) section of the IECP
 * state table (offset 0, 29 dwords). When the STD/STE filter is not in the
 * mask the section is zeroed; otherwise a fixed set of tuned detection and
 * saturation/hue piecewise-linear parameters is written.
 * NOTE(review): many packed-word tails and the zero-path early return are
 * missing from this excerpt; the visible text is not the complete function. */
223 void hsw_veb_iecp_std_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
225     unsigned int *p_table = proc_ctx->iecp_state_table.ptr + 0 ;
226     //VAProcFilterParameterBuffer * std_param =
227     //  (VAProcFilterParameterBuffer *) proc_ctx->filter_std;
/* Filter disabled: clear all 29 dwords of this section. */
229     if(!(proc_ctx->filters_mask & VPP_IECP_STD_STE)){
230         memset(p_table, 0, 29 * 4);
233         *p_table ++ = ( 154 << 24 |   // V_Mid
235                         14 << 10  |   // Hue_Max
238                         0 << 2    |   // Output Control is set to output the 1=STD score /0=Output Pixels
239                         1 << 1    |   // Set STE Enable
240                         1 );          // Set STD Enable
243         *p_table ++ = ( 0 << 31   |   // Reserved
244                         4 << 28   |   // Diamond Margin
245                         0 << 21   |   // Diamond_du
246                         3 << 18   |   // HS_Margin
247                         79 << 10  |   // Cos(alpha)
252         *p_table ++ = ( 0 << 21   |   // Reserved
253                         100 << 13 |   // Diamond_alpha
254                         35 << 7   |   // Diamond_Th
258         *p_table ++ = ( 254 << 24 |   // Y_point_3
259                         47 << 16  |   // Y_point_2
260                         46 << 8   |   // Y_point_1
261                         1 << 7    |   // VY_STD_Enable
265         *p_table ++ = ( 0 << 18   |   // Reserved
266                         31 << 13  |   // Y_slope_2
267                         31 << 8   |   // Y_slope_1
/* From here on: inverse-margin constants and the PxL/PxU, BxL/BxU, SxL/SxU
 * piecewise saturation/hue tables. Negative values are pre-encoded as
 * two's-complement bit patterns of the stated field width (e.g. 0x7FB). */
271         *p_table ++ = ( 400 << 16 |   // INV_Skin_types_margin = 20* Skin_Type_margin => 20*20
272                         3300 );       // INV_Margin_VYL => 1/Margin_VYL
275         *p_table ++ = ( 216 << 24 |   // P1L
277                         1600 );       // INV_Margin_VYU
280         *p_table ++ = ( 130 << 24 |   // B1L
286         *p_table ++ = ( 0 << 27      |   // Reserved
287                         0x7FB << 16  |   // S0L (11 bits, Default value: -5 = FBh, pad it with 1s to make it 11bits)
292         *p_table ++ = ( 0 << 22      |   // Reserved
297         *p_table ++ = ( 0 << 27   |   // Reserved
303         *p_table ++ = ( 163 << 24 |   // B1U
309         *p_table ++ = ( 0 << 27 |     // Reserved
315         *p_table ++ = ( 0 << 22     |  // Reserved
316                         0x74D << 11 |  // S2U (11 bits, Default value -179 = F4Dh)
320         *p_table ++ = ( 0 << 28   |   // Reserved
321                         20 << 20  |   // Skin_types_margin
322                         120 << 12 |   // Skin_types_thresh
323                         1 << 11   |   // Skin_Types_Enable
327         *p_table ++ = ( 0 << 31    |  // Reserved
328                         0x3F8 << 21|  // SATB1 (10 bits, default 8, optimized value -8)
331                         0x7A );       // SATP1 (7 bits, default 6, optimized value -6)
334         *p_table ++ = ( 0 << 31   |   // Reserved
340         *p_table ++ = ( 0 << 22   |   // Reserved
345         *p_table ++ = ( 14 << 25  |   // HUEP3
347                         0x7A << 11 |  // HUEP1 (7 bits, default value -6 = 7Ah)
351         *p_table ++ = ( 0 << 30   |   // Reserved
354                         0x3F8 );      // HUEB1 (10 bits, default value 8, optimized value -8)
357         *p_table ++ = ( 0 << 22   |   // Reserved
362         *p_table ++ = ( 0 << 22   |   // Reserved
/* _DARK variants: same tables applied to the low-luma range. */
367         *p_table ++ = ( 0 << 31   |   // Reserved
368                         0 << 21   |   // SATB1_DARK
369                         31 << 14  |   // SATP3_DARK
370                         31 << 7   |   // SATP2_DARK
371                         0x7B );       // SATP1_DARK (7 bits, default value -11 = FF5h, optimized value -5)
374         *p_table ++ = ( 0 << 31   |   // Reserved
375                         305 << 20 |   // SATS0_DARK
376                         124 << 10 |   // SATB3_DARK
380         *p_table ++ = ( 0 << 22   |   // Reserved
381                         256 << 11 |   // SATS2_DARK
385         *p_table ++ = ( 14 << 25  |   // HUEP3_DARK
386                         14 << 18  |   // HUEP2_DARK
387                         14 << 11  |   // HUEP1_DARK
391         *p_table ++ = ( 0 << 30   |   // Reserved
392                         56 << 20  |   // HUEB3_DARK
393                         56 << 10  |   // HUEB2_DARK
397         *p_table ++ = ( 0 << 22   |   // Reserved
398                         256 << 11 |   // HUES1_DARK
402         *p_table ++ = ( 0 << 22   |   // Reserved
403                         256 << 11 |   // HUES3_DARK
/* Fill the ACE (automatic contrast enhancement) section of the IECP state
 * table (byte offset 116, 13 dwords). Disabled: zeroed. Enabled: a fixed
 * identity-like LUT is written as precomputed dwords.
 * NOTE(review): the else/brace lines between the memset and the table
 * writes are missing from this excerpt. */
408 void hsw_veb_iecp_ace_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
410     unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 116);
412     if(!(proc_ctx->filters_mask & VPP_IECP_ACE)){
413         memset(p_table, 0, 13 * 4);
415         *p_table ++ = 0x00000068;
416         *p_table ++ = 0x4c382410;
417         *p_table ++ = 0x9c887460;
418         *p_table ++ = 0xebd8c4b0;
419         *p_table ++ = 0x604c3824;
421         *p_table ++ = 0xb09c8874;
422         *p_table ++ = 0x0000d8c4;
423         *p_table ++ = 0x00000000;
424         *p_table ++ = 0x00000000;
425         *p_table ++ = 0x00000000;
427         *p_table ++ = 0x00000000;
428         *p_table ++ = 0x00000000;
429         *p_table ++ = 0x00000000;
/* Fill the TCC (total color control) section of the IECP state table
 * (byte offset 168, 11 dwords) with fixed precomputed values, or zero it
 * when the TCC filter is not enabled.
 * NOTE(review): the else/brace lines are missing from this excerpt. */
433 void hsw_veb_iecp_tcc_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
435     unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 168);
436     // VAProcFilterParameterBuffer * tcc_param =
437     //      (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
439     if(!(proc_ctx->filters_mask & VPP_IECP_TCC)){
440         memset(p_table, 0, 11 * 4);
442         *p_table ++ = 0x00000000;
443         *p_table ++ = 0x00000000;
444         *p_table ++ = 0x1e34cc91;
445         *p_table ++ = 0x3e3cce91;
446         *p_table ++ = 0x02e80195;
448         *p_table ++ = 0x0197046b;
449         *p_table ++ = 0x01790174;
450         *p_table ++ = 0x00000000;
451         *p_table ++ = 0x00000000;
452         *p_table ++ = 0x03030000;
454         *p_table ++ = 0x009201c0;
/* Fill the ProcAmp section of the IECP state table (byte offset 212,
 * 2 dwords): contrast, brightness, and combined hue/saturation terms.
 * User VAProcColorBalance elements are converted to the hardware's
 * fixed-point formats via intel_format_convert(); cos/sin of the hue angle
 * are pre-multiplied by contrast and saturation as the hardware expects.
 * NOTE(review): declarations of src_hue and the loop variable i, plus
 * some braces, are missing from this excerpt. */
458 void hsw_veb_iecp_pro_amp_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
460     unsigned int contrast = 0x80;  //default
461     int brightness = 0x00;         //default
462     int cos_c_s = 256 ;            //default
463     int sin_c_s = 0;               //default
464     unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 212);
466     if(!(proc_ctx->filters_mask & VPP_IECP_PRO_AMP)){
467         memset(p_table, 0, 2 * 4);
469         float src_saturation = 1.0;
471         float src_contrast = 1.0;
472         float src_brightness = 0.0;
473         float tmp_value = 0.0;
476         VAProcFilterParameterBufferColorBalance * amp_params =
477             (VAProcFilterParameterBufferColorBalance *) proc_ctx->filter_iecp_amp;
/* One buffer element per color-balance attribute; last write wins if an
 * attribute is repeated. */
479         for (i = 0; i < proc_ctx->filter_iecp_amp_num_elements; i++){
480             VAProcColorBalanceType attrib = amp_params[i].attrib;
482             if(attrib == VAProcColorBalanceHue) {
483                src_hue = amp_params[i].value;         //(-180.0, 180.0)
484             }else if(attrib == VAProcColorBalanceSaturation) {
485                src_saturation = amp_params[i].value;  //(0.0, 10.0)
486             }else if(attrib == VAProcColorBalanceBrightness) {
487                src_brightness = amp_params[i].value;  // (-100.0, 100.0)
488                brightness = intel_format_convert(src_brightness, 7, 4, 1);
489             }else if(attrib == VAProcColorBalanceContrast) {
490                src_contrast = amp_params[i].value;    //  (0.0, 10.0)
491                contrast = intel_format_convert(src_contrast, 4, 7, 0);
495         tmp_value = cos(src_hue/180*PI) * src_contrast * src_saturation;
496         cos_c_s = intel_format_convert(tmp_value, 7, 8, 1);
498         tmp_value = sin(src_hue/180*PI) * src_contrast * src_saturation;
499         sin_c_s  = intel_format_convert(tmp_value, 7, 8, 1);
501         *p_table ++ = ( 0 << 28 |         //reserved
502                         contrast << 17 |  //contrast value (U4.7 format)
504                         brightness << 1|  // S7.4 format
507         *p_table ++ = ( cos_c_s << 16 |  // cos(h) * contrast * saturation
508                         sin_c_s);        // sin(h) * contrast * saturation
/* Fill the CSC (color-space conversion) section of the IECP state table
 * (byte offset 220, 8 dwords). Picks a 3x3 BT.601-style matrix plus
 * per-channel offsets for RGB->YUV or YUV->RGB based on the input/output
 * fourccs; identity matrix otherwise. Coefficients are packed as s2.10
 * fixed point, offsets as s10.
 * NOTE(review): offset-vector assignments for the RGB->YUV branch and
 * several braces are missing from this excerpt. */
514 void hsw_veb_iecp_csc_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
516     unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 220);
517     float tran_coef[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
518     float v_coef[3]    = {0.0, 0.0, 0.0};
519     float u_coef[3]    = {0.0, 0.0, 0.0};
520     int   is_transform_enabled = 0;
522     if(!(proc_ctx->filters_mask & VPP_IECP_CSC)){
523         memset(p_table, 0, 8 * 4);
/* RGB input -> YUV-family output: BT.601 limited-range RGB->YCbCr. */
527     if(proc_ctx->fourcc_input == VA_FOURCC_RGBA &&
528        (proc_ctx->fourcc_output == VA_FOURCC_NV12 ||
529         proc_ctx->fourcc_output == VA_FOURCC_YV12 ||
/* NOTE(review): VA_FOURCC_YVY2 looks like a typo for VA_FOURCC_YUY2 —
 * the symmetric YUV->RGB branch below tests VA_FOURCC_YUY2. Confirm the
 * symbol exists / which format was intended. */
530         proc_ctx->fourcc_output == VA_FOURCC_YVY2 ||
531         proc_ctx->fourcc_output == VA_FOURCC_AYUV)) {
533         tran_coef[0] = 0.257;
534         tran_coef[1] = 0.504;
535         tran_coef[2] = 0.098;
536         tran_coef[3] = -0.148;
537         tran_coef[4] = -0.291;
538         tran_coef[5] = 0.439;
539         tran_coef[6] = 0.439;
540         tran_coef[7] = -0.368;
541         tran_coef[8] = -0.071;
547         is_transform_enabled = 1;
/* YUV-family input -> RGB output: BT.601 YCbCr->RGB expansion. */
548     }else if((proc_ctx->fourcc_input == VA_FOURCC_NV12 ||
549               proc_ctx->fourcc_input == VA_FOURCC_YV12 ||
550               proc_ctx->fourcc_input == VA_FOURCC_YUY2 ||
551               proc_ctx->fourcc_input == VA_FOURCC_AYUV) &&
552               proc_ctx->fourcc_output == VA_FOURCC_RGBA) {
553         tran_coef[0] = 1.164;
554         tran_coef[1] = 0.000;
555         tran_coef[2] = 1.569;
556         tran_coef[3] = 1.164;
557         tran_coef[4] = -0.813;
558         tran_coef[5] = -0.392;
559         tran_coef[6] = 1.164;
560         tran_coef[7] = 2.017;
561         tran_coef[8] = 0.000;
/* Chroma bias of -128, pre-scaled by 4 for the s10 offset encoding. */
564         v_coef[1] = -128 * 4;
565         v_coef[2] = -128 * 4;
567         is_transform_enabled = 1;
568     }else if(proc_ctx->fourcc_input != proc_ctx->fourcc_output){
569         //enable when input and output format are different.
570         is_transform_enabled = 1;
573     if(is_transform_enabled == 0){
574         memset(p_table, 0, 8 * 4);
576     *p_table ++ = ( 0 << 29 | //reserved
577                     intel_format_convert(tran_coef[1], 2, 10, 1) << 16 | //c1, s2.10 format
578                     intel_format_convert(tran_coef[0], 2, 10, 1) << 3 |  //c0, s2.10 format
580                     0 << 1 | // yuv_channel swap
581                     is_transform_enabled);
583     *p_table ++ = ( 0 << 26 | //reserved
584                     intel_format_convert(tran_coef[3], 2, 10, 1) << 13 |
585                     intel_format_convert(tran_coef[2], 2, 10, 1));
587     *p_table ++ = ( 0 << 26 | //reserved
588                     intel_format_convert(tran_coef[5], 2, 10, 1) << 13 |
589                     intel_format_convert(tran_coef[4], 2, 10, 1));
591     *p_table ++ = ( 0 << 26 | //reserved
592                     intel_format_convert(tran_coef[7], 2, 10, 1) << 13 |
593                     intel_format_convert(tran_coef[6], 2, 10, 1));
595     *p_table ++ = ( 0 << 13 | //reserved
596                     intel_format_convert(tran_coef[8], 2, 10, 1));
598     *p_table ++ = ( 0 << 22 | //reserved
599                     intel_format_convert(u_coef[0], 10, 0, 1) << 11 |
600                     intel_format_convert(v_coef[0], 10, 0, 1));
602     *p_table ++ = ( 0 << 22 | //reserved
603                     intel_format_convert(u_coef[1], 10, 0, 1) << 11 |
604                     intel_format_convert(v_coef[1], 10, 0, 1));
606     *p_table ++ = ( 0 << 22 | //reserved
607                     intel_format_convert(u_coef[2], 10, 0, 1) << 11 |
608                     intel_format_convert(v_coef[2], 10, 0, 1));
/* Fill the AOI (area of interest) section of the IECP state table
 * (byte offset 252, 3 dwords): zeroed when the AOI filter is disabled,
 * otherwise fixed default extents.
 * NOTE(review): the else/brace lines are missing from this excerpt. */
612 void hsw_veb_iecp_aoi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
614     unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 252);
615     // VAProcFilterParameterBuffer * tcc_param =
616     //      (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
618     if(!(proc_ctx->filters_mask & VPP_IECP_AOI)){
619         memset(p_table, 0, 3 * 4);
621         *p_table ++ = 0x00000000;
622         *p_table ++ = 0x00030000;
623         *p_table ++ = 0x00030000;
/* Map the DN/DI and IECP state buffer objects, dispatch to the individual
 * table-fill helpers above, then unmap. Each helper writes its own section
 * of the mapped buffer via proc_ctx->*_state_table.ptr.
 * dri_bo_map(bo, 1) maps for writing. */
627 void hsw_veb_state_table_setup(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
629     if(proc_ctx->filters_mask & VPP_DNDI_MASK) {
630         dri_bo *dndi_bo = proc_ctx->dndi_state_table.bo;
631         dri_bo_map(dndi_bo, 1);
632         proc_ctx->dndi_state_table.ptr = dndi_bo->virtual;
634         hsw_veb_dndi_table(ctx, proc_ctx);
636         dri_bo_unmap(dndi_bo);
639     if(proc_ctx->filters_mask & VPP_IECP_MASK) {
640         dri_bo *iecp_bo = proc_ctx->iecp_state_table.bo;
641         dri_bo_map(iecp_bo, 1);
642         proc_ctx->iecp_state_table.ptr = iecp_bo->virtual;
/* The IECP table is one buffer; each helper fills its fixed offset. */
644         hsw_veb_iecp_std_table(ctx, proc_ctx);
645         hsw_veb_iecp_ace_table(ctx, proc_ctx);
646         hsw_veb_iecp_tcc_table(ctx, proc_ctx);
647         hsw_veb_iecp_pro_amp_table(ctx, proc_ctx);
648         hsw_veb_iecp_csc_table(ctx, proc_ctx);
649         hsw_veb_iecp_aoi_table(ctx, proc_ctx);
651         dri_bo_unmap(iecp_bo);
/* Emit the VEB_STATE command: enable bits for DN/DI/IECP, the DI output
 * frame policy, and relocations to the four state tables.
 * frame_order == -1 marks the first frame of the stream.
 * NOTE(review): lines choosing di_output_frames_flag for the
 * format-convert / Bob cases, and the OUT_RELOC preambles, are missing
 * from this excerpt. */
655 void hsw_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
657     struct intel_batchbuffer *batch = proc_ctx->batch;
658     unsigned int is_dn_enabled   = !!(proc_ctx->filters_mask & VPP_DNDI_DN);
659     unsigned int is_di_enabled   = !!(proc_ctx->filters_mask & VPP_DNDI_DI);
660     unsigned int is_iecp_enabled = !!(proc_ctx->filters_mask & VPP_IECP_MASK);
661     unsigned int is_first_frame  = !!((proc_ctx->frame_order == -1) &&
664     unsigned int di_output_frames_flag = 2; /* Output Current Frame Only */
666     if(proc_ctx->fourcc_input != proc_ctx->fourcc_output ||
667        (is_dn_enabled == 0 && is_di_enabled == 0)){
672         VAProcFilterParameterBufferDeinterlacing *di_param =
673             (VAProcFilterParameterBufferDeinterlacing *)proc_ctx->filter_di;
677         if (di_param->algorithm == VAProcDeinterlacingBob)
/* Motion adaptive/compensated DI after the first frame also produces the
 * previous output frame. */
680         if ((di_param->algorithm == VAProcDeinterlacingMotionAdaptive ||
681              di_param->algorithm == VAProcDeinterlacingMotionCompensated) &&
682             proc_ctx->frame_order != -1)
683             di_output_frames_flag = 0; /* Output both Current Frame and Previous Frame */
686     BEGIN_VEB_BATCH(batch, 6);
687     OUT_VEB_BATCH(batch, VEB_STATE | (6 - 2));
689                   0 << 26 |       // state surface control bits
690                   0 << 11 |       // reserved.
691                   0 << 10 |       // pipe sync disable
692                   di_output_frames_flag << 8  |       // DI output frame
693                   1 << 7  |       // 444->422 downsample method
694                   1 << 6  |       // 422->420 downsample method
695                   is_first_frame  << 5  |    // DN/DI first frame
696                   is_di_enabled   << 4  |             // DI enable
697                   is_dn_enabled   << 3  |             // DN enable
698                   is_iecp_enabled << 2  |             // global IECP enabled
699                   0 << 1  |       // ColorGamutCompressionEnable
700                   0 ) ;           // ColorGamutExpansionEnable.
703                   proc_ctx->dndi_state_table.bo,
704                   I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
707                   proc_ctx->iecp_state_table.bo,
708                   I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
711                   proc_ctx->gamut_state_table.bo,
712                   I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
715                   proc_ctx->vertex_state_table.bo,
716                   I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
718     ADVANCE_VEB_BATCH(batch);
/* Emit a VEB_SURFACE_STATE command describing either the current input
 * (is_output == 0) or current output (is_output != 0) surface: format,
 * pitch, dimensions, tiling, and chroma Y offsets.
 * Supported fourccs: NV12, YUY2, AYUV, RGBA (asserted).
 * NOTE(review): the is_output if/else lines, w2, and some OUT_VEB_BATCH
 * preambles are missing from this excerpt. */
721 void hsw_veb_surface_state(VADriverContextP ctx, struct intel_vebox_context *proc_ctx, unsigned int is_output)
723     struct intel_batchbuffer *batch = proc_ctx->batch;
724     unsigned int u_offset_y = 0, v_offset_y = 0;
725     unsigned int is_uv_interleaved = 0, tiling = 0, swizzle = 0;
726     unsigned int surface_format = PLANAR_420_8;
727     struct object_surface* obj_surf = NULL;
728     unsigned int surface_pitch = 0;
729     unsigned int half_pitch_chroma = 0;
732         obj_surf = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
734         obj_surf = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
737     assert(obj_surf->fourcc == VA_FOURCC_NV12 ||
738            obj_surf->fourcc == VA_FOURCC_YUY2 ||
739            obj_surf->fourcc == VA_FOURCC_AYUV ||
740            obj_surf->fourcc == VA_FOURCC_RGBA);
/* Per-format translation: hardware surface format enum + byte pitch
 * (obj_surf->width is the luma pitch in pixels). */
742     if (obj_surf->fourcc == VA_FOURCC_NV12) {
743         surface_format = PLANAR_420_8;
744         surface_pitch = obj_surf->width;
745         is_uv_interleaved = 1;
746         half_pitch_chroma = 0;
747     } else if (obj_surf->fourcc == VA_FOURCC_YUY2) {
748         surface_format = YCRCB_NORMAL;
749         surface_pitch = obj_surf->width * 2;
750         is_uv_interleaved = 0;
751         half_pitch_chroma = 0;
752     } else if (obj_surf->fourcc == VA_FOURCC_AYUV) {
753         surface_format = PACKED_444A_8;
754         surface_pitch = obj_surf->width * 4;
755         is_uv_interleaved = 0;
756         half_pitch_chroma = 0;
757     } else if (obj_surf->fourcc == VA_FOURCC_RGBA) {
758         surface_format = R8G8B8A8_UNORM_SRGB;
759         surface_pitch = obj_surf->width * 4;
760         is_uv_interleaved = 0;
761         half_pitch_chroma = 0;
764     u_offset_y = obj_surf->y_cb_offset;
765     v_offset_y = obj_surf->y_cr_offset;
767     dri_bo_get_tiling(obj_surf->bo, &tiling, &swizzle);
769     BEGIN_VEB_BATCH(batch, 6);
770     OUT_VEB_BATCH(batch, VEB_SURFACE_STATE | (6 - 2));
773                   is_output);     // surface indentification.
776                   (obj_surf->height - 1) << 18 |  // height . w3
777                   (obj_surf->width -1 )  << 4  |  // width
781                   surface_format      << 28  |    // surface format, YCbCr420. w4
782                   is_uv_interleaved   << 27  |    // interleave chrome , two seperate palar
783                   0                   << 20  |    // reserved
784                   (surface_pitch - 1) << 3   |    // surface pitch, 64 align
785                   half_pitch_chroma   << 2   |    // half pitch for chrome
786                   !!tiling            << 1   |    // tiled surface, linear surface used
787                   (tiling == I915_TILING_Y));     // tiled walk, ignored when liner surface
790                   0 << 29  |     // reserved . w5
791                   0 << 16  |     // X offset for V(Cb)
792                   0 << 15  |     // reserved
793                   u_offset_y);   // Y offset for V(Cb)
796                   0 << 29  |     // reserved . w6
797                   0 << 16  |     // X offset for V(Cr)
798                   0 << 15  |     // reserved
799                   v_offset_y );  // Y offset for V(Cr)
801     ADVANCE_VEB_BATCH(batch);
/* Emit the VEB_DNDI_IECP_STATE command: processing width plus relocations
 * for all eight frame-store surfaces (current/previous inputs, STMM in/out,
 * and the four output slots). Input surfaces relocate read-only; outputs
 * relocate read/write (write domain RENDER).
 * Width is aligned to 64 as the hardware requires.
 * NOTE(review): the OUT_RELOC preamble lines are missing from this excerpt;
 * the commented-out frame swapping is superseded by
 * hsw_veb_surface_reference() below. */
804 void hsw_veb_dndi_iecp_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
806     struct intel_batchbuffer *batch = proc_ctx->batch;
807     unsigned char frame_ctrl_bits = 0;
808     const unsigned int width64 = ALIGN(proc_ctx->width_input, 64);
810     /* s1:update the previous and current input */
811     /* tempFrame = proc_ctx->frame_store[FRAME_IN_PREVIOUS];
812     proc_ctx->frame_store[FRAME_IN_PREVIOUS] = proc_ctx->frame_store[FRAME_IN_CURRENT]; ;
813     proc_ctx->frame_store[FRAME_IN_CURRENT] = tempFrame;
815     if(proc_ctx->surface_input_vebox != -1){
816         vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
817                          proc_ctx->surface_input_vebox);
819         vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
820                          proc_ctx->surface_input);
823     /*s2: update the STMM input and output */
824     /* tempFrame = proc_ctx->frame_store[FRAME_IN_STMM];
825     proc_ctx->frame_store[FRAME_IN_STMM] = proc_ctx->frame_store[FRAME_OUT_STMM]; ;
826     proc_ctx->frame_store[FRAME_OUT_STMM] = tempFrame;
828     /*s3:set reloc buffer address */
829     BEGIN_VEB_BATCH(batch, 10);
830     OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (10 - 2));
831     OUT_VEB_BATCH(batch, (width64 - 1));
833                   proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface->bo,
834                   I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
836                   proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface->bo,
837                   I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
839                   proc_ctx->frame_store[FRAME_IN_STMM].obj_surface->bo,
840                   I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
842                   proc_ctx->frame_store[FRAME_OUT_STMM].obj_surface->bo,
843                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
845                   proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface->bo,
846                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
848                   proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface->bo,
849                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
851                   proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface->bo,
852                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
854                   proc_ctx->frame_store[FRAME_OUT_STATISTIC].obj_surface->bo,
855                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
857     ADVANCE_VEB_BATCH(batch);
/* Allocate all pipeline resources for one VEBOX run: determine input and
 * output fourcc/subsampling/tiling (allocating backing bos where absent),
 * create any internal frame-store surfaces not supplied externally, and
 * (re)allocate the DN/DI, IECP, gamut, and vertex state tables.
 * NOTE(review): this excerpt is missing the declarations of bo/va_status,
 * the dri_bo_alloc size/align arguments, the i965_CreateSurfaces tail, and
 * the lines that initialize input_tiling/output_tiling in the bo == NULL
 * branches — cannot confirm those tiling values from here. */
860 void hsw_veb_resource_prepare(VADriverContextP ctx,
861                               struct intel_vebox_context *proc_ctx)
865     struct i965_driver_data *i965 = i965_driver_data(ctx);
866     unsigned int input_fourcc, output_fourcc;
867     unsigned int input_sampling, output_sampling;
868     unsigned int input_tiling, output_tiling;
869     unsigned int i, swizzle;
870     struct object_surface *obj_surf_out = NULL, *obj_surf_in = NULL;
/* Prefer the intermediate "vebox" surfaces when a pre/post copy is used. */
872     if (proc_ctx->surface_input_vebox_object != NULL) {
873         obj_surf_in = proc_ctx->surface_input_vebox_object;
875         obj_surf_in = proc_ctx->surface_input_object;
878     if (proc_ctx->surface_output_vebox_object != NULL) {
879         obj_surf_out = proc_ctx->surface_output_vebox_object;
881         obj_surf_out = proc_ctx->surface_output_object;
/* No backing bo yet: default to NV12/4:2:0 and allocate one. */
884     if(obj_surf_in->bo == NULL){
885         input_fourcc = VA_FOURCC_NV12;
886         input_sampling = SUBSAMPLE_YUV420;
888         i965_check_alloc_surface_bo(ctx, obj_surf_in, input_tiling, input_fourcc, input_sampling);
890         input_fourcc = obj_surf_in->fourcc;
891         input_sampling = obj_surf_in->subsampling;
892         dri_bo_get_tiling(obj_surf_in->bo, &input_tiling, &swizzle);
/* Collapse the i915 tiling mode to a boolean tiled/linear flag. */
893         input_tiling = !!input_tiling;
896     if(obj_surf_out->bo == NULL){
897         output_fourcc = VA_FOURCC_NV12;
898         output_sampling = SUBSAMPLE_YUV420;
900         i965_check_alloc_surface_bo(ctx, obj_surf_out, output_tiling, output_fourcc, output_sampling);
902         output_fourcc = obj_surf_out->fourcc;
903         output_sampling = obj_surf_out->subsampling;
904         dri_bo_get_tiling(obj_surf_out->bo, &output_tiling, &swizzle);
905         output_tiling = !!output_tiling;
908     /* vebox pipelien input surface format info */
909     proc_ctx->fourcc_input = input_fourcc;
910     proc_ctx->fourcc_output = output_fourcc;
912     /* create pipeline surfaces */
913     for(i = 0; i < FRAME_STORE_SUM; i ++) {
914         if(proc_ctx->frame_store[i].obj_surface){
915             continue; //refer external surface for vebox pipeline
918         VASurfaceID new_surface;
919         struct object_surface *obj_surf = NULL;
921         va_status =   i965_CreateSurfaces(ctx,
922                                           proc_ctx ->width_input,
923                                           proc_ctx ->height_input,
927         assert(va_status == VA_STATUS_SUCCESS);
929         obj_surf = SURFACE(new_surface);
/* Input-side slots match the input format; STMM buffers are forced tiled;
 * output-side slots match the output format. */
932         if( i <= FRAME_IN_PREVIOUS || i == FRAME_OUT_CURRENT_DN) {
933             i965_check_alloc_surface_bo(ctx, obj_surf, input_tiling, input_fourcc, input_sampling);
934         } else if( i == FRAME_IN_STMM || i == FRAME_OUT_STMM){
935             i965_check_alloc_surface_bo(ctx, obj_surf, 1, input_fourcc, input_sampling);
936         } else if( i >= FRAME_OUT_CURRENT){
937             i965_check_alloc_surface_bo(ctx, obj_surf, output_tiling, output_fourcc, output_sampling);
940         proc_ctx->frame_store[i].surface_id = new_surface;
941         proc_ctx->frame_store[i].is_internal_surface = 1;
942         proc_ctx->frame_store[i].obj_surface = obj_surf;
945     /* alloc dndi state table  */
946     dri_bo_unreference(proc_ctx->dndi_state_table.bo);
947     bo = dri_bo_alloc(i965->intel.bufmgr,
948                       "vebox: dndi state Buffer",
950     proc_ctx->dndi_state_table.bo = bo;
951     dri_bo_reference(proc_ctx->dndi_state_table.bo);
953     /* alloc iecp state table  */
954     dri_bo_unreference(proc_ctx->iecp_state_table.bo);
955     bo = dri_bo_alloc(i965->intel.bufmgr,
956                       "vebox: iecp state Buffer",
958     proc_ctx->iecp_state_table.bo = bo;
959     dri_bo_reference(proc_ctx->iecp_state_table.bo);
961     /* alloc gamut state table  */
962     dri_bo_unreference(proc_ctx->gamut_state_table.bo);
963     bo = dri_bo_alloc(i965->intel.bufmgr,
964                       "vebox: gamut state Buffer",
966     proc_ctx->gamut_state_table.bo = bo;
967     dri_bo_reference(proc_ctx->gamut_state_table.bo);
969     /* alloc vertex state table  */
970     dri_bo_unreference(proc_ctx->vertex_state_table.bo);
971     bo = dri_bo_alloc(i965->intel.bufmgr,
972                       "vertex: iecp state Buffer",
974     proc_ctx->vertex_state_table.bo = bo;
975     dri_bo_reference(proc_ctx->vertex_state_table.bo);
/* Bind external surfaces into the frame store for this submission:
 * - current input (vebox intermediate if present, else the user surface);
 * - previous input: DN output of last frame for DN-only, or a forward
 *   reference / swapped DN slot for motion adaptive/compensated DI;
 * - swap STMM in/out from the second frame on;
 * - output slot selection, setting proc_ctx->current_output and, for the
 *   steady-state DI case, flagging a post-copy conversion.
 * Returns VA_STATUS_SUCCESS, or VA_STATUS_ERROR_INVALID_PARAMETER when
 * motion DI lacks a forward reference.
 * NOTE(review): the return-type line preceding this declaration and
 * several brace/condition lines are missing from this excerpt. */
980 hsw_veb_surface_reference(VADriverContextP ctx,
981                           struct intel_vebox_context *proc_ctx)
983     struct object_surface * obj_surf;
984     VEBFrameStore tmp_store;
986     if (proc_ctx->surface_input_vebox_object != NULL) {
987         obj_surf = proc_ctx->surface_input_vebox_object;
989         obj_surf = proc_ctx->surface_input_object;
992     /* update the input surface */
993     proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id = VA_INVALID_ID;
994     proc_ctx->frame_store[FRAME_IN_CURRENT].is_internal_surface = 0;
995     proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface = obj_surf;
997     /* update the previous input surface */
998     if (proc_ctx->frame_order != -1) {
999         if (proc_ctx->filters_mask == VPP_DNDI_DN) {
1000             proc_ctx->frame_store[FRAME_IN_PREVIOUS] = proc_ctx->frame_store[FRAME_OUT_CURRENT_DN];
1001         } else if (proc_ctx->filters_mask & VPP_DNDI_DI) {
1002             VAProcFilterParameterBufferDeinterlacing *di_param =
1003                 (VAProcFilterParameterBufferDeinterlacing *)proc_ctx->filter_di;
1006                 (di_param->algorithm == VAProcDeinterlacingMotionAdaptive ||
1007                  di_param->algorithm == VAProcDeinterlacingMotionCompensated)) {
1008                 if ((proc_ctx->filters_mask & VPP_DNDI_DN) &&
1009                     proc_ctx->frame_order == 0) { /* DNDI */
1010                     tmp_store = proc_ctx->frame_store[FRAME_OUT_CURRENT_DN];
1011                     proc_ctx->frame_store[FRAME_OUT_CURRENT_DN] = proc_ctx->frame_store[FRAME_IN_PREVIOUS];
1012                     proc_ctx->frame_store[FRAME_IN_PREVIOUS] = tmp_store;
1013                 } else { /* DI only */
1014                     VAProcPipelineParameterBuffer *pipe = proc_ctx->pipeline_param;
1015                     struct object_surface *obj_surf = NULL;
1016                     struct i965_driver_data * const i965 = i965_driver_data(ctx);
1019                         !pipe->num_forward_references ||
1020                         pipe->forward_references[0] == VA_INVALID_ID) {
1021                         WARN_ONCE("A forward temporal reference is needed for Motion adaptive/compensated deinterlacing !!!\n");
1023                         return VA_STATUS_ERROR_INVALID_PARAMETER;
1026                     obj_surf = SURFACE(pipe->forward_references[0]);
1027                     assert(obj_surf && obj_surf->bo);
1029                     proc_ctx->frame_store[FRAME_IN_PREVIOUS].surface_id = pipe->forward_references[0];
1030                     proc_ctx->frame_store[FRAME_IN_PREVIOUS].is_internal_surface = 0;
1031                     proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface = obj_surf;
1037     /* update STMM surface */
1038     if (proc_ctx->frame_order != -1) {
1039         tmp_store = proc_ctx->frame_store[FRAME_IN_STMM];
1040         proc_ctx->frame_store[FRAME_IN_STMM] = proc_ctx->frame_store[FRAME_OUT_STMM];
1041         proc_ctx->frame_store[FRAME_OUT_STMM] = tmp_store;
1044     /* update the output surface */
1045     if (proc_ctx->surface_output_vebox_object != NULL) {
1046         obj_surf = proc_ctx->surface_output_vebox_object;
1048         obj_surf = proc_ctx->surface_output_object;
1051     if (proc_ctx->filters_mask == VPP_DNDI_DN) {
1052         proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].surface_id = VA_INVALID_ID;
1053         proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].is_internal_surface = 0;
1054         proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface = obj_surf;
1055         proc_ctx->current_output = FRAME_OUT_CURRENT_DN;
1056     } else if (proc_ctx->filters_mask & VPP_DNDI_DI) {
1057         VAProcFilterParameterBufferDeinterlacing *di_param =
1058             (VAProcFilterParameterBufferDeinterlacing *)proc_ctx->filter_di;
1061             (di_param->algorithm == VAProcDeinterlacingMotionAdaptive ||
1062              di_param->algorithm == VAProcDeinterlacingMotionCompensated)) {
1063             if (proc_ctx->frame_order == -1) {
1064                 proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = VA_INVALID_ID;
1065                 proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
1066                 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface = obj_surf;
1067                 proc_ctx->current_output = FRAME_OUT_CURRENT;
1068             } else if (proc_ctx->frame_order == 0) {
1069                 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].surface_id = VA_INVALID_ID;
1070                 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].is_internal_surface = 0;
1071                 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface = obj_surf;
1072                 proc_ctx->current_output = FRAME_OUT_PREVIOUS;
/* Steady state: hardware writes an internal surface; the result is copied
 * to the user surface afterwards. */
1074                 proc_ctx->current_output = FRAME_OUT_CURRENT;
1075                 proc_ctx->format_convert_flags |= POST_COPY_CONVERT;
1078             proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = VA_INVALID_ID;
1079             proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
1080             proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface = obj_surf;
1081             proc_ctx->current_output = FRAME_OUT_CURRENT;
1084         proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = VA_INVALID_ID;
1085         proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
1086         proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface = obj_surf;
1087         proc_ctx->current_output = FRAME_OUT_CURRENT;
1090     return VA_STATUS_SUCCESS;
/*
 * hsw_veb_surface_unreference:
 * Forget the external surfaces recorded in proc_ctx->frame_store after a
 * processing pass: the input slot is cleared unconditionally, then the
 * output slot that the current filter configuration used (the DN-only
 * slot when filters_mask == VPP_DNDI_DN, otherwise the regular output
 * slot).  Each cleared slot is reset to surface_id = VA_INVALID_ID,
 * is_internal_surface = 0, obj_surface = NULL.
 * NOTE(review): this listing elides some original lines (the opening
 * brace and the "} else {" between the two output branches), so the exact
 * brace structure is inferred, not visible here.
 */
1093 void hsw_veb_surface_unreference(VADriverContextP ctx,
1094                                  struct intel_vebox_context *proc_ctx)
1096     /* unreference the input surface */
1097     proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id = VA_INVALID_ID;
1098     proc_ctx->frame_store[FRAME_IN_CURRENT].is_internal_surface = 0;
1099     proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface = NULL;
1101     /* unreference the shared output surface */
     /* denoise-only pipelines wrote to the DN output slot */
1102     if (proc_ctx->filters_mask == VPP_DNDI_DN) {
1103         proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].surface_id = VA_INVALID_ID;
1104         proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].is_internal_surface = 0;
1105         proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface = NULL;
     /* all other configurations wrote to the regular output slot
      * (the "else" line is elided in this listing) */
1107         proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = VA_INVALID_ID;
1108         proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
1109         proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface = NULL;
/*
 * hsw_veb_pre_format_convert:
 * Inspect the input and output surfaces, decide which conversions the
 * VEBOX pass needs, and lazily create the temporary NV12 surfaces used
 * for them.  Sets bits in proc_ctx->format_convert_flags:
 *   - PRE_FORMAT_CONVERT:  input fourcc is not directly consumable
 *     (YV12/I420/IMC1/IMC3/RGBA) and must be converted to NV12 first;
 *   - POST_FORMAT_CONVERT: output fourcc needs conversion from NV12;
 *   - POST_SCALING_CONVERT: input and output dimensions differ.
 * Also caches input/output width/height in proc_ctx.
 * NOTE(review): this listing elides lines (braces, and presumably the
 * unsupported-format error returns), so control flow is partly inferred.
 */
1113 int hsw_veb_pre_format_convert(VADriverContextP ctx,
1114                                struct intel_vebox_context *proc_ctx)
1117     struct i965_driver_data *i965 = i965_driver_data(ctx);
1118     struct object_surface* obj_surf_input = proc_ctx->surface_input_object;
1119     struct object_surface* obj_surf_output = proc_ctx->surface_output_object;
1120     struct object_surface* obj_surf_input_vebox;
1121     struct object_surface* obj_surf_output_vebox;
1123     proc_ctx->format_convert_flags = 0;
1125     proc_ctx->width_input = obj_surf_input->orig_width;
1126     proc_ctx->height_input = obj_surf_input->orig_height;
1127     proc_ctx->width_output = obj_surf_output->orig_width;
1128     proc_ctx->height_output = obj_surf_output->orig_height;
1130     /* only partial frame is not supported to be processed */
     /* i.e. the pipeline regions must cover the full surfaces */
1132     assert(proc_ctx->width_input == proc_ctx->pipeline_param->surface_region->width);
1133     assert(proc_ctx->height_input == proc_ctx->pipeline_param->surface_region->height);
1134     assert(proc_ctx->width_output == proc_ctx->pipeline_param->output_region->width);
1135     assert(proc_ctx->height_output == proc_ctx->pipeline_param->output_region->height);
     /* size mismatch between input and output implies a scaling pass */
1138     if(proc_ctx->width_output != proc_ctx->width_input ||
1139         proc_ctx->height_output != proc_ctx->height_input){
1140         proc_ctx->format_convert_flags |= POST_SCALING_CONVERT;
1143     /* convert the following format to NV12 format */
1144     if(obj_surf_input->fourcc ==  VA_FOURCC_YV12 ||
1145         obj_surf_input->fourcc ==  VA_FOURCC_I420 ||
1146         obj_surf_input->fourcc ==  VA_FOURCC_IMC1 ||
1147         obj_surf_input->fourcc ==  VA_FOURCC_IMC3 ||
1148         obj_surf_input->fourcc ==  VA_FOURCC_RGBA){
1150         proc_ctx->format_convert_flags |= PRE_FORMAT_CONVERT;
1152     } else if(obj_surf_input->fourcc ==  VA_FOURCC_AYUV ||
1153                obj_surf_input->fourcc ==  VA_FOURCC_YUY2 ||
1154                obj_surf_input->fourcc ==  VA_FOURCC_NV12){
1155         // nothing to do here
1157         /* not support other format as input */
     /* lazily create the NV12 staging surface for the input conversion */
1161     if (proc_ctx->format_convert_flags & PRE_FORMAT_CONVERT) {
1162         if(proc_ctx->surface_input_vebox == VA_INVALID_ID){
1163             va_status = i965_CreateSurfaces(ctx,
1164                                             proc_ctx->width_input,
1165                                             proc_ctx->height_input,
1166                                             VA_RT_FORMAT_YUV420,
1168                                             &(proc_ctx->surface_input_vebox));
1169             assert(va_status == VA_STATUS_SUCCESS);
1170             obj_surf_input_vebox = SURFACE(proc_ctx->surface_input_vebox);
1171             assert(obj_surf_input_vebox);
1173             if (obj_surf_input_vebox) {
1174                 proc_ctx->surface_input_vebox_object = obj_surf_input_vebox;
1175                 i965_check_alloc_surface_bo(ctx, obj_surf_input_vebox, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
     /* NOTE(review): argument order vs. vpp_surface_convert(src, dst)
      * cannot be confirmed from this listing — verify direction upstream */
1179         vpp_surface_convert(ctx, proc_ctx->surface_input_vebox_object, proc_ctx->surface_input_object);
1182     /* create one temporary NV12 surfaces for conversion*/
1183     if(obj_surf_output->fourcc ==  VA_FOURCC_YV12 ||
1184         obj_surf_output->fourcc ==  VA_FOURCC_I420 ||
1185         obj_surf_output->fourcc ==  VA_FOURCC_IMC1 ||
1186         obj_surf_output->fourcc ==  VA_FOURCC_IMC3 ||
1187         obj_surf_output->fourcc ==  VA_FOURCC_RGBA) {
1189         proc_ctx->format_convert_flags |= POST_FORMAT_CONVERT;
1190     } else if(obj_surf_output->fourcc ==  VA_FOURCC_AYUV ||
1191                obj_surf_output->fourcc ==  VA_FOURCC_YUY2 ||
1192                obj_surf_output->fourcc ==  VA_FOURCC_NV12){
1193         /* Nothing to do here */
1195         /* not support other format as input */
     /* staging surface for the NV12 vebox output, needed when the output
      * format differs or scaling is required */
1199     if(proc_ctx->format_convert_flags & POST_FORMAT_CONVERT ||
1200        proc_ctx->format_convert_flags & POST_SCALING_CONVERT){
1201         if(proc_ctx->surface_output_vebox == VA_INVALID_ID){
1202             va_status = i965_CreateSurfaces(ctx,
1203                                             proc_ctx->width_input,
1204                                             proc_ctx->height_input,
1205                                             VA_RT_FORMAT_YUV420,
1207                                             &(proc_ctx->surface_output_vebox));
1208             assert(va_status == VA_STATUS_SUCCESS);
1209             obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_vebox);
1210             assert(obj_surf_output_vebox);
1212             if (obj_surf_output_vebox) {
1213                 proc_ctx->surface_output_vebox_object = obj_surf_output_vebox;
1214                 i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
     /* additional surface at the *output* resolution for the scaling step */
1219     if(proc_ctx->format_convert_flags & POST_SCALING_CONVERT){
1220         if(proc_ctx->surface_output_scaled == VA_INVALID_ID){
1221             va_status = i965_CreateSurfaces(ctx,
1222                                             proc_ctx->width_output,
1223                                             proc_ctx->height_output,
1224                                             VA_RT_FORMAT_YUV420,
1226                                             &(proc_ctx->surface_output_scaled));
1227             assert(va_status == VA_STATUS_SUCCESS);
1228             obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_scaled);
1229             assert(obj_surf_output_vebox);
1231             if (obj_surf_output_vebox) {
1232                 proc_ctx->surface_output_scaled_object = obj_surf_output_vebox;
1233                 i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
/*
 * hsw_veb_post_format_convert:
 * After the VEBOX pass, move the processed frame (taken from the
 * frame_store slot recorded in proc_ctx->current_output) into the user's
 * output surface, applying whichever post steps format_convert_flags
 * recorded: direct copy (POST_COPY_CONVERT on the deferred second call),
 * nothing (format already matches), format conversion only, or
 * scaling followed by format conversion.
 * NOTE(review): braces/else lines are elided in this listing; control
 * flow is inferred from the flag tests.
 */
1241 int hsw_veb_post_format_convert(VADriverContextP ctx,
1242                                 struct intel_vebox_context *proc_ctx)
1244     struct object_surface *obj_surface = NULL;
1246     obj_surface = proc_ctx->frame_store[proc_ctx->current_output].obj_surface;
1248     if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
1249         /* copy the saved frame in the second call */
1250         vpp_surface_convert(ctx,proc_ctx->surface_output_object, obj_surface);
1251     } else if(!(proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
1252               !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)){
1253         /* Output surface format is covered by vebox pipeline and
1254          * processed picture is already store in output surface
1255          * so nothing will be done here */
1256     } else if ((proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
1257                !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)){
1258         /* convert and copy NV12 to YV12/IMC3/IMC2/RGBA output*/
1259         vpp_surface_convert(ctx,proc_ctx->surface_output_object, obj_surface);
1261     } else if(proc_ctx->format_convert_flags & POST_SCALING_CONVERT) {
1262         /* scaling, convert and copy NV12 to YV12/IMC3/IMC2/RGBA output*/
1263         assert(obj_surface->fourcc == VA_FOURCC_NV12);
1265         /* first step :surface scaling */
1266         vpp_surface_scaling(ctx,proc_ctx->surface_output_scaled_object, obj_surface);
1268         /* second step: color format convert and copy to output */
1269         obj_surface = proc_ctx->surface_output_object;
     /* only these destination formats are handled by the convert path */
1271         if(obj_surface->fourcc ==  VA_FOURCC_NV12 ||
1272            obj_surface->fourcc ==  VA_FOURCC_YV12 ||
1273            obj_surface->fourcc ==  VA_FOURCC_I420 ||
1274            obj_surface->fourcc ==  VA_FOURCC_YUY2 ||
1275            obj_surface->fourcc ==  VA_FOURCC_IMC1 ||
1276            obj_surface->fourcc ==  VA_FOURCC_IMC3 ||
1277            obj_surface->fourcc ==  VA_FOURCC_RGBA) {
1278             vpp_surface_convert(ctx, proc_ctx->surface_output_object, proc_ctx->surface_output_scaled_object);
/*
 * gen75_vebox_process_picture (Haswell entry point):
 * Walk the VA-API pipeline filter buffers, record each recognized filter
 * (noise reduction, deinterlacing, color balance, skin-tone enhancement)
 * in proc_ctx->filters_mask and the matching filter_* pointer, then run
 * the pre-format conversion, reference the surfaces, prepare resources on
 * the very first frame (frame_order == -1), build and flush the VEBOX
 * batch (unless this is the POST_COPY_CONVERT second call, which only
 * copies the saved frame), and finally run the post-format conversion.
 * frame_order then alternates between 0 and 1.
 * NOTE(review): several lines are elided in this listing (the "continue"
 * after the buffer-store check, closing braces, and the condition guarding
 * the VA_STATUS_ERROR_INVALID_PARAMETER return), so control flow around
 * those points is inferred.
 */
1287 VAStatus gen75_vebox_process_picture(VADriverContextP ctx,
1288                                      struct intel_vebox_context *proc_ctx)
1290     struct i965_driver_data *i965 = i965_driver_data(ctx);
1292     VAProcPipelineParameterBuffer *pipe = proc_ctx->pipeline_param;
1293     VAProcFilterParameterBuffer* filter = NULL;
1294     struct object_buffer *obj_buf = NULL;
1297     for (i = 0; i < pipe->num_filters; i ++) {
1298         obj_buf = BUFFER(pipe->filters[i]);
1300         assert(obj_buf && obj_buf->buffer_store);
     /* skip malformed filter buffers in release builds */
1302         if (!obj_buf || !obj_buf->buffer_store)
1305         filter = (VAProcFilterParameterBuffer*)obj_buf-> buffer_store->buffer;
1307         if (filter->type == VAProcFilterNoiseReduction) {
1308             proc_ctx->filters_mask |= VPP_DNDI_DN;
1309             proc_ctx->filter_dn = filter;
1310         } else if (filter->type == VAProcFilterDeinterlacing) {
1311             proc_ctx->filters_mask |= VPP_DNDI_DI;
1312             proc_ctx->filter_di = filter;
1313         } else if (filter->type == VAProcFilterColorBalance) {
1314             proc_ctx->filters_mask |= VPP_IECP_PRO_AMP;
1315             proc_ctx->filter_iecp_amp = filter;
1316             proc_ctx->filter_iecp_amp_num_elements = obj_buf->num_elements;
1317         } else if (filter->type == VAProcFilterSkinToneEnhancement) {
1318             proc_ctx->filters_mask |= VPP_IECP_STD_STE;
1319             proc_ctx->filter_iecp_std = filter;
1323     hsw_veb_pre_format_convert(ctx, proc_ctx);
1324     hsw_veb_surface_reference(ctx, proc_ctx);
     /* first call: allocate internal frame-store surfaces and state tables */
1326     if (proc_ctx->frame_order == -1) {
1327         hsw_veb_resource_prepare(ctx, proc_ctx);
1330     if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
1331         assert(proc_ctx->frame_order == 1);
1332         /* directly copy the saved frame in the second call */
     /* otherwise build the VEBOX batch: surface states, state tables,
      * VEB_STATE and DNDI/IECP commands, then submit */
1334         intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
1335         intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
1336         hsw_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
1337         hsw_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
1338         hsw_veb_state_table_setup(ctx, proc_ctx);
1340         hsw_veb_state_command(ctx, proc_ctx);
1341         hsw_veb_dndi_iecp_command(ctx, proc_ctx);
1342         intel_batchbuffer_end_atomic(proc_ctx->batch);
1343         intel_batchbuffer_flush(proc_ctx->batch);
1346     hsw_veb_post_format_convert(ctx, proc_ctx);
1347     // hsw_veb_surface_unreference(ctx, proc_ctx);
1349     proc_ctx->frame_order = (proc_ctx->frame_order + 1) % 2;
1351     return VA_STATUS_SUCCESS;
     /* reached only on an elided error path earlier in the function */
1354     return VA_STATUS_ERROR_INVALID_PARAMETER;
1357 void gen75_vebox_context_destroy(VADriverContextP ctx,
1358 struct intel_vebox_context *proc_ctx)
1362 if(proc_ctx->surface_input_vebox != VA_INVALID_ID){
1363 i965_DestroySurfaces(ctx, &proc_ctx->surface_input_vebox, 1);
1364 proc_ctx->surface_input_vebox = VA_INVALID_ID;
1365 proc_ctx->surface_input_vebox_object = NULL;
1368 if(proc_ctx->surface_output_vebox != VA_INVALID_ID){
1369 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_vebox, 1);
1370 proc_ctx->surface_output_vebox = VA_INVALID_ID;
1371 proc_ctx->surface_output_vebox_object = NULL;
1374 if(proc_ctx->surface_output_scaled != VA_INVALID_ID){
1375 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_scaled, 1);
1376 proc_ctx->surface_output_scaled = VA_INVALID_ID;
1377 proc_ctx->surface_output_scaled_object = NULL;
1380 for(i = 0; i < FRAME_STORE_SUM; i ++) {
1381 if (proc_ctx->frame_store[i].is_internal_surface == 1) {
1382 assert(proc_ctx->frame_store[i].surface_id != VA_INVALID_ID);
1384 if (proc_ctx->frame_store[i].surface_id != VA_INVALID_ID)
1385 i965_DestroySurfaces(ctx, &proc_ctx->frame_store[i].surface_id, 1);
1388 proc_ctx->frame_store[i].surface_id = VA_INVALID_ID;
1389 proc_ctx->frame_store[i].is_internal_surface = 0;
1390 proc_ctx->frame_store[i].obj_surface = NULL;
1393 /* dndi state table */
1394 dri_bo_unreference(proc_ctx->dndi_state_table.bo);
1395 proc_ctx->dndi_state_table.bo = NULL;
1397 /* iecp state table */
1398 dri_bo_unreference(proc_ctx->iecp_state_table.bo);
1399 proc_ctx->dndi_state_table.bo = NULL;
1401 /* gamut statu table */
1402 dri_bo_unreference(proc_ctx->gamut_state_table.bo);
1403 proc_ctx->gamut_state_table.bo = NULL;
1405 /* vertex state table */
1406 dri_bo_unreference(proc_ctx->vertex_state_table.bo);
1407 proc_ctx->vertex_state_table.bo = NULL;
1409 intel_batchbuffer_free(proc_ctx->batch);
1414 struct intel_vebox_context * gen75_vebox_context_init(VADriverContextP ctx)
1416 struct intel_driver_data *intel = intel_driver_data(ctx);
1417 struct intel_vebox_context *proc_context = calloc(1, sizeof(struct intel_vebox_context));
1420 proc_context->batch = intel_batchbuffer_new(intel, I915_EXEC_VEBOX, 0);
1421 memset(proc_context->frame_store, 0, sizeof(VEBFrameStore)*FRAME_STORE_SUM);
1423 for (i = 0; i < FRAME_STORE_SUM; i ++) {
1424 proc_context->frame_store[i].surface_id = VA_INVALID_ID;
1425 proc_context->frame_store[i].is_internal_surface = 0;
1426 proc_context->frame_store[i].obj_surface = NULL;
1429 proc_context->filters_mask = 0;
1430 proc_context->frame_order = -1; /* the first frame */
1431 proc_context->surface_output_object = NULL;
1432 proc_context->surface_input_object = NULL;
1433 proc_context->surface_input_vebox = VA_INVALID_ID;
1434 proc_context->surface_input_vebox_object = NULL;
1435 proc_context->surface_output_vebox = VA_INVALID_ID;
1436 proc_context->surface_output_vebox_object = NULL;
1437 proc_context->surface_output_scaled = VA_INVALID_ID;
1438 proc_context->surface_output_scaled_object = NULL;
1439 proc_context->filters_mask = 0;
1440 proc_context->format_convert_flags = 0;
1442 return proc_context;
/*
 * bdw_veb_state_command (Broadwell variant):
 * Emit the 12-dword VEB_STATE command that configures the VEBOX pipeline:
 * enable bits for denoise (DN), deinterlace (DI) and image-enhancement
 * (IECP), the first-frame flag, the DI output-frame policy, and the
 * relocation pointers to the four state tables (DN/DI, IECP, gamut,
 * vertex).  IECP is forced on when the input and output fourcc differ or
 * when neither DN nor DI is enabled, so the pipeline still performs the
 * required format conversion / pass-through.
 * NOTE(review): this listing elides lines, including the second operand of
 * the is_first_frame expression and the OUT_VEB_RELOC(...) opener before
 * each state-table pointer; those parts are inferred from the remaining
 * relocation arguments.
 */
1445 void bdw_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
1447     struct intel_batchbuffer *batch = proc_ctx->batch;
1448     unsigned int is_dn_enabled   = !!(proc_ctx->filters_mask & VPP_DNDI_DN);
1449     unsigned int is_di_enabled   = !!(proc_ctx->filters_mask & VPP_DNDI_DI);
1450     unsigned int is_iecp_enabled = !!(proc_ctx->filters_mask & VPP_IECP_MASK);
1451     unsigned int is_first_frame  = !!((proc_ctx->frame_order == -1) &&
1454     unsigned int di_output_frames_flag = 2; /* Output Current Frame Only */
     /* force the IECP path for pure format conversion / pass-through */
1456     if(proc_ctx->fourcc_input != proc_ctx->fourcc_output ||
1457        (is_dn_enabled == 0 && is_di_enabled == 0)){
1458         is_iecp_enabled = 1;
1461     if (is_di_enabled) {
1462         VAProcFilterParameterBufferDeinterlacing *di_param =
1463             (VAProcFilterParameterBufferDeinterlacing *)proc_ctx->filter_di;
1467         if (di_param->algorithm == VAProcDeinterlacingBob)
     /* motion-based DI needs the previous frame too, except on the first
      * frame where no previous frame exists yet */
1470         if ((di_param->algorithm == VAProcDeinterlacingMotionAdaptive ||
1471              di_param->algorithm == VAProcDeinterlacingMotionCompensated) &&
1472             proc_ctx->frame_order != -1)
1473             di_output_frames_flag = 0; /* Output both Current Frame and Previous Frame */
1476     BEGIN_VEB_BATCH(batch, 0xc);
1477     OUT_VEB_BATCH(batch, VEB_STATE | (0xc - 2));
1478     OUT_VEB_BATCH(batch,
1479                   0 << 25 |       // state surface control bits
1480                   0 << 23 |       // reserved.
1481                   0 << 22 |       // gamut expansion position
1482                   0 << 15 |       // reserved.
1483                   0 << 14 |       // single slice vebox enable
1484                   0 << 13 |       // hot pixel filter enable
1485                   0 << 12 |       // alpha plane enable
1486                   0 << 11 |       // vignette enable
1487                   0 << 10 |       // demosaic enable
1488                   di_output_frames_flag << 8  |       // DI output frame
1489                   1 << 7  |       // 444->422 downsample method
1490                   1 << 6  |       // 422->420 downsample method
1491                   is_first_frame  << 5  |   // DN/DI first frame
1492                   is_di_enabled   << 4  |   // DI enable
1493                   is_dn_enabled   << 3  |   // DN enable
1494                   is_iecp_enabled << 2  |   // global IECP enabled
1495                   0 << 1  |       // ColorGamutCompressionEnable
1496                   0 ) ;           // ColorGamutExpansionEnable.
     /* relocation: DN/DI state table (OUT_VEB_RELOC opener elided) */
1499                   proc_ctx->dndi_state_table.bo,
1500                   I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1502     OUT_VEB_BATCH(batch, 0);
     /* relocation: IECP state table */
1505                   proc_ctx->iecp_state_table.bo,
1506                   I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1508     OUT_VEB_BATCH(batch, 0);
     /* relocation: gamut state table */
1511                   proc_ctx->gamut_state_table.bo,
1512                   I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1514     OUT_VEB_BATCH(batch, 0);
     /* relocation: vertex state table */
1517                   proc_ctx->vertex_state_table.bo,
1518                   I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
1520     OUT_VEB_BATCH(batch, 0);
1522     OUT_VEB_BATCH(batch, 0);/*caputre pipe state pointer*/
1523     OUT_VEB_BATCH(batch, 0);
1525     ADVANCE_VEB_BATCH(batch);
/*
 * bdw_veb_dndi_iecp_command (Broadwell variant):
 * Emit the 20-dword VEB_DNDI_IECP_STATE command that binds all frame-store
 * surfaces to the DN/DI + IECP stage: current/previous input frames, the
 * STMM (spatial-temporal motion measure) in/out buffers, the DN output,
 * the current/previous outputs, and the statistics surface.  The processed
 * width is aligned up to 64 pixels (width64 - 1 is what the command takes).
 * NOTE(review): the OUT_VEB_RELOC(...) opener before each surface pointer
 * and the odd-numbered address dwords are elided in this listing; the
 * relocation structure is inferred from the remaining arguments.
 */
1528 void bdw_veb_dndi_iecp_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
1530     struct intel_batchbuffer *batch = proc_ctx->batch;
1531     unsigned char frame_ctrl_bits = 0;
1532     const unsigned int width64 = ALIGN(proc_ctx->width_input, 64);
1534     BEGIN_VEB_BATCH(batch, 0x14);
1535     OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (0x14 - 2));//DWord 0
1536     OUT_VEB_BATCH(batch, (width64 - 1));
     /* input surface relocations: read-only (write domain 0) */
1539                   proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface->bo,
1540                   I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 2
1541     OUT_VEB_BATCH(batch,0);//DWord 3
1544                   proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface->bo,
1545                   I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 4
1546     OUT_VEB_BATCH(batch,0);//DWord 5
1549                   proc_ctx->frame_store[FRAME_IN_STMM].obj_surface->bo,
1550                   I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 6
1551     OUT_VEB_BATCH(batch,0);//DWord 7
     /* output surface relocations: read/write (RENDER write domain) */
1554                   proc_ctx->frame_store[FRAME_OUT_STMM].obj_surface->bo,
1555                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 8
1556     OUT_VEB_BATCH(batch,0);//DWord 9
1559                   proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface->bo,
1560                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 10
1561     OUT_VEB_BATCH(batch,0);//DWord 11
1564                   proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface->bo,
1565                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 12
1566     OUT_VEB_BATCH(batch,0);//DWord 13
1569                   proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface->bo,
1570                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 14
1571     OUT_VEB_BATCH(batch,0);//DWord 15
1574                   proc_ctx->frame_store[FRAME_OUT_STATISTIC].obj_surface->bo,
1575                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 16
1576     OUT_VEB_BATCH(batch,0);//DWord 17
1578     OUT_VEB_BATCH(batch,0);//DWord 18
1579     OUT_VEB_BATCH(batch,0);//DWord 19
1581     ADVANCE_VEB_BATCH(batch);
1584 VAStatus gen8_vebox_process_picture(VADriverContextP ctx,
1585 struct intel_vebox_context *proc_ctx)
1587 struct i965_driver_data *i965 = i965_driver_data(ctx);
1589 VAProcPipelineParameterBuffer *pipe = proc_ctx->pipeline_param;
1590 VAProcFilterParameterBuffer* filter = NULL;
1591 struct object_buffer *obj_buf = NULL;
1594 for (i = 0; i < pipe->num_filters; i ++) {
1595 obj_buf = BUFFER(pipe->filters[i]);
1597 assert(obj_buf && obj_buf->buffer_store);
1599 if (!obj_buf || !obj_buf->buffer_store)
1602 filter = (VAProcFilterParameterBuffer*)obj_buf-> buffer_store->buffer;
1604 if (filter->type == VAProcFilterNoiseReduction) {
1605 proc_ctx->filters_mask |= VPP_DNDI_DN;
1606 proc_ctx->filter_dn = filter;
1607 } else if (filter->type == VAProcFilterDeinterlacing) {
1608 proc_ctx->filters_mask |= VPP_DNDI_DI;
1609 proc_ctx->filter_di = filter;
1610 } else if (filter->type == VAProcFilterColorBalance) {
1611 proc_ctx->filters_mask |= VPP_IECP_PRO_AMP;
1612 proc_ctx->filter_iecp_amp = filter;
1613 proc_ctx->filter_iecp_amp_num_elements = obj_buf->num_elements;
1614 } else if (filter->type == VAProcFilterSkinToneEnhancement) {
1615 proc_ctx->filters_mask |= VPP_IECP_STD_STE;
1616 proc_ctx->filter_iecp_std = filter;
1620 hsw_veb_pre_format_convert(ctx, proc_ctx);
1621 hsw_veb_surface_reference(ctx, proc_ctx);
1623 if (proc_ctx->frame_order == -1) {
1624 hsw_veb_resource_prepare(ctx, proc_ctx);
1627 if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
1628 assert(proc_ctx->frame_order == 1);
1629 /* directly copy the saved frame in the second call */
1631 intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
1632 intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
1633 hsw_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
1634 hsw_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
1635 hsw_veb_state_table_setup(ctx, proc_ctx);
1637 bdw_veb_state_command(ctx, proc_ctx);
1638 bdw_veb_dndi_iecp_command(ctx, proc_ctx);
1639 intel_batchbuffer_end_atomic(proc_ctx->batch);
1640 intel_batchbuffer_flush(proc_ctx->batch);
1643 hsw_veb_post_format_convert(ctx, proc_ctx);
1644 // hsw_veb_surface_unreference(ctx, proc_ctx);
1646 proc_ctx->frame_order = (proc_ctx->frame_order + 1) % 2;
1648 return VA_STATUS_SUCCESS;
1651 return VA_STATUS_ERROR_INVALID_PARAMETER;