2 * Copyright © 2011 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Li Xiaowei <xiaowei.a.li@intel.com>
34 #include "intel_batchbuffer.h"
35 #include "intel_driver.h"
36 #include "i965_defines.h"
37 #include "i965_structs.h"
38 #include "gen75_vpp_vebox.h"
39 #include "intel_media.h"
44 i965_MapBuffer(VADriverContextP ctx, VABufferID buf_id, void **);
47 i965_UnmapBuffer(VADriverContextP ctx, VABufferID buf_id);
50 i965_DeriveImage(VADriverContextP ctx, VABufferID surface, VAImage *out_image);
53 i965_DestroyImage(VADriverContextP ctx, VAImageID image);
56 i965_DestroySurfaces(VADriverContextP ctx,
57 VASurfaceID *surface_list,
61 i965_CreateSurfaces(VADriverContextP ctx,
66 VASurfaceID *surfaces);
68 VAStatus vpp_surface_convert(VADriverContextP ctx,
69 struct object_surface *src_obj_surf,
70 struct object_surface *dst_obj_surf)
72 VAStatus va_status = VA_STATUS_SUCCESS;
74 assert(src_obj_surf->orig_width == dst_obj_surf->orig_width);
75 assert(src_obj_surf->orig_height == dst_obj_surf->orig_height);
77 VARectangle src_rect, dst_rect;
78 src_rect.x = dst_rect.x = 0;
79 src_rect.y = dst_rect.y = 0;
80 src_rect.width = dst_rect.width = src_obj_surf->orig_width;
81 src_rect.height = dst_rect.height = dst_obj_surf->orig_height;
83 struct i965_surface src_surface, dst_surface;
84 src_surface.base = (struct object_base *)src_obj_surf;
85 src_surface.type = I965_SURFACE_TYPE_SURFACE;
86 src_surface.flags = I965_SURFACE_FLAG_FRAME;
88 dst_surface.base = (struct object_base *)dst_obj_surf;
89 dst_surface.type = I965_SURFACE_TYPE_SURFACE;
90 dst_surface.flags = I965_SURFACE_FLAG_FRAME;
92 va_status = i965_image_processing(ctx,
/*
 * vpp_surface_scaling - scale src_obj_surf into dst_obj_surf using the
 * AVS (adaptive video scaler) path of the i965 post-processing pipeline.
 *
 * NOTE(review): unlike vpp_surface_convert() above, the DESTINATION
 * parameter comes first in this signature - easy to misread at call sites.
 * Both surfaces must be NV12 (asserted).
 * NOTE(review): several original lines (braces, rect x/y initialization,
 * the remaining i965_scaling_processing() arguments, the return) are not
 * visible in this excerpt.
 */
100 VAStatus vpp_surface_scaling(VADriverContextP ctx,
101 struct object_surface *dst_obj_surf,
102 struct object_surface *src_obj_surf)
104 VAStatus va_status = VA_STATUS_SUCCESS;
/* AVS: higher-quality polyphase scaling (vs. bilinear). */
105 int flags = I965_PP_FLAG_AVS;
107 assert(src_obj_surf->fourcc == VA_FOURCC('N','V','1','2'));
108 assert(dst_obj_surf->fourcc == VA_FOURCC('N','V','1','2'));
110 VARectangle src_rect, dst_rect;
/* Source rectangle covers the whole input frame ... */
113 src_rect.width = src_obj_surf->orig_width;
114 src_rect.height = src_obj_surf->orig_height;
/* ... and the destination rectangle the whole output frame. */
118 dst_rect.width = dst_obj_surf->orig_width;
119 dst_rect.height = dst_obj_surf->orig_height;
121 va_status = i965_scaling_processing(ctx,
/*
 * hsw_veb_dndi_table - fill the VEBOX DN/DI (denoise / de-interlace)
 * state table that proc_ctx->dndi_state_table.ptr points at (mapped by
 * hsw_veb_state_table_setup()).  Each "*p_table ++" stores one 32-bit
 * DWord of hardware state; the bit-field layout (w0..w9 in the comments)
 * follows the Haswell VEBOX programming documentation.
 *
 * NOTE(review): lines are missing from this excerpt (function brace,
 * parts of the DI parameter handling, some DWord tails), so only the
 * visible logic is documented here.
 */
131 void hsw_veb_dndi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
133 unsigned int* p_table ;
/* Defaults used when no de-interlacing filter is attached:
 * progressive denoise on, top-field-first off, MCDI off. */
134 int progressive_dn = 1;
135 int dndi_top_first = 0;
136 int motion_compensated_enable = 0;
/* When DI is requested, derive field order and the motion-compensated
 * DI enable from the user-supplied de-interlacing parameter buffer. */
138 if (proc_ctx->filters_mask & VPP_DNDI_DI) {
139 VAProcFilterParameterBufferDeinterlacing *di_param =
140 (VAProcFilterParameterBufferDeinterlacing *)proc_ctx->filter_di;
144 dndi_top_first = !(di_param->flags & VA_DEINTERLACING_BOTTOM_FIELD);
145 motion_compensated_enable = (di_param->algorithm == VAProcDeinterlacingMotionCompensated);
/* NOTE(review): di_param/dn_param below are not used in any visible
 * line - presumably consumed by statements omitted from this excerpt;
 * confirm against the full source before removing them. */
149 VAProcFilterParameterBufferDeinterlacing *di_param =
150 (VAProcFilterParameterBufferDeinterlacing *) proc_ctx->filter_di;
152 VAProcFilterParameterBuffer * dn_param =
153 (VAProcFilterParameterBuffer *) proc_ctx->filter_dn;
155 p_table = (unsigned int *)proc_ctx->dndi_state_table.ptr;
157 *p_table ++ = 0; // reserved . w0
158 *p_table ++ = ( 140 << 24 | // denoise STAD threshold . w1
159 192 << 16 | // dnmh_history_max
160 0 << 12 | // reserved
161 7 << 8 | // dnmh_delta[3:0]
162 38 ); // denoise ASD threshold
164 *p_table ++ = ( 0 << 30 | // reserved . w2
165 0 << 24 | // temporal diff th
166 0 << 22 | // reserved.
167 0 << 16 | // low temporal diff th
169 1 << 8 | // denoise moving pixel th
170 38 ); // denoise th for sum of complexity measure
172 *p_table ++ = ( 0 << 30 | // reserved . w3
173 12<< 24 | // good neighbor th[5:0]
174 9 << 20 | // CAT slope minus 1
175 5 << 16 | // SAD Tight in
176 0 << 14 | // smooth mv th
177 0 << 12 | // reserved
178 1 << 8 | // bne_edge_th[3:0]
179 20 ); // block noise estimate noise th
181 *p_table ++ = ( 0 << 31 | // STMM blending constant select. w4
182 64 << 24 | // STMM trc1
183 125<< 16 | // STMM trc2
184 0 << 14 | // reserved
185 30 << 8 | // VECM_mul
186 150 ); // maximum STMM
188 *p_table ++ = ( 118<< 24 | // minimum STMM . W5
189 0 << 22 | // STMM shift down
190 1 << 20 | // STMM shift up
191 5 << 16 | // STMM output shift
192 100 << 8 | // SDI threshold
195 *p_table ++ = ( 50 << 24 | // SDI fallback mode 1 T1 constant . W6
196 100 << 16 | // SDI fallback mode 1 T2 constant
197 37 << 8 | // SDI fallback mode 2 constant(angle2x1)
198 175 ); // FMD temporal difference threshold
200 *p_table ++ = ( 16 << 24 | // FMD #1 vertical difference th . w7
201 100<< 16 | // FMD #2 vertical difference th
203 2 << 8 | // FMD tear threshold
/* The three run-time DI decisions computed above land in w7. */
204 motion_compensated_enable << 7 | // MCDI Enable, use motion compensated deinterlace algorithm
205 progressive_dn << 6 | // progressive DN
207 dndi_top_first << 3 | // DN/DI Top First
210 *p_table ++ = ( 0 << 29 | // reserved . W8
211 32 << 23 | // dnmh_history_init[5:0]
212 10 << 19 | // neighborPixel th
213 0 << 18 | // reserved
214 0 << 16 | // FMD for 2nd field of previous frame
215 25 << 10 | // MC pixel consistency th
216 0 << 8 | // FMD for 1st field for current frame
220 *p_table ++ = ( 0 << 24 | // reserved
221 140<< 16 | // chr_dnmh_stad_th
222 0 << 13 | // reserved
223 1 << 12 | // chroma denoise enable
224 13 << 6 | // chr temp diff th
225 7 ); // chr temp diff low
/*
 * hsw_veb_iecp_std_table - program the STD/STE (skin-tone detection /
 * enhancement) section at the start (offset 0) of the mapped IECP state
 * table.  When the STD/STE filter is not requested, the 29-DWord region
 * is zeroed (feature disabled); otherwise hard-coded default
 * coefficients are written.
 *
 * NOTE(review): the else-branch brace lines are missing from this
 * excerpt; the literal table below is the filter-enabled path.
 */
229 void hsw_veb_iecp_std_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
231 unsigned int *p_table = proc_ctx->iecp_state_table.ptr + 0 ;
232 //VAProcFilterParameterBuffer * std_param =
233 // (VAProcFilterParameterBuffer *) proc_ctx->filter_std;
235 if(!(proc_ctx->filters_mask & VPP_IECP_STD_STE)){
236 memset(p_table, 0, 29 * 4);
/* Default STD/STE coefficient block (opaque hardware values). */
238 *p_table ++ = 0x9a6e39f0;
239 *p_table ++ = 0x400c0000;
240 *p_table ++ = 0x00001180;
241 *p_table ++ = 0xfe2f2e00;
242 *p_table ++ = 0x000000ff;
244 *p_table ++ = 0x00140000;
245 *p_table ++ = 0xd82e0000;
246 *p_table ++ = 0x8285ecec;
247 *p_table ++ = 0x00008282;
248 *p_table ++ = 0x00000000;
250 *p_table ++ = 0x02117000;
251 *p_table ++ = 0xa38fec96;
252 *p_table ++ = 0x0000c8c8;
253 *p_table ++ = 0x00000000;
254 *p_table ++ = 0x01478000;
256 *p_table ++ = 0x0007c306;
257 *p_table ++ = 0x00000000;
258 *p_table ++ = 0x00000000;
259 *p_table ++ = 0x1c1bd000;
260 *p_table ++ = 0x00000000;
262 *p_table ++ = 0x00000000;
263 *p_table ++ = 0x00000000;
264 *p_table ++ = 0x0007cf80;
265 *p_table ++ = 0x00000000;
266 *p_table ++ = 0x00000000;
268 *p_table ++ = 0x1c080000;
269 *p_table ++ = 0x00000000;
270 *p_table ++ = 0x00000000;
271 *p_table ++ = 0x00000000;
/*
 * hsw_veb_iecp_ace_table - program the ACE (automatic contrast
 * enhancement) section of the IECP state table at byte offset 116.
 * Disabled filter => zero the 13-DWord region; otherwise write the
 * default ACE lookup values below.
 */
275 void hsw_veb_iecp_ace_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
277 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 116);
279 if(!(proc_ctx->filters_mask & VPP_IECP_ACE)){
280 memset(p_table, 0, 13 * 4);
/* Default ACE table (opaque hardware values). */
282 *p_table ++ = 0x00000068;
283 *p_table ++ = 0x4c382410;
284 *p_table ++ = 0x9c887460;
285 *p_table ++ = 0xebd8c4b0;
286 *p_table ++ = 0x604c3824;
288 *p_table ++ = 0xb09c8874;
289 *p_table ++ = 0x0000d8c4;
290 *p_table ++ = 0x00000000;
291 *p_table ++ = 0x00000000;
292 *p_table ++ = 0x00000000;
294 *p_table ++ = 0x00000000;
295 *p_table ++ = 0x00000000;
296 *p_table ++ = 0x00000000;
/*
 * hsw_veb_iecp_tcc_table - program the TCC (total color control)
 * section of the IECP state table at byte offset 168.  Disabled filter
 * => zero the 11-DWord region; otherwise write the defaults below.
 */
300 void hsw_veb_iecp_tcc_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
302 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 168);
303 // VAProcFilterParameterBuffer * tcc_param =
304 // (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
306 if(!(proc_ctx->filters_mask & VPP_IECP_TCC)){
307 memset(p_table, 0, 11 * 4);
/* Default TCC table (opaque hardware values). */
309 *p_table ++ = 0x00000000;
310 *p_table ++ = 0x00000000;
311 *p_table ++ = 0x1e34cc91;
312 *p_table ++ = 0x3e3cce91;
313 *p_table ++ = 0x02e80195;
315 *p_table ++ = 0x0197046b;
316 *p_table ++ = 0x01790174;
317 *p_table ++ = 0x00000000;
318 *p_table ++ = 0x00000000;
319 *p_table ++ = 0x03030000;
321 *p_table ++ = 0x009201c0;
/*
 * hsw_veb_iecp_pro_amp_table - program the ProcAmp (brightness /
 * contrast / hue / saturation) section of the IECP state table at byte
 * offset 212 (2 DWords).  Disabled filter => zero the region; otherwise
 * fold the VAProcColorBalance attributes into the fixed-point fields
 * the hardware expects (via intel_format_convert()).
 *
 * NOTE(review): src_hue is read below but declared on a line missing
 * from this excerpt (presumably a float initialized to 0.0).
 */
325 void hsw_veb_iecp_pro_amp_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
/* Hardware defaults: unity contrast (U4.7 0x80 == 1.0), zero
 * brightness, cos=1.0 (S7.8 256), sin=0. */
327 unsigned int contrast = 0x80; //default
328 int brightness = 0x00; //default
329 int cos_c_s = 256 ; //default
330 int sin_c_s = 0; //default
331 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 212);
333 if(!(proc_ctx->filters_mask & VPP_IECP_PRO_AMP)){
334 memset(p_table, 0, 2 * 4);
336 float src_saturation = 1.0;
338 float src_contrast = 1.0;
339 float src_brightness = 0.0;
340 float tmp_value = 0.0;
343 VAProcFilterParameterBufferColorBalance * amp_params =
344 (VAProcFilterParameterBufferColorBalance *) proc_ctx->filter_iecp_amp;
/* Collect each requested color-balance attribute; unspecified ones
 * keep their defaults above. */
346 for (i = 0; i < proc_ctx->filter_iecp_amp_num_elements; i++){
347 VAProcColorBalanceType attrib = amp_params[i].attrib;
349 if(attrib == VAProcColorBalanceHue) {
350 src_hue = amp_params[i].value; //(-180.0, 180.0)
351 }else if(attrib == VAProcColorBalanceSaturation) {
352 src_saturation = amp_params[i].value; //(0.0, 10.0)
353 }else if(attrib == VAProcColorBalanceBrightness) {
354 src_brightness = amp_params[i].value; // (-100.0, 100.0)
355 brightness = intel_format_convert(src_brightness, 7, 4, 1);
356 }else if(attrib == VAProcColorBalanceContrast) {
357 src_contrast = amp_params[i].value; // (0.0, 10.0)
358 contrast = intel_format_convert(src_contrast, 4, 7, 0);
/* Hue is applied as a rotation combined with contrast and saturation:
 * cos/sin terms packed as S7.8 fixed point. */
362 tmp_value = cos(src_hue/180*PI) * src_contrast * src_saturation;
363 cos_c_s = intel_format_convert(tmp_value, 7, 8, 1);
365 tmp_value = sin(src_hue/180*PI) * src_contrast * src_saturation;
366 sin_c_s = intel_format_convert(tmp_value, 7, 8, 1);
368 *p_table ++ = ( 0 << 28 | //reserved
369 contrast << 17 | //contrast value (U4.7 format)
371 brightness << 1| // S7.4 format
374 *p_table ++ = ( cos_c_s << 16 | // cos(h) * contrast * saturation
375 sin_c_s); // sin(h) * contrast * saturation
/*
 * hsw_veb_iecp_csc_table - program the CSC (color-space conversion)
 * section of the IECP state table at byte offset 220 (8 DWords).
 * Starts from an identity matrix and zero offsets, then selects
 * RGB->YUV or YUV->RGB coefficients based on the input/output fourccs.
 * Coefficients are packed as S2.10 fixed point, offsets as S10 values,
 * via intel_format_convert().
 *
 * NOTE(review): several lines are missing from this excerpt (function
 * brace, u_coef/v_coef offsets of the RGBA->YUV path at original lines
 * 409-413, some DWord tails).
 */
381 void hsw_veb_iecp_csc_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
383 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 220);
/* Identity transform and zero offsets by default. */
384 float tran_coef[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
385 float v_coef[3] = {0.0, 0.0, 0.0};
386 float u_coef[3] = {0.0, 0.0, 0.0};
387 int is_transform_enabled = 0;
389 if(!(proc_ctx->filters_mask & VPP_IECP_CSC)){
390 memset(p_table, 0, 8 * 4);
/* RGBA input -> YUV-family output: BT.601 limited-range RGB->YUV
 * coefficients. */
394 if(proc_ctx->fourcc_input == VA_FOURCC('R','G','B','A') &&
395 (proc_ctx->fourcc_output == VA_FOURCC('N','V','1','2') ||
396 proc_ctx->fourcc_output == VA_FOURCC('Y','V','1','2') ||
/* NOTE(review): 'Y','V','Y','2' looks like a typo for 'Y','U','Y','2'
 * (YUY2) - the mirrored input check below uses 'Y','U','Y','2'.
 * As written, YUY2 output would not match this branch; confirm
 * against the upstream source. */
397 proc_ctx->fourcc_output == VA_FOURCC('Y','V','Y','2') ||
398 proc_ctx->fourcc_output == VA_FOURCC('A','Y','U','V'))) {
400 tran_coef[0] = 0.257;
401 tran_coef[1] = 0.504;
402 tran_coef[2] = 0.098;
403 tran_coef[3] = -0.148;
404 tran_coef[4] = -0.291;
405 tran_coef[5] = 0.439;
406 tran_coef[6] = 0.439;
407 tran_coef[7] = -0.368;
408 tran_coef[8] = -0.071;
414 is_transform_enabled = 1;
/* YUV-family input -> RGBA output: BT.601 YUV->RGB coefficients. */
415 }else if((proc_ctx->fourcc_input == VA_FOURCC('N','V','1','2') ||
416 proc_ctx->fourcc_input == VA_FOURCC('Y','V','1','2') ||
417 proc_ctx->fourcc_input == VA_FOURCC('Y','U','Y','2') ||
418 proc_ctx->fourcc_input == VA_FOURCC('A','Y','U','V'))&&
419 proc_ctx->fourcc_output == VA_FOURCC('R','G','B','A')) {
421 tran_coef[0] = 1.164;
422 tran_coef[1] = 0.000;
423 tran_coef[2] = 1.569;
424 tran_coef[3] = 1.164;
425 tran_coef[4] = -0.813;
426 tran_coef[5] = -0.392;
427 tran_coef[6] = 1.164;
428 tran_coef[7] = 2.017;
429 tran_coef[8] = 0.000;
/* Chroma offsets: subtract 128 (scaled by 4) before the matrix. */
432 v_coef[1] = -128 * 4;
433 v_coef[2] = -128 * 4;
435 is_transform_enabled = 1;
436 }else if(proc_ctx->fourcc_input != proc_ctx->fourcc_output){
437 //enable when input and output format are different.
438 is_transform_enabled = 1;
441 if(is_transform_enabled == 0){
442 memset(p_table, 0, 8 * 4);
/* Pack the 3x3 matrix plus offsets into the 8 hardware DWords. */
444 *p_table ++ = ( 0 << 29 | //reserved
445 intel_format_convert(tran_coef[1], 2, 10, 1) << 16 | //c1, s2.10 format
446 intel_format_convert(tran_coef[0], 2, 10, 1) << 3 | //c0, s2.10 format
448 0 << 1 | // yuv_channel swap
449 is_transform_enabled);
451 *p_table ++ = ( 0 << 26 | //reserved
452 intel_format_convert(tran_coef[3], 2, 10, 1) << 13 |
453 intel_format_convert(tran_coef[2], 2, 10, 1));
455 *p_table ++ = ( 0 << 26 | //reserved
456 intel_format_convert(tran_coef[5], 2, 10, 1) << 13 |
457 intel_format_convert(tran_coef[4], 2, 10, 1));
459 *p_table ++ = ( 0 << 26 | //reserved
460 intel_format_convert(tran_coef[7], 2, 10, 1) << 13 |
461 intel_format_convert(tran_coef[6], 2, 10, 1));
463 *p_table ++ = ( 0 << 13 | //reserved
464 intel_format_convert(tran_coef[8], 2, 10, 1));
466 *p_table ++ = ( 0 << 22 | //reserved
467 intel_format_convert(u_coef[0], 10, 0, 1) << 11 |
468 intel_format_convert(v_coef[0], 10, 0, 1));
470 *p_table ++ = ( 0 << 22 | //reserved
471 intel_format_convert(u_coef[1], 10, 0, 1) << 11 |
472 intel_format_convert(v_coef[1], 10, 0, 1));
474 *p_table ++ = ( 0 << 22 | //reserved
475 intel_format_convert(u_coef[2], 10, 0, 1) << 11 |
476 intel_format_convert(v_coef[2], 10, 0, 1));
/*
 * hsw_veb_iecp_aoi_table - program the AOI (area of interest) section
 * of the IECP state table at byte offset 252 (3 DWords).  Disabled
 * filter => zero the region; otherwise write the defaults below.
 */
480 void hsw_veb_iecp_aoi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
482 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 252);
483 // VAProcFilterParameterBuffer * tcc_param =
484 // (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
486 if(!(proc_ctx->filters_mask & VPP_IECP_AOI)){
487 memset(p_table, 0, 3 * 4);
489 *p_table ++ = 0x00000000;
490 *p_table ++ = 0x00030000;
491 *p_table ++ = 0x00030000;
/*
 * hsw_veb_state_table_setup - CPU-fill the VEBOX state tables.  Maps
 * each state buffer object, writes the tables via the hsw_veb_*_table()
 * helpers above, then unmaps.  The low byte of filters_mask selects
 * DN/DI filters and the next byte selects IECP filters (matching the
 * 0x01/0x02/0xff00 tests in hsw_veb_state_command() below).
 */
495 void hsw_veb_state_table_setup(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
497 if(proc_ctx->filters_mask & 0x000000ff) {
498 dri_bo *dndi_bo = proc_ctx->dndi_state_table.bo;
/* Map writable (second arg 1) for CPU table fill. */
499 dri_bo_map(dndi_bo, 1);
500 proc_ctx->dndi_state_table.ptr = dndi_bo->virtual;
502 hsw_veb_dndi_table(ctx, proc_ctx);
504 dri_bo_unmap(dndi_bo);
507 if(proc_ctx->filters_mask & 0x0000ff00) {
508 dri_bo *iecp_bo = proc_ctx->iecp_state_table.bo;
509 dri_bo_map(iecp_bo, 1);
510 proc_ctx->iecp_state_table.ptr = iecp_bo->virtual;
/* Each helper writes its own sub-table at a fixed byte offset
 * within the single IECP state buffer. */
512 hsw_veb_iecp_std_table(ctx, proc_ctx);
513 hsw_veb_iecp_ace_table(ctx, proc_ctx);
514 hsw_veb_iecp_tcc_table(ctx, proc_ctx);
515 hsw_veb_iecp_pro_amp_table(ctx, proc_ctx);
516 hsw_veb_iecp_csc_table(ctx, proc_ctx);
517 hsw_veb_iecp_aoi_table(ctx, proc_ctx);
519 dri_bo_unmap(iecp_bo);
/*
 * hsw_veb_state_command - emit the VEB_STATE command into the VEBOX
 * batch: enable bits for DN/DI/IECP plus relocations of the four state
 * tables prepared by hsw_veb_resource_prepare()/hsw_veb_state_table_setup().
 *
 * NOTE(review): some lines are missing from this excerpt (part of the
 * is_first_frame condition, early-return / flag adjustments between
 * original lines 534-551, and the OUT_RELOC macro names).
 */
523 void hsw_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
525 struct intel_batchbuffer *batch = proc_ctx->batch;
/* filters_mask layout: bit0 = DN, bit1 = DI, bits 8-15 = IECP. */
526 unsigned int is_dn_enabled = (proc_ctx->filters_mask & 0x01)? 1: 0;
527 unsigned int is_di_enabled = (proc_ctx->filters_mask & 0x02)? 1: 0;
528 unsigned int is_iecp_enabled = (proc_ctx->filters_mask & 0xff00)?1:0;
/* frame_order == -1 marks the first frame of the sequence -
 * TODO confirm against the full source (condition continues on a
 * missing line). */
529 unsigned int is_first_frame = !!((proc_ctx->frame_order == -1) &&
532 unsigned int di_output_frames_flag = 2; /* Output Current Frame Only */
534 if(proc_ctx->fourcc_input != proc_ctx->fourcc_output ||
535 (is_dn_enabled == 0 && is_di_enabled == 0)){
540 VAProcFilterParameterBufferDeinterlacing *di_param =
541 (VAProcFilterParameterBufferDeinterlacing *)proc_ctx->filter_di;
545 if (di_param->algorithm == VAProcDeinterlacingBob)
/* Motion-adaptive/compensated DI needs a previous frame, so from the
 * second frame on the hardware outputs both current and previous. */
548 if ((di_param->algorithm == VAProcDeinterlacingMotionAdaptive ||
549 di_param->algorithm == VAProcDeinterlacingMotionCompensated) &&
550 proc_ctx->frame_order != -1)
551 di_output_frames_flag = 0; /* Output both Current Frame and Previous Frame */
554 BEGIN_VEB_BATCH(batch, 6);
555 OUT_VEB_BATCH(batch, VEB_STATE | (6 - 2));
557 0 << 26 | // state surface control bits
558 0 << 11 | // reserved.
559 0 << 10 | // pipe sync disable
560 di_output_frames_flag << 8 | // DI output frame
561 1 << 7 | // 444->422 downsample method
562 1 << 6 | // 422->420 downsample method
563 is_first_frame << 5 | // DN/DI first frame
564 is_di_enabled << 4 | // DI enable
565 is_dn_enabled << 3 | // DN enable
566 is_iecp_enabled << 2 | // global IECP enabled
567 0 << 1 | // ColorGamutCompressionEnable
568 0 ) ; // ColorGamutExpansionEnable.
/* Relocations: DN/DI, IECP, gamut and vertex state table addresses. */
571 proc_ctx->dndi_state_table.bo,
572 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
575 proc_ctx->iecp_state_table.bo,
576 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
579 proc_ctx->gamut_state_table.bo,
580 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
583 proc_ctx->vertex_state_table.bo,
584 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
586 ADVANCE_VEB_BATCH(batch);
/*
 * hsw_veb_surface_state - emit a VEB_SURFACE_STATE command describing
 * either the current output frame (is_output != 0) or the current input
 * frame.  Derives hardware surface format, pitch and chroma layout from
 * the surface fourcc (NV12 / YUY2 / AYUV / RGBA only, asserted).
 */
589 void hsw_veb_surface_state(VADriverContextP ctx, struct intel_vebox_context *proc_ctx, unsigned int is_output)
591 struct intel_batchbuffer *batch = proc_ctx->batch;
592 unsigned int u_offset_y = 0, v_offset_y = 0;
593 unsigned int is_uv_interleaved = 0, tiling = 0, swizzle = 0;
594 unsigned int surface_format = PLANAR_420_8;
595 struct object_surface* obj_surf = NULL;
596 unsigned int surface_pitch = 0;
597 unsigned int half_pitch_chroma = 0;
/* Pick the frame-store slot this state describes. */
600 obj_surf = proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface;
602 obj_surf = proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface;
605 assert(obj_surf->fourcc == VA_FOURCC_NV12 ||
606 obj_surf->fourcc == VA_FOURCC_YUY2 ||
607 obj_surf->fourcc == VA_FOURCC_AYUV ||
608 obj_surf->fourcc == VA_FOURCC_RGBA);
/* Per-fourcc format / pitch (bytes per row) / chroma layout. */
610 if (obj_surf->fourcc == VA_FOURCC_NV12) {
611 surface_format = PLANAR_420_8;
612 surface_pitch = obj_surf->width;
613 is_uv_interleaved = 1;
614 half_pitch_chroma = 0;
615 } else if (obj_surf->fourcc == VA_FOURCC_YUY2) {
616 surface_format = YCRCB_NORMAL;
617 surface_pitch = obj_surf->width * 2;
618 is_uv_interleaved = 0;
619 half_pitch_chroma = 0;
620 } else if (obj_surf->fourcc == VA_FOURCC_AYUV) {
621 surface_format = PACKED_444A_8;
622 surface_pitch = obj_surf->width * 4;
623 is_uv_interleaved = 0;
624 half_pitch_chroma = 0;
625 } else if (obj_surf->fourcc == VA_FOURCC_RGBA) {
626 surface_format = R8G8B8A8_UNORM_SRGB;
627 surface_pitch = obj_surf->width * 4;
628 is_uv_interleaved = 0;
629 half_pitch_chroma = 0;
/* Row offsets of the chroma planes within the BO. */
632 u_offset_y = obj_surf->y_cb_offset;
633 v_offset_y = obj_surf->y_cr_offset;
635 dri_bo_get_tiling(obj_surf->bo, &tiling, &swizzle);
637 BEGIN_VEB_BATCH(batch, 6);
638 OUT_VEB_BATCH(batch, VEB_SURFACE_STATE | (6 - 2));
641 is_output); // surface identification.
644 (obj_surf->height - 1) << 18 | // height . w3
645 (obj_surf->width -1 ) << 4 | // width
649 surface_format << 28 | // surface format, YCbCr420. w4
650 is_uv_interleaved << 27 | // interleaved chroma vs. two separate planar
651 0 << 20 | // reserved
652 (surface_pitch - 1) << 3 | // surface pitch, 64 align
653 half_pitch_chroma << 2 | // half pitch for chroma
654 !!tiling << 1 | // tiled surface, linear surface used
655 (tiling == I915_TILING_Y)); // tiled walk, ignored for linear surface
658 0 << 29 | // reserved . w5
659 0 << 16 | // X offset for V(Cb)
660 0 << 15 | // reserved
661 u_offset_y); // Y offset for V(Cb)
664 0 << 29 | // reserved . w6
665 0 << 16 | // X offset for V(Cr)
666 0 << 15 | // reserved
667 v_offset_y ); // Y offset for V(Cr)
669 ADVANCE_VEB_BATCH(batch);
/*
 * hsw_veb_dndi_iecp_command - emit the VEB_DNDI_IECP_STATE command,
 * relocating every frame-store surface BO the VEBOX reads or writes
 * (inputs: read domain only; outputs: read+write RENDER domains).
 *
 * The commented-out code below is the legacy frame-store swap /
 * surface-copy path, superseded by hsw_veb_surface_reference(); its
 * closing comment markers fall on lines missing from this excerpt.
 * NOTE(review): endingX is 64-aligned here but its use (and startingX's)
 * is on missing lines - presumably the per-block DNDI/IECP window.
 */
672 void hsw_veb_dndi_iecp_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
674 struct intel_batchbuffer *batch = proc_ctx->batch;
675 unsigned char frame_ctrl_bits = 0;
676 unsigned int startingX = 0;
677 unsigned int endingX = (proc_ctx->width_input + 63 ) / 64 * 64;
679 /* s1:update the previous and current input */
680 /* tempFrame = proc_ctx->frame_store[FRAME_IN_PREVIOUS];
681 proc_ctx->frame_store[FRAME_IN_PREVIOUS] = proc_ctx->frame_store[FRAME_IN_CURRENT]; ;
682 proc_ctx->frame_store[FRAME_IN_CURRENT] = tempFrame;
684 if(proc_ctx->surface_input_vebox != -1){
685 vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
686 proc_ctx->surface_input_vebox);
688 vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
689 proc_ctx->surface_input);
692 /*s2: update the STMM input and output */
693 /* tempFrame = proc_ctx->frame_store[FRAME_IN_STMM];
694 proc_ctx->frame_store[FRAME_IN_STMM] = proc_ctx->frame_store[FRAME_OUT_STMM]; ;
695 proc_ctx->frame_store[FRAME_OUT_STMM] = tempFrame;
697 /*s3:set reloc buffer address */
698 BEGIN_VEB_BATCH(batch, 10);
699 OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (10 - 2));
/* Input surfaces: GPU reads only. */
704 proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface->bo,
705 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
707 proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface->bo,
708 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
710 proc_ctx->frame_store[FRAME_IN_STMM].obj_surface->bo,
711 I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
/* Output surfaces: GPU reads and writes (write domain set). */
713 proc_ctx->frame_store[FRAME_OUT_STMM].obj_surface->bo,
714 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
716 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface->bo,
717 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
719 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface->bo,
720 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
722 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface->bo,
723 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
725 proc_ctx->frame_store[FRAME_OUT_STATISTIC].obj_surface->bo,
726 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
728 ADVANCE_VEB_BATCH(batch);
/*
 * hsw_veb_resource_prepare - allocate everything the VEBOX pass needs:
 * ensure input/output surfaces have backing BOs, create internal
 * frame-store surfaces (previous frame, STMM, DN output, statistics...)
 * sized to the input, and (re)allocate the four state-table BOs.
 *
 * NOTE(review): missing lines include the dri_bo_alloc() size/alignment
 * arguments, the default tiling for the freshly-allocated input/output
 * paths, and the CreateSurfaces format/count arguments.
 */
731 void hsw_veb_resource_prepare(VADriverContextP ctx,
732 struct intel_vebox_context *proc_ctx)
736 struct i965_driver_data *i965 = i965_driver_data(ctx);
737 unsigned int input_fourcc, output_fourcc;
738 unsigned int input_sampling, output_sampling;
739 unsigned int input_tiling, output_tiling;
740 unsigned int i, swizzle;
741 struct object_surface *obj_surf_out = NULL, *obj_surf_in = NULL;
/* Prefer the temporary vebox-format surfaces (created by the
 * pre/post format-convert paths) over the user surfaces. */
743 if (proc_ctx->surface_input_vebox_object != NULL) {
744 obj_surf_in = proc_ctx->surface_input_vebox_object;
746 obj_surf_in = proc_ctx->surface_input_object;
749 if (proc_ctx->surface_output_vebox_object != NULL) {
750 obj_surf_out = proc_ctx->surface_output_vebox_object;
752 obj_surf_out = proc_ctx->surface_output_object;
/* No BO yet: allocate as NV12 4:2:0; otherwise inherit the existing
 * surface's fourcc/subsampling/tiling. */
755 if(obj_surf_in->bo == NULL){
756 input_fourcc = VA_FOURCC('N','V','1','2');
757 input_sampling = SUBSAMPLE_YUV420;
759 i965_check_alloc_surface_bo(ctx, obj_surf_in, input_tiling, input_fourcc, input_sampling);
761 input_fourcc = obj_surf_in->fourcc;
762 input_sampling = obj_surf_in->subsampling;
763 dri_bo_get_tiling(obj_surf_in->bo, &input_tiling, &swizzle);
764 input_tiling = !!input_tiling;
767 if(obj_surf_out->bo == NULL){
768 output_fourcc = VA_FOURCC('N','V','1','2');
769 output_sampling = SUBSAMPLE_YUV420;
771 i965_check_alloc_surface_bo(ctx, obj_surf_out, output_tiling, output_fourcc, output_sampling);
773 output_fourcc = obj_surf_out->fourcc;
774 output_sampling = obj_surf_out->subsampling;
775 dri_bo_get_tiling(obj_surf_out->bo, &output_tiling, &swizzle);
776 output_tiling = !!output_tiling;
779 /* vebox pipeline input surface format info */
780 proc_ctx->fourcc_input = input_fourcc;
781 proc_ctx->fourcc_output = output_fourcc;
783 /* create pipeline surfaces */
784 for(i = 0; i < FRAME_STORE_SUM; i ++) {
785 if(proc_ctx->frame_store[i].obj_surface){
786 continue; //refer external surface for vebox pipeline
789 VASurfaceID new_surface;
790 struct object_surface *obj_surf = NULL;
792 va_status = i965_CreateSurfaces(ctx,
793 proc_ctx ->width_input,
794 proc_ctx ->height_input,
798 assert(va_status == VA_STATUS_SUCCESS);
800 obj_surf = SURFACE(new_surface);
/* Input-side slots match input format; STMM buffers are always
 * tiled; output-side slots match output format. */
803 if( i <= FRAME_IN_PREVIOUS || i == FRAME_OUT_CURRENT_DN) {
804 i965_check_alloc_surface_bo(ctx, obj_surf, input_tiling, input_fourcc, input_sampling);
805 } else if( i == FRAME_IN_STMM || i == FRAME_OUT_STMM){
806 i965_check_alloc_surface_bo(ctx, obj_surf, 1, input_fourcc, input_sampling);
807 } else if( i >= FRAME_OUT_CURRENT){
808 i965_check_alloc_surface_bo(ctx, obj_surf, output_tiling, output_fourcc, output_sampling);
811 proc_ctx->frame_store[i].surface_id = new_surface;
812 proc_ctx->frame_store[i].is_internal_surface = 1;
813 proc_ctx->frame_store[i].obj_surface = obj_surf;
/* NOTE(review): dri_bo_alloc() already returns an owned reference, and
 * dri_bo_reference() below takes a second one - verify the destroy
 * path releases both, otherwise these state-table BOs leak a ref. */
816 /* alloc dndi state table */
817 dri_bo_unreference(proc_ctx->dndi_state_table.bo);
818 bo = dri_bo_alloc(i965->intel.bufmgr,
819 "vebox: dndi state Buffer",
821 proc_ctx->dndi_state_table.bo = bo;
822 dri_bo_reference(proc_ctx->dndi_state_table.bo);
824 /* alloc iecp state table */
825 dri_bo_unreference(proc_ctx->iecp_state_table.bo);
826 bo = dri_bo_alloc(i965->intel.bufmgr,
827 "vebox: iecp state Buffer",
829 proc_ctx->iecp_state_table.bo = bo;
830 dri_bo_reference(proc_ctx->iecp_state_table.bo);
832 /* alloc gamut state table */
833 dri_bo_unreference(proc_ctx->gamut_state_table.bo);
834 bo = dri_bo_alloc(i965->intel.bufmgr,
835 "vebox: gamut state Buffer",
837 proc_ctx->gamut_state_table.bo = bo;
838 dri_bo_reference(proc_ctx->gamut_state_table.bo);
840 /* alloc vertex state table */
841 dri_bo_unreference(proc_ctx->vertex_state_table.bo);
842 bo = dri_bo_alloc(i965->intel.bufmgr,
843 "vertex: iecp state Buffer",
845 proc_ctx->vertex_state_table.bo = bo;
846 dri_bo_reference(proc_ctx->vertex_state_table.bo);
/*
 * hsw_veb_surface_reference - bind external surfaces into the VEBOX
 * frame-store for the current frame: the user (or temporary vebox)
 * input becomes FRAME_IN_CURRENT, the previous frame is recycled or
 * taken from pipe->forward_references[], the STMM in/out buffers are
 * ping-ponged, and the output surface is bound to the slot matching the
 * active filter combination.  Returns VA_STATUS_SUCCESS or
 * VA_STATUS_ERROR_INVALID_PARAMETER when motion-adaptive/compensated DI
 * lacks a forward reference.
 *
 * NOTE(review): the return type is on a line missing from this excerpt
 * (presumably VAStatus, given the return statements below), as are
 * several braces and condition heads; frame_order == -1 appears to mean
 * "first frame" - confirm against the full source.
 */
851 hsw_veb_surface_reference(VADriverContextP ctx,
852 struct intel_vebox_context *proc_ctx)
854 struct object_surface * obj_surf;
855 VEBFrameStore tmp_store;
857 if (proc_ctx->surface_input_vebox_object != NULL) {
858 obj_surf = proc_ctx->surface_input_vebox_object;
860 obj_surf = proc_ctx->surface_input_object;
863 /* update the input surface */
864 proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id = VA_INVALID_ID;
865 proc_ctx->frame_store[FRAME_IN_CURRENT].is_internal_surface = 0;
866 proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface = obj_surf;
868 /* update the previous input surface */
869 if (proc_ctx->frame_order != -1) {
/* DN-only: last frame's denoised output doubles as the previous
 * input reference. */
870 if (proc_ctx->filters_mask == VPP_DNDI_DN) {
871 proc_ctx->frame_store[FRAME_IN_PREVIOUS] = proc_ctx->frame_store[FRAME_OUT_CURRENT_DN];
872 } else if (proc_ctx->filters_mask & VPP_DNDI_DI) {
873 VAProcFilterParameterBufferDeinterlacing *di_param =
874 (VAProcFilterParameterBufferDeinterlacing *)proc_ctx->filter_di;
877 (di_param->algorithm == VAProcDeinterlacingMotionAdaptive ||
878 di_param->algorithm == VAProcDeinterlacingMotionCompensated)) {
879 if ((proc_ctx->filters_mask & VPP_DNDI_DN) &&
880 proc_ctx->frame_order == 0) { /* DNDI */
881 tmp_store = proc_ctx->frame_store[FRAME_OUT_CURRENT_DN];
882 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN] = proc_ctx->frame_store[FRAME_IN_PREVIOUS];
883 proc_ctx->frame_store[FRAME_IN_PREVIOUS] = tmp_store;
884 } else { /* DI only */
885 VAProcPipelineParameterBuffer *pipe = proc_ctx->pipeline_param;
886 struct object_surface *obj_surf = NULL;
887 struct i965_driver_data * const i965 = i965_driver_data(ctx);
/* Motion-adaptive/compensated DI cannot run without a forward
 * temporal reference supplied by the application. */
890 !pipe->num_forward_references ||
891 pipe->forward_references[0] == VA_INVALID_ID) {
892 WARN_ONCE("A forward temporal reference is needed for Motion adaptive/compensated deinterlacing !!!\n");
894 return VA_STATUS_ERROR_INVALID_PARAMETER;
897 obj_surf = SURFACE(pipe->forward_references[0]);
898 assert(obj_surf && obj_surf->bo);
900 proc_ctx->frame_store[FRAME_IN_PREVIOUS].surface_id = pipe->forward_references[0];
901 proc_ctx->frame_store[FRAME_IN_PREVIOUS].is_internal_surface = 0;
902 proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface = obj_surf;
908 /* update STMM surface */
909 if (proc_ctx->frame_order != -1) {
/* Ping-pong: last frame's STMM output feeds this frame's STMM input. */
910 tmp_store = proc_ctx->frame_store[FRAME_IN_STMM];
911 proc_ctx->frame_store[FRAME_IN_STMM] = proc_ctx->frame_store[FRAME_OUT_STMM];
912 proc_ctx->frame_store[FRAME_OUT_STMM] = tmp_store;
915 /* update the output surface */
916 if (proc_ctx->surface_output_vebox_object != NULL) {
917 obj_surf = proc_ctx->surface_output_vebox_object;
919 obj_surf = proc_ctx->surface_output_object;
/* DN-only writes to the DN slot; otherwise the generic current/previous
 * output slots are used depending on frame order. */
922 if (proc_ctx->filters_mask == VPP_DNDI_DN) {
923 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].surface_id = VA_INVALID_ID;
924 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].is_internal_surface = 0;
925 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface = obj_surf;
926 proc_ctx->current_output = FRAME_OUT_CURRENT_DN;
927 } else if (proc_ctx->filters_mask & VPP_DNDI_DI) {
928 VAProcFilterParameterBufferDeinterlacing *di_param =
929 (VAProcFilterParameterBufferDeinterlacing *)proc_ctx->filter_di;
932 (di_param->algorithm == VAProcDeinterlacingMotionAdaptive ||
933 di_param->algorithm == VAProcDeinterlacingMotionCompensated)) {
934 if (proc_ctx->frame_order == -1) {
935 proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = VA_INVALID_ID;
936 proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
937 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface = obj_surf;
938 proc_ctx->current_output = FRAME_OUT_CURRENT;
939 } else if (proc_ctx->frame_order == 0) {
940 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].surface_id = VA_INVALID_ID;
941 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].is_internal_surface = 0;
942 proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface = obj_surf;
943 proc_ctx->current_output = FRAME_OUT_PREVIOUS;
/* Later frames render into an internal surface; the result is copied
 * to the user surface afterwards (POST_COPY_CONVERT). */
945 proc_ctx->current_output = FRAME_OUT_CURRENT;
946 proc_ctx->format_convert_flags |= POST_COPY_CONVERT;
949 proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = VA_INVALID_ID;
950 proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
951 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface = obj_surf;
952 proc_ctx->current_output = FRAME_OUT_CURRENT;
955 proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = VA_INVALID_ID;
956 proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
957 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface = obj_surf;
958 proc_ctx->current_output = FRAME_OUT_CURRENT;
961 return VA_STATUS_SUCCESS;
/*
 * hsw_veb_surface_unreference - drop the frame-store bindings to
 * externally-owned surfaces after the pass: clear the input slot and
 * whichever output slot hsw_veb_surface_reference() bound (the DN slot
 * for DN-only, FRAME_OUT_CURRENT otherwise).  Internal surfaces stay.
 */
964 void hsw_veb_surface_unreference(VADriverContextP ctx,
965 struct intel_vebox_context *proc_ctx)
967 /* unreference the input surface */
968 proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id = VA_INVALID_ID;
969 proc_ctx->frame_store[FRAME_IN_CURRENT].is_internal_surface = 0;
970 proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface = NULL;
972 /* unreference the shared output surface */
973 if (proc_ctx->filters_mask == VPP_DNDI_DN) {
974 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].surface_id = VA_INVALID_ID;
975 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].is_internal_surface = 0;
976 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface = NULL;
978 proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = VA_INVALID_ID;
979 proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
980 proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface = NULL;
/*
 * Prepare format/size conversion before the VEBOX pipeline runs.
 *
 * Records the input/output geometry, decides which conversion steps are
 * required (PRE_FORMAT_CONVERT, POST_FORMAT_CONVERT, POST_SCALING_CONVERT)
 * in proc_ctx->format_convert_flags, and lazily creates the intermediate
 * NV12 surfaces those steps need.  Returns 0 on success.
 */
int hsw_veb_pre_format_convert(VADriverContextP ctx,
                               struct intel_vebox_context *proc_ctx)
{
    VAStatus va_status;
    struct i965_driver_data *i965 = i965_driver_data(ctx); /* needed by SURFACE() */
    struct object_surface* obj_surf_input = proc_ctx->surface_input_object;
    struct object_surface* obj_surf_output = proc_ctx->surface_output_object;
    struct object_surface* obj_surf_input_vebox;
    struct object_surface* obj_surf_output_vebox;

    proc_ctx->format_convert_flags = 0;

    proc_ctx->width_input   = obj_surf_input->orig_width;
    proc_ctx->height_input  = obj_surf_input->orig_height;
    proc_ctx->width_output  = obj_surf_output->orig_width;
    proc_ctx->height_output = obj_surf_output->orig_height;

    /* Only whole-frame processing is supported: the requested regions must
     * cover the full input and output surfaces. */
    assert(proc_ctx->width_input   == proc_ctx->pipeline_param->surface_region->width);
    assert(proc_ctx->height_input  == proc_ctx->pipeline_param->surface_region->height);
    assert(proc_ctx->width_output  == proc_ctx->pipeline_param->output_region->width);
    assert(proc_ctx->height_output == proc_ctx->pipeline_param->output_region->height);

    /* Any size mismatch requires a scaling pass after VEBOX. */
    if(proc_ctx->width_output != proc_ctx->width_input ||
       proc_ctx->height_output != proc_ctx->height_input){
        proc_ctx->format_convert_flags |= POST_SCALING_CONVERT;
    }

    /* convert the following format to NV12 format */
    if(obj_surf_input->fourcc ==  VA_FOURCC('Y','V','1','2') ||
       obj_surf_input->fourcc ==  VA_FOURCC('I','4','2','0') ||
       obj_surf_input->fourcc ==  VA_FOURCC('I','M','C','1') ||
       obj_surf_input->fourcc ==  VA_FOURCC('I','M','C','3') ||
       obj_surf_input->fourcc ==  VA_FOURCC('R','G','B','A')){

        proc_ctx->format_convert_flags |= PRE_FORMAT_CONVERT;

    } else if(obj_surf_input->fourcc ==  VA_FOURCC('A','Y','U','V') ||
              obj_surf_input->fourcc ==  VA_FOURCC('Y','U','Y','2') ||
              obj_surf_input->fourcc ==  VA_FOURCC('N','V','1','2')){
        // nothing to do here: VEBOX consumes these formats directly
    } else {
        /* not support other format as input */
    }

    if (proc_ctx->format_convert_flags & PRE_FORMAT_CONVERT) {
        /* Lazily create the intermediate NV12 input surface, reused across
         * calls on the same context. */
        if(proc_ctx->surface_input_vebox == VA_INVALID_ID){
            va_status = i965_CreateSurfaces(ctx,
                                            proc_ctx->width_input,
                                            proc_ctx->height_input,
                                            VA_RT_FORMAT_YUV420,
                                            1,
                                            &(proc_ctx->surface_input_vebox));
            assert(va_status == VA_STATUS_SUCCESS);
            obj_surf_input_vebox = SURFACE(proc_ctx->surface_input_vebox);
            assert(obj_surf_input_vebox);

            if (obj_surf_input_vebox) {
                proc_ctx->surface_input_vebox_object = obj_surf_input_vebox;
                i965_check_alloc_surface_bo(ctx, obj_surf_input_vebox, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
            }
        }

        /* Convert the user's input frame into the NV12 work surface
         * (destination is passed first — see vpp_surface_convert call sites). */
        vpp_surface_convert(ctx, proc_ctx->surface_input_vebox_object, proc_ctx->surface_input_object);
    }

    /* create one temporary NV12 surfaces for conversion*/
    if(obj_surf_output->fourcc ==  VA_FOURCC('Y','V','1','2') ||
       obj_surf_output->fourcc ==  VA_FOURCC('I','4','2','0') ||
       obj_surf_output->fourcc ==  VA_FOURCC('I','M','C','1') ||
       obj_surf_output->fourcc ==  VA_FOURCC('I','M','C','3') ||
       obj_surf_output->fourcc ==  VA_FOURCC('R','G','B','A')) {

        proc_ctx->format_convert_flags |= POST_FORMAT_CONVERT;
    } else if(obj_surf_output->fourcc ==  VA_FOURCC('A','Y','U','V') ||
              obj_surf_output->fourcc ==  VA_FOURCC('Y','U','Y','2') ||
              obj_surf_output->fourcc ==  VA_FOURCC('N','V','1','2')){
        /* Nothing to do here */
    } else {
        /* not support other format as input */
    }

    /* The VEBOX engine writes NV12; if the caller's output needs another
     * format or another size, stage the result in an NV12 surface first. */
    if(proc_ctx->format_convert_flags & POST_FORMAT_CONVERT ||
       proc_ctx->format_convert_flags & POST_SCALING_CONVERT){
        if(proc_ctx->surface_output_vebox == VA_INVALID_ID){
            va_status = i965_CreateSurfaces(ctx,
                                            proc_ctx->width_input,
                                            proc_ctx->height_input,
                                            VA_RT_FORMAT_YUV420,
                                            1,
                                            &(proc_ctx->surface_output_vebox));
            assert(va_status == VA_STATUS_SUCCESS);
            obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_vebox);
            assert(obj_surf_output_vebox);

            if (obj_surf_output_vebox) {
                proc_ctx->surface_output_vebox_object = obj_surf_output_vebox;
                i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
            }
        }
    }

    /* Scaling additionally needs an NV12 surface at the OUTPUT resolution. */
    if(proc_ctx->format_convert_flags & POST_SCALING_CONVERT){
        if(proc_ctx->surface_output_scaled == VA_INVALID_ID){
            va_status = i965_CreateSurfaces(ctx,
                                            proc_ctx->width_output,
                                            proc_ctx->height_output,
                                            VA_RT_FORMAT_YUV420,
                                            1,
                                            &(proc_ctx->surface_output_scaled));
            assert(va_status == VA_STATUS_SUCCESS);
            obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_scaled);
            assert(obj_surf_output_vebox);

            if (obj_surf_output_vebox) {
                proc_ctx->surface_output_scaled_object = obj_surf_output_vebox;
                i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
            }
        }
    }

    return 0;
}
/*
 * Finish the frame after the VEBOX pipeline: copy/convert/scale the
 * processed picture from the frame store into the caller's output surface,
 * according to the flags computed by hsw_veb_pre_format_convert().
 * Returns 0 on success.
 */
int hsw_veb_post_format_convert(VADriverContextP ctx,
                                struct intel_vebox_context *proc_ctx)
{
    struct object_surface *obj_surface = NULL;

    /* The frame produced by the pipeline in this call. */
    obj_surface = proc_ctx->frame_store[proc_ctx->current_output].obj_surface;

    if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
        /* copy the saved frame in the second call */
        vpp_surface_convert(ctx,proc_ctx->surface_output_object, obj_surface);
    } else if(!(proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
              !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)){
        /* Output surface format is covered by vebox pipeline and
         * processed picture is already store in output surface
         * so nothing will be done here */
    } else if ((proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
               !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)){
        /* convert and copy NV12 to YV12/IMC3/IMC2/RGBA output*/
        vpp_surface_convert(ctx,proc_ctx->surface_output_object, obj_surface);

    } else if(proc_ctx->format_convert_flags & POST_SCALING_CONVERT) {
        /* scaling, convert and copy NV12 to YV12/IMC3/IMC2/RGBA output*/
        assert(obj_surface->fourcc == VA_FOURCC('N','V','1','2'));

        /* first step :surface scaling */
        vpp_surface_scaling(ctx,proc_ctx->surface_output_scaled_object, obj_surface);

        /* second step: color format convert and copy to output */
        obj_surface = proc_ctx->surface_output_object;

        if(obj_surface->fourcc ==  VA_FOURCC('N','V','1','2') ||
           obj_surface->fourcc ==  VA_FOURCC('Y','V','1','2') ||
           obj_surface->fourcc ==  VA_FOURCC('I','4','2','0') ||
           obj_surface->fourcc ==  VA_FOURCC('Y','U','Y','2') ||
           obj_surface->fourcc ==  VA_FOURCC('I','M','C','1') ||
           obj_surface->fourcc ==  VA_FOURCC('I','M','C','3') ||
           obj_surface->fourcc ==  VA_FOURCC('R','G','B','A')) {
            vpp_surface_convert(ctx, proc_ctx->surface_output_object, proc_ctx->surface_output_scaled_object);
        }
    }

    return 0;
}
/*
 * Entry point for one VEBOX processing call.
 *
 * Parses the filter parameter buffers attached to the pipeline parameter
 * (denoise, deinterlace, proc-amp/color balance), prepares surfaces and
 * resources, then either emits the VEBOX command batch or — on the second
 * call of a two-call sequence (POST_COPY_CONVERT) — just copies the frame
 * saved by the first call.
 *
 * Returns VA_STATUS_SUCCESS, or VA_STATUS_ERROR_INVALID_PARAMETER when a
 * filter buffer id cannot be resolved.
 */
VAStatus gen75_vebox_process_picture(VADriverContextP ctx,
                                     struct intel_vebox_context *proc_ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx); /* needed by BUFFER() */

    VAProcPipelineParameterBuffer *pipe = proc_ctx->pipeline_param;
    VAProcFilterParameterBuffer* filter = NULL;
    struct object_buffer *obj_buf = NULL;
    unsigned int i;

    /* Collect the requested filters into filters_mask and stash the
     * per-filter parameter pointers for the state-table setup. */
    for (i = 0; i < pipe->num_filters; i ++) {
        obj_buf = BUFFER(pipe->filters[i]);

        assert(obj_buf && obj_buf->buffer_store);

        if (!obj_buf || !obj_buf->buffer_store)
            goto error;

        filter = (VAProcFilterParameterBuffer*)obj_buf-> buffer_store->buffer;

        if (filter->type == VAProcFilterNoiseReduction) {
            proc_ctx->filters_mask |= VPP_DNDI_DN;
            proc_ctx->filter_dn = filter;
        } else if (filter->type == VAProcFilterDeinterlacing) {
            proc_ctx->filters_mask |= VPP_DNDI_DI;
            proc_ctx->filter_di = filter;
        } else if (filter->type == VAProcFilterColorBalance) {
            proc_ctx->filters_mask |= VPP_IECP_PRO_AMP;
            proc_ctx->filter_iecp_amp = filter;
            proc_ctx->filter_iecp_amp_num_elements = obj_buf->num_elements;
        }
    }

    hsw_veb_pre_format_convert(ctx, proc_ctx);
    hsw_veb_surface_reference(ctx, proc_ctx);

    /* frame_order == -1 marks the very first frame on this context:
     * allocate the frame-store and state-table resources once. */
    if (proc_ctx->frame_order == -1) {
        hsw_veb_resource_prepare(ctx, proc_ctx);
    }

    if (proc_ctx->format_convert_flags & POST_COPY_CONVERT) {
        /* Second call of a two-call sequence: no VEBOX work, the saved
         * frame is copied out by hsw_veb_post_format_convert(). */
        assert(proc_ctx->frame_order == 1);
        /* directly copy the saved frame in the second call */
    } else {
        /* Build and submit the VEBOX command batch. */
        intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
        intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
        hsw_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
        hsw_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
        hsw_veb_state_table_setup(ctx, proc_ctx);

        hsw_veb_state_command(ctx, proc_ctx);
        hsw_veb_dndi_iecp_command(ctx, proc_ctx);
        intel_batchbuffer_end_atomic(proc_ctx->batch);
        intel_batchbuffer_flush(proc_ctx->batch);
    }

    hsw_veb_post_format_convert(ctx, proc_ctx);
    // hsw_veb_surface_unreference(ctx, proc_ctx);

    /* Alternate between 0 and 1: some modes need two calls per frame. */
    proc_ctx->frame_order = (proc_ctx->frame_order + 1) % 2;

    return VA_STATUS_SUCCESS;

error:
    return VA_STATUS_ERROR_INVALID_PARAMETER;
}
1225 void gen75_vebox_context_destroy(VADriverContextP ctx,
1226 struct intel_vebox_context *proc_ctx)
1230 if(proc_ctx->surface_input_vebox != VA_INVALID_ID){
1231 i965_DestroySurfaces(ctx, &proc_ctx->surface_input_vebox, 1);
1232 proc_ctx->surface_input_vebox = VA_INVALID_ID;
1233 proc_ctx->surface_input_vebox_object = NULL;
1236 if(proc_ctx->surface_output_vebox != VA_INVALID_ID){
1237 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_vebox, 1);
1238 proc_ctx->surface_output_vebox = VA_INVALID_ID;
1239 proc_ctx->surface_output_vebox_object = NULL;
1242 if(proc_ctx->surface_output_scaled != VA_INVALID_ID){
1243 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_scaled, 1);
1244 proc_ctx->surface_output_scaled = VA_INVALID_ID;
1245 proc_ctx->surface_output_scaled_object = NULL;
1248 for(i = 0; i < FRAME_STORE_SUM; i ++) {
1249 if (proc_ctx->frame_store[i].is_internal_surface == 1) {
1250 assert(proc_ctx->frame_store[i].surface_id != VA_INVALID_ID);
1252 if (proc_ctx->frame_store[i].surface_id != VA_INVALID_ID)
1253 i965_DestroySurfaces(ctx, &proc_ctx->frame_store[i].surface_id, 1);
1256 proc_ctx->frame_store[i].surface_id = VA_INVALID_ID;
1257 proc_ctx->frame_store[i].is_internal_surface = 0;
1258 proc_ctx->frame_store[i].obj_surface = NULL;
1261 /* dndi state table */
1262 dri_bo_unreference(proc_ctx->dndi_state_table.bo);
1263 proc_ctx->dndi_state_table.bo = NULL;
1265 /* iecp state table */
1266 dri_bo_unreference(proc_ctx->iecp_state_table.bo);
1267 proc_ctx->dndi_state_table.bo = NULL;
1269 /* gamut statu table */
1270 dri_bo_unreference(proc_ctx->gamut_state_table.bo);
1271 proc_ctx->gamut_state_table.bo = NULL;
1273 /* vertex state table */
1274 dri_bo_unreference(proc_ctx->vertex_state_table.bo);
1275 proc_ctx->vertex_state_table.bo = NULL;
1277 intel_batchbuffer_free(proc_ctx->batch);
1282 struct intel_vebox_context * gen75_vebox_context_init(VADriverContextP ctx)
1284 struct intel_driver_data *intel = intel_driver_data(ctx);
1285 struct intel_vebox_context *proc_context = calloc(1, sizeof(struct intel_vebox_context));
1288 proc_context->batch = intel_batchbuffer_new(intel, I915_EXEC_VEBOX, 0);
1289 memset(proc_context->frame_store, 0, sizeof(VEBFrameStore)*FRAME_STORE_SUM);
1291 for (i = 0; i < FRAME_STORE_SUM; i ++) {
1292 proc_context->frame_store[i].surface_id = VA_INVALID_ID;
1293 proc_context->frame_store[i].is_internal_surface = 0;
1294 proc_context->frame_store[i].obj_surface = NULL;
1297 proc_context->filters_mask = 0;
1298 proc_context->frame_order = -1; /* the first frame */
1299 proc_context->surface_output_object = NULL;
1300 proc_context->surface_input_object = NULL;
1301 proc_context->surface_input_vebox = VA_INVALID_ID;
1302 proc_context->surface_input_vebox_object = NULL;
1303 proc_context->surface_output_vebox = VA_INVALID_ID;
1304 proc_context->surface_output_vebox_object = NULL;
1305 proc_context->surface_output_scaled = VA_INVALID_ID;
1306 proc_context->surface_output_scaled_object = NULL;
1307 proc_context->filters_mask = 0;
1308 proc_context->format_convert_flags = 0;
1310 return proc_context;