2 * Copyright © 2011 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Li Xiaowei <xiaowei.a.li@intel.com>
34 #include "intel_batchbuffer.h"
35 #include "intel_driver.h"
36 #include "i965_defines.h"
37 #include "i965_structs.h"
38 #include "gen75_vpp_vebox.h"
39 #include "intel_media.h"
44 i965_MapBuffer(VADriverContextP ctx, VABufferID buf_id, void **);
47 i965_UnmapBuffer(VADriverContextP ctx, VABufferID buf_id);
50 i965_DeriveImage(VADriverContextP ctx, VABufferID surface, VAImage *out_image);
53 i965_DestroyImage(VADriverContextP ctx, VAImageID image);
56 i965_DestroySurfaces(VADriverContextP ctx,
57 VASurfaceID *surface_list,
61 i965_CreateSurfaces(VADriverContextP ctx,
66 VASurfaceID *surfaces);
68 vpp_surface_convert(VADriverContextP ctx,
69 VASurfaceID dstSurfaceID,
70 VASurfaceID srcSurfaceID);
72 VAStatus vpp_surface_copy(VADriverContextP ctx, VASurfaceID dstSurfaceID, VASurfaceID srcSurfaceID)
74 VAStatus va_status = VA_STATUS_SUCCESS;
75 VAImage srcImage, dstImage;
76 void *pBufferSrc, *pBufferDst;
78 va_status = vpp_surface_convert(ctx, dstSurfaceID, srcSurfaceID);
79 if(va_status == VA_STATUS_SUCCESS){
83 va_status = i965_DeriveImage(ctx, srcSurfaceID, &srcImage);
84 assert(va_status == VA_STATUS_SUCCESS);
86 va_status = i965_DeriveImage(ctx, dstSurfaceID, &dstImage);
87 assert(va_status == VA_STATUS_SUCCESS);
89 if(srcImage.width != dstImage.width ||
90 srcImage.height != dstImage.height ||
91 srcImage.format.fourcc != dstImage.format.fourcc) {
92 return VA_STATUS_ERROR_UNIMPLEMENTED;
95 va_status = i965_MapBuffer(ctx, srcImage.buf, &pBufferSrc);
96 assert(va_status == VA_STATUS_SUCCESS);
98 va_status = i965_MapBuffer(ctx, dstImage.buf, &pBufferDst);
99 assert(va_status == VA_STATUS_SUCCESS);
101 memcpy(pBufferDst, pBufferSrc, dstImage.data_size);
103 i965_UnmapBuffer(ctx, srcImage.buf);
104 i965_UnmapBuffer(ctx, dstImage.buf);
105 i965_DestroyImage(ctx, srcImage.image_id);
106 i965_DestroyImage(ctx, dstImage.image_id);
111 VAStatus vpp_surface_convert(VADriverContextP ctx, VASurfaceID dstSurfaceID, VASurfaceID srcSurfaceID)
113 VAStatus va_status = VA_STATUS_SUCCESS;
114 struct i965_driver_data *i965 = i965_driver_data(ctx);
115 struct object_surface* src_obj_surf = SURFACE(srcSurfaceID);
116 struct object_surface* dst_obj_surf = SURFACE(dstSurfaceID);
118 assert(src_obj_surf->orig_width == dst_obj_surf->orig_width);
119 assert(src_obj_surf->orig_height == dst_obj_surf->orig_height);
121 VARectangle src_rect, dst_rect;
122 src_rect.x = dst_rect.x = 0;
123 src_rect.y = dst_rect.y = 0;
124 src_rect.width = dst_rect.width = src_obj_surf->orig_width;
125 src_rect.height = dst_rect.height = dst_obj_surf->orig_height;
127 struct i965_surface src_surface, dst_surface;
128 src_surface.id = srcSurfaceID;
129 src_surface.type = I965_SURFACE_TYPE_SURFACE;
130 src_surface.flags = I965_SURFACE_FLAG_FRAME;
132 dst_surface.id = dstSurfaceID;
133 dst_surface.type = I965_SURFACE_TYPE_SURFACE;
134 dst_surface.flags = I965_SURFACE_FLAG_FRAME;
136 va_status = i965_image_processing(ctx,
144 VAStatus vpp_surface_scaling(VADriverContextP ctx, VASurfaceID dstSurfaceID, VASurfaceID srcSurfaceID)
146 VAStatus va_status = VA_STATUS_SUCCESS;
147 int flags = I965_PP_FLAG_AVS;
148 struct i965_driver_data *i965 = i965_driver_data(ctx);
149 struct object_surface* src_obj_surf = SURFACE(srcSurfaceID);
150 struct object_surface* dst_obj_surf = SURFACE(dstSurfaceID);
152 assert(src_obj_surf->fourcc == VA_FOURCC('N','V','1','2'));
153 assert(dst_obj_surf->fourcc == VA_FOURCC('N','V','1','2'));
155 VARectangle src_rect, dst_rect;
158 src_rect.width = src_obj_surf->orig_width;
159 src_rect.height = src_obj_surf->orig_height;
163 dst_rect.width = dst_obj_surf->orig_width;
164 dst_rect.height = dst_obj_surf->orig_height;
166 va_status = i965_scaling_processing(ctx,
/*
 * Program the VEBOX DN/DI (denoise / deinterlace) state table.
 * Writes a sequence of packed dwords (w0, w1, ...) of hardware default
 * thresholds into dndi_state_table.ptr, which must already be mapped by
 * the caller (hsw_veb_state_table_setup).
 *
 * NOTE(review): di_param / dn_param are fetched but not visibly used in
 * this chunk -- presumably the table constants should be derived from the
 * user's filter parameters; confirm against the full file.
 * NOTE(review): several dwords below appear to be missing bit-fields in
 * this extraction (e.g. w4/w5/w7/w8 end mid-expression); do not edit the
 * constants without the complete source.
 */
176 void hsw_veb_dndi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
178 unsigned int* p_table ;
180 VAProcFilterParameterBufferDeinterlacing *di_param =
181 (VAProcFilterParameterBufferDeinterlacing *) proc_ctx->filter_di;
183 VAProcFilterParameterBuffer * dn_param =
184 (VAProcFilterParameterBuffer *) proc_ctx->filter_dn;
186 p_table = (unsigned int *)proc_ctx->dndi_state_table.ptr;
188 *p_table ++ = 0; // reserved . w0
189 *p_table ++ = ( 0 << 24 | // denoise STAD threshold . w1
190 128 << 16 | // dnmh_history_max
191 0 << 12 | // reserved
192 8 << 8 | // dnmh_delta[3:0]
193 0 ); // denoise ASD threshold
195 *p_table ++ = ( 0 << 30 | // reserved . w2
196 16 << 24 | // temporal diff th
197 0 << 22 | // reserved.
198 8 << 16 | // low temporal diff th
200 0 << 8 | // denoise moving pixel th
201 64 ); // denoise th for sum of complexity measure
203 *p_table ++ = ( 0 << 30 | // reserved . w3
204 4 << 24 | // good neighbor th[5:0]
205 9 << 20 | // CAT slope minus 1
206 5 << 16 | // SAD Tight in
207 0 << 14 | // smooth mv th
208 0 << 12 | // reserved
209 1 << 8 | // bne_edge_th[3:0]
210 15 ); // block noise estimate noise th
212 *p_table ++ = ( 0 << 31 | // STMM blending constant select. w4
213 64 << 24 | // STMM trc1
214 0 << 16 | // STMM trc2
215 0 << 14 | // reserved
217 128 ); // maximum STMM
219 *p_table ++ = ( 0 << 24 | // minumum STMM . W5
220 0 << 22 | // STMM shift down
221 0 << 20 | // STMM shift up
222 7 << 16 | // STMM output shift
223 128 << 8 | // SDI threshold
226 *p_table ++ = ( 0 << 24 | // SDI fallback mode 1 T1 constant . W6
227 0 << 16 | // SDI fallback mode 1 T2 constant
228 0 << 8 | // SDI fallback mode 2 constant(angle2x1)
229 0 ); // FMD temporal difference threshold
231 *p_table ++ = ( 32 << 24 | // FMD #1 vertical difference th . w7
232 32 << 16 | // FMD #2 vertical difference th
234 32 << 8 | // FMD tear threshold
235 0 << 7 | // MCDI Enable, use motion compensated deinterlace algorithm
236 0 << 6 | // progressive DN
238 0 << 3 | // DN/DI Top First
241 *p_table ++ = ( 0 << 29 | // reserved . W8
242 0 << 23 | // dnmh_history_init[5:0]
243 10 << 19 | // neighborPixel th
244 0 << 18 | // reserved
245 0 << 16 | // FMD for 2nd field of previous frame
246 25 << 10 | // MC pixel consistency th
247 0 << 8 | // FMD for 1st field for current frame
251 *p_table ++ = ( 0 << 24 | // reserved
252 0 << 16 | // chr_dnmh_stad_th
253 0 << 13 | // reserved
254 0 << 12 | // chrome denoise enable
255 0 << 6 | // chr temp diff th
256 0 ); // chr temp diff low
260 void hsw_veb_iecp_std_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
262 unsigned int *p_table = proc_ctx->iecp_state_table.ptr + 0 ;
263 //VAProcFilterParameterBuffer * std_param =
264 // (VAProcFilterParameterBuffer *) proc_ctx->filter_std;
266 if(!(proc_ctx->filters_mask & VPP_IECP_STD_STE)){
267 memset(p_table, 0, 29 * 4);
269 *p_table ++ = 0x9a6e39f0;
270 *p_table ++ = 0x400c0000;
271 *p_table ++ = 0x00001180;
272 *p_table ++ = 0xfe2f2e00;
273 *p_table ++ = 0x000000ff;
275 *p_table ++ = 0x00140000;
276 *p_table ++ = 0xd82e0000;
277 *p_table ++ = 0x8285ecec;
278 *p_table ++ = 0x00008282;
279 *p_table ++ = 0x00000000;
281 *p_table ++ = 0x02117000;
282 *p_table ++ = 0xa38fec96;
283 *p_table ++ = 0x0000c8c8;
284 *p_table ++ = 0x00000000;
285 *p_table ++ = 0x01478000;
287 *p_table ++ = 0x0007c306;
288 *p_table ++ = 0x00000000;
289 *p_table ++ = 0x00000000;
290 *p_table ++ = 0x1c1bd000;
291 *p_table ++ = 0x00000000;
293 *p_table ++ = 0x00000000;
294 *p_table ++ = 0x00000000;
295 *p_table ++ = 0x0007cf80;
296 *p_table ++ = 0x00000000;
297 *p_table ++ = 0x00000000;
299 *p_table ++ = 0x1c080000;
300 *p_table ++ = 0x00000000;
301 *p_table ++ = 0x00000000;
302 *p_table ++ = 0x00000000;
306 void hsw_veb_iecp_ace_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
308 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 116);
310 if(!(proc_ctx->filters_mask & VPP_IECP_ACE)){
311 memset(p_table, 0, 13 * 4);
313 *p_table ++ = 0x00000068;
314 *p_table ++ = 0x4c382410;
315 *p_table ++ = 0x9c887460;
316 *p_table ++ = 0xebd8c4b0;
317 *p_table ++ = 0x604c3824;
319 *p_table ++ = 0xb09c8874;
320 *p_table ++ = 0x0000d8c4;
321 *p_table ++ = 0x00000000;
322 *p_table ++ = 0x00000000;
323 *p_table ++ = 0x00000000;
325 *p_table ++ = 0x00000000;
326 *p_table ++ = 0x00000000;
327 *p_table ++ = 0x00000000;
331 void hsw_veb_iecp_tcc_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
333 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 168);
335 // VAProcFilterParameterBuffer * tcc_param =
336 // (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
338 if(!(proc_ctx->filters_mask & VPP_IECP_TCC)){
339 memset(p_table, 0, 11 * 4);
341 *p_table ++ = 0x00000000;
342 *p_table ++ = 0x00000000;
343 *p_table ++ = 0x1e34cc91;
344 *p_table ++ = 0x3e3cce91;
345 *p_table ++ = 0x02e80195;
347 *p_table ++ = 0x0197046b;
348 *p_table ++ = 0x01790174;
349 *p_table ++ = 0x00000000;
350 *p_table ++ = 0x00000000;
351 *p_table ++ = 0x03030000;
353 *p_table ++ = 0x009201c0;
357 void hsw_veb_iecp_pro_amp_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
359 unsigned int contrast = 0x80; //default
360 int brightness = 0x00; //default
361 int cos_c_s = 256 ; //default
362 int sin_c_s = 0; //default
363 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 212);
365 if(!(proc_ctx->filters_mask & VPP_IECP_PRO_AMP)){
366 memset(p_table, 0, 2 * 4);
368 float src_saturation = 1.0;
370 float src_contrast = 1.0;
371 float src_brightness = 0.0;
372 float tmp_value = 0.0;
375 VAProcFilterParameterBufferColorBalance * amp_params =
376 (VAProcFilterParameterBufferColorBalance *) proc_ctx->filter_iecp_amp;
378 for (i = 0; i < proc_ctx->filter_iecp_amp_num_elements; i++){
379 VAProcColorBalanceType attrib = amp_params[i].attrib;
381 if(attrib == VAProcColorBalanceHue) {
382 src_hue = amp_params[i].value; //(-180.0, 180.0)
383 }else if(attrib == VAProcColorBalanceSaturation) {
384 src_saturation = amp_params[i].value; //(0.0, 10.0)
385 }else if(attrib == VAProcColorBalanceBrightness) {
386 src_brightness = amp_params[i].value; // (-100.0, 100.0)
387 brightness = intel_format_convert(src_brightness, 7, 4, 1);
388 }else if(attrib == VAProcColorBalanceContrast) {
389 src_contrast = amp_params[i].value; // (0.0, 10.0)
390 contrast = intel_format_convert(src_contrast, 4, 7, 0);
394 tmp_value = cos(src_hue/180*PI) * src_contrast * src_saturation;
395 cos_c_s = intel_format_convert(tmp_value, 7, 8, 1);
397 tmp_value = sin(src_hue/180*PI) * src_contrast * src_saturation;
398 sin_c_s = intel_format_convert(tmp_value, 7, 8, 1);
400 *p_table ++ = ( 0 << 28 | //reserved
401 contrast << 17 | //contrast value (U4.7 format)
403 brightness << 1| // S7.4 format
406 *p_table ++ = ( cos_c_s << 16 | // cos(h) * contrast * saturation
407 sin_c_s); // sin(h) * contrast * saturation
/*
 * Program the IECP CSC (color-space conversion) coefficients: 8 dwords at
 * byte offset 220 of the (already mapped) IECP state table.  The 3x3
 * matrix defaults to identity; RGB->YUV or YUV->RGB coefficients are
 * selected from the input/output fourcc pair, packed as S2.10 fixed point
 * via intel_format_convert(), with u/v offset vectors in S10.
 *
 * NOTE(review): the coefficient values look like BT.601-range constants
 * (e.g. 0.257/0.504/0.098), but several offset assignments (u_coef/v_coef)
 * appear to be missing from this extracted chunk -- confirm against the
 * complete source before editing.
 */
413 void hsw_veb_iecp_csc_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
415     unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 220);
416     float tran_coef[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
417     float v_coef[3] = {0.0, 0.0, 0.0};
418     float u_coef[3] = {0.0, 0.0, 0.0};
419     int is_transform_enabled = 0;
421     if(!(proc_ctx->filters_mask & VPP_IECP_CSC)){
422         memset(p_table, 0, 8 * 4);
426     VAProcColorStandardType in_color_std = proc_ctx->pipeline_param->surface_color_standard;
427     VAProcColorStandardType out_color_std = proc_ctx->pipeline_param->output_color_standard;
428     assert(in_color_std == out_color_std);
430     if(proc_ctx->fourcc_input == VA_FOURCC('R','G','B','A') &&
431        (proc_ctx->fourcc_output == VA_FOURCC('N','V','1','2') ||
432         proc_ctx->fourcc_output == VA_FOURCC('Y','V','1','2') ||
433         proc_ctx->fourcc_output == VA_FOURCC('Y','V','Y','2') ||
434         proc_ctx->fourcc_output == VA_FOURCC('A','Y','U','V'))) {
436         tran_coef[0] = 0.257;
437         tran_coef[1] = 0.504;
438         tran_coef[2] = 0.098;
439         tran_coef[3] = -0.148;
440         tran_coef[4] = -0.291;
441         tran_coef[5] = 0.439;
442         tran_coef[6] = 0.439;
443         tran_coef[7] = -0.368;
444         tran_coef[8] = -0.071;
450         is_transform_enabled = 1;
451     }else if((proc_ctx->fourcc_input == VA_FOURCC('N','V','1','2') ||
452               proc_ctx->fourcc_input == VA_FOURCC('Y','V','1','2') ||
453               proc_ctx->fourcc_input == VA_FOURCC('Y','U','Y','2') ||
454               proc_ctx->fourcc_input == VA_FOURCC('A','Y','U','V'))&&
455               proc_ctx->fourcc_output == VA_FOURCC('R','G','B','A')) {
457         tran_coef[0] = 1.164;
458         tran_coef[1] = 0.000;
459         tran_coef[2] = 1.569;
460         tran_coef[3] = 1.164;
461         tran_coef[4] = -0.813;
462         tran_coef[5] = -0.392;
463         tran_coef[6] = 1.164;
464         tran_coef[7] = 2.017;
465         tran_coef[8] = 0.000;
468         v_coef[1] = -128 * 4;
469         v_coef[2] = -128 * 4;
471         is_transform_enabled = 1;
472     }else if(proc_ctx->fourcc_input != proc_ctx->fourcc_output){
473         //enable when input and output format are different.
474         is_transform_enabled = 1;
477     if(is_transform_enabled == 0){
478         memset(p_table, 0, 8 * 4);
480     *p_table ++ = ( 0 << 29 | //reserved
481                     intel_format_convert(tran_coef[1], 2, 10, 1) << 16 | //c1, s2.10 format
482                     intel_format_convert(tran_coef[0], 2, 10, 1) << 3 |  //c0, s2.10 format
484                     0 << 1 | // yuv_channel swap
485                     is_transform_enabled);
487     *p_table ++ = ( 0 << 26 | //reserved
488                     intel_format_convert(tran_coef[3], 2, 10, 1) << 13 |
489                     intel_format_convert(tran_coef[2], 2, 10, 1));
491     *p_table ++ = ( 0 << 26 | //reserved
492                     intel_format_convert(tran_coef[5], 2, 10, 1) << 13 |
493                     intel_format_convert(tran_coef[4], 2, 10, 1));
495     *p_table ++ = ( 0 << 26 | //reserved
496                     intel_format_convert(tran_coef[7], 2, 10, 1) << 13 |
497                     intel_format_convert(tran_coef[6], 2, 10, 1));
499     *p_table ++ = ( 0 << 13 | //reserved
500                     intel_format_convert(tran_coef[8], 2, 10, 1));
502     *p_table ++ = ( 0 << 22 | //reserved
503                     intel_format_convert(u_coef[0], 10, 0, 1) << 11 |
504                     intel_format_convert(v_coef[0], 10, 0, 1));
506     *p_table ++ = ( 0 << 22 | //reserved
507                     intel_format_convert(u_coef[1], 10, 0, 1) << 11 |
508                     intel_format_convert(v_coef[1], 10, 0, 1));
510     *p_table ++ = ( 0 << 22 | //reserved
511                     intel_format_convert(u_coef[2], 10, 0, 1) << 11 |
512                     intel_format_convert(v_coef[2], 10, 0, 1));
516 void hsw_veb_iecp_aoi_table(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
518 unsigned int *p_table = (unsigned int*)(proc_ctx->iecp_state_table.ptr + 252);
519 // VAProcFilterParameterBuffer * tcc_param =
520 // (VAProcFilterParameterBuffer *) proc_ctx->filter_iecp_tcc;
522 if(!(proc_ctx->filters_mask & VPP_IECP_AOI)){
523 memset(p_table, 0, 3 * 4);
525 *p_table ++ = 0x00000000;
526 *p_table ++ = 0x00030000;
527 *p_table ++ = 0x00030000;
531 void hsw_veb_state_table_setup(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
533 if(proc_ctx->filters_mask & 0x000000ff) {
534 dri_bo *dndi_bo = proc_ctx->dndi_state_table.bo;
535 dri_bo_map(dndi_bo, 1);
536 proc_ctx->dndi_state_table.ptr = dndi_bo->virtual;
538 hsw_veb_dndi_table(ctx, proc_ctx);
540 dri_bo_unmap(dndi_bo);
543 if(proc_ctx->filters_mask & 0x0000ff00 ||
544 proc_ctx->fourcc_input != proc_ctx->fourcc_output) {
545 dri_bo *iecp_bo = proc_ctx->iecp_state_table.bo;
546 dri_bo_map(iecp_bo, 1);
547 proc_ctx->iecp_state_table.ptr = iecp_bo->virtual;
549 hsw_veb_iecp_std_table(ctx, proc_ctx);
550 hsw_veb_iecp_ace_table(ctx, proc_ctx);
551 hsw_veb_iecp_tcc_table(ctx, proc_ctx);
552 hsw_veb_iecp_pro_amp_table(ctx, proc_ctx);
553 hsw_veb_iecp_csc_table(ctx, proc_ctx);
554 hsw_veb_iecp_aoi_table(ctx, proc_ctx);
556 dri_bo_unmap(iecp_bo);
560 void hsw_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *proc_ctx)
562 struct intel_batchbuffer *batch = proc_ctx->batch;
563 unsigned int is_dn_enabled = (proc_ctx->filters_mask & 0x01)? 1: 0;
564 unsigned int is_di_enabled = (proc_ctx->filters_mask & 0x02)? 1: 0;
565 unsigned int is_iecp_enabled = (proc_ctx->filters_mask & 0xff00)?1:0;
567 if(proc_ctx->fourcc_input != proc_ctx->fourcc_output ||
568 (is_dn_enabled == 0 && is_di_enabled == 0)){
572 BEGIN_VEB_BATCH(batch, 6);
573 OUT_VEB_BATCH(batch, VEB_STATE | (6 - 2));
575 0 << 26 | // state surface control bits
576 0 << 11 | // reserved.
577 0 << 10 | // pipe sync disable
578 2 << 8 | // DI output frame
579 0 << 7 | // 444->422 downsample method
580 0 << 6 | // 422->420 downsample method
581 !!(proc_ctx->is_first_frame && (is_di_enabled || is_dn_enabled)) << 5 | // DN/DI first frame
582 is_di_enabled << 4 | // DI enable
583 is_dn_enabled << 3 | // DN enable
584 is_iecp_enabled << 2 | // global IECP enabled
585 0 << 1 | // ColorGamutCompressionEnable
586 0 ) ; // ColorGamutExpansionEnable.
589 proc_ctx->dndi_state_table.bo,
590 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
593 proc_ctx->iecp_state_table.bo,
594 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
597 proc_ctx->gamut_state_table.bo,
598 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
601 proc_ctx->vertex_state_table.bo,
602 I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
604 ADVANCE_VEB_BATCH(batch);
/*
 * Emit a VEB_SURFACE_STATE command describing one frame-store surface:
 * the current output when is_output != 0, otherwise the current input.
 * Supported formats are NV12, YUY2, AYUV and RGBA (asserted); the pitch,
 * chroma offsets and tiling bits are derived from the object_surface.
 *
 * NOTE(review): several command dwords end mid-expression in this chunk
 * (the if/else selecting obj_surf and some OUT_VEB_BATCH wrappers are
 * missing); width is emitted without the usual minus-one -- confirm both
 * against the complete source and the VEBOX surface-state layout.
 */
607 void hsw_veb_surface_state(VADriverContextP ctx, struct intel_vebox_context *proc_ctx, unsigned int is_output)
609     struct i965_driver_data *i965 = i965_driver_data(ctx);
610     struct intel_batchbuffer *batch = proc_ctx->batch;
611     unsigned int u_offset_y = 0, v_offset_y = 0;
612     unsigned int is_uv_interleaved = 0, tiling = 0, swizzle = 0;
613     unsigned int surface_format = PLANAR_420_8;
614     struct object_surface* obj_surf = NULL;
615     unsigned int surface_pitch = 0;
616     unsigned int half_pitch_chroma = 0;
619         obj_surf = SURFACE(proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id);
621         obj_surf = SURFACE(proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id);
624     assert(obj_surf->fourcc == VA_FOURCC_NV12 ||
625            obj_surf->fourcc == VA_FOURCC_YUY2 ||
626            obj_surf->fourcc == VA_FOURCC_AYUV ||
627            obj_surf->fourcc == VA_FOURCC_RGBA);
629     if (obj_surf->fourcc == VA_FOURCC_NV12) {
630         surface_format = PLANAR_420_8;
631         surface_pitch = obj_surf->width;
632         is_uv_interleaved = 1;
633         half_pitch_chroma = 0;
634     } else if (obj_surf->fourcc == VA_FOURCC_YUY2) {
635         surface_format = YCRCB_NORMAL;
636         surface_pitch = obj_surf->width * 2;
637         is_uv_interleaved = 0;
638         half_pitch_chroma = 0;
639     } else if (obj_surf->fourcc == VA_FOURCC_AYUV) {
640         surface_format = PACKED_444A_8;
641         surface_pitch = obj_surf->width * 4;
642         is_uv_interleaved = 0;
643         half_pitch_chroma = 0;
644     } else if (obj_surf->fourcc == VA_FOURCC_RGBA) {
645         surface_format = R8G8B8A8_UNORM_SRGB;
646         surface_pitch = obj_surf->width * 4;
647         is_uv_interleaved = 0;
648         half_pitch_chroma = 0;
651     u_offset_y = obj_surf->y_cb_offset;
652     v_offset_y = obj_surf->y_cr_offset;
654     dri_bo_get_tiling(obj_surf->bo, &tiling, &swizzle);
656     BEGIN_VEB_BATCH(batch, 6);
657     OUT_VEB_BATCH(batch, VEB_SURFACE_STATE | (6 - 2));
660                   is_output); // surface indentification.
663                   (proc_ctx->height_input - 1) << 18 | // height . w3
664                   (proc_ctx->width_input) << 4 |       // width
668                   surface_format << 28 |       // surface format, YCbCr420. w4
669                   is_uv_interleaved << 27 |    // interleave chrome , two seperate palar
670                   0 << 20 |                    // reserved
671                   (surface_pitch - 1) << 3 |   // surface pitch, 64 align
672                   half_pitch_chroma << 2 |     // half pitch for chrome
673                   !!tiling << 1 |              // tiled surface, linear surface used
674                   (tiling == I915_TILING_Y));  // tiled walk, ignored when liner surface
677                   0 << 29 |     // reserved . w5
678                   0 << 16 |     // X offset for V(Cb)
679                   0 << 15 |     // reserved
680                   u_offset_y);  // Y offset for V(Cb)
683                   0 << 29 |     // reserved . w6
684                   0 << 16 |     // X offset for V(Cr)
685                   0 << 15 |     // reserved
686                   v_offset_y ); // Y offset for V(Cr)
688     ADVANCE_VEB_BATCH(batch);
/*
 * Emit the VEB_DNDI_IECP_STATE command: copies the application's input
 * surface into the pipeline's current-input frame store, then programs
 * the processed-region extents and the relocations for every frame-store
 * buffer (current/previous input, STMM in/out, DN output, current/previous
 * output, statistics).
 *
 * NOTE(review): the frame-rotation code (swapping previous/current input
 * and STMM in/out) is commented out here and the closing comment markers
 * are missing in this chunk; the reloc macro names for the buffer dwords
 * are also missing -- confirm against the complete source.
 */
693     struct intel_batchbuffer *batch = proc_ctx->batch;
694     unsigned char frame_ctrl_bits = 0;
695     unsigned int startingX = 0;
696     unsigned int endingX = proc_ctx->width_input;
698     /* s1:update the previous and current input */
699     /* tempFrame = proc_ctx->frame_store[FRAME_IN_PREVIOUS];
700        proc_ctx->frame_store[FRAME_IN_PREVIOUS] = proc_ctx->frame_store[FRAME_IN_CURRENT]; ;
701        proc_ctx->frame_store[FRAME_IN_CURRENT] = tempFrame;
703     if(proc_ctx->surface_input_vebox != -1){
704         vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
705                          proc_ctx->surface_input_vebox);
707         vpp_surface_copy(ctx, proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id,
708                          proc_ctx->surface_input);
711     /*s2: update the STMM input and output */
712     /* tempFrame = proc_ctx->frame_store[FRAME_IN_STMM];
713        proc_ctx->frame_store[FRAME_IN_STMM] = proc_ctx->frame_store[FRAME_OUT_STMM]; ;
714        proc_ctx->frame_store[FRAME_OUT_STMM] = tempFrame;
716     /*s3:set reloc buffer address */
717     BEGIN_VEB_BATCH(batch, 10);
718     OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (10 - 2));
723               proc_ctx->frame_store[FRAME_IN_CURRENT].bo,
724               I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
726               proc_ctx->frame_store[FRAME_IN_PREVIOUS].bo,
727               I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
729               proc_ctx->frame_store[FRAME_IN_STMM].bo,
730               I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);
732               proc_ctx->frame_store[FRAME_OUT_STMM].bo,
733               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
735               proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].bo,
736               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
738               proc_ctx->frame_store[FRAME_OUT_CURRENT].bo,
739               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
741               proc_ctx->frame_store[FRAME_OUT_PREVIOUS].bo,
742               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
744               proc_ctx->frame_store[FRAME_OUT_STATISTIC].bo,
745               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);
747     ADVANCE_VEB_BATCH(batch);
/*
 * Allocate every resource the VEBOX pipeline needs before execution:
 * pick the effective input/output surfaces (preferring the internal
 * conversion surfaces when present), ensure both have backing BOs and
 * record their fourcc, allocate any still-missing frame-store surfaces
 * (input-like formats for input/DN slots, tiled for STMM, output-like
 * formats for output slots), and (re)allocate the DN/DI, IECP, gamut and
 * vertex indirect state buffers.
 *
 * NOTE(review): the declarations of va_status and bo, the default tiling
 * for freshly created surfaces, the format arguments to
 * i965_CreateSurfaces and the dri_bo_alloc sizes are missing from this
 * extracted chunk -- confirm against the complete source.
 */
755     struct i965_driver_data *i965 = i965_driver_data(ctx);
756     unsigned int input_fourcc, output_fourcc;
757     unsigned int input_sampling, output_sampling;
758     unsigned int input_tiling, output_tiling;
759     VAGenericID vebox_in_id, vebox_out_id;
760     unsigned int i, swizzle;
762     if(proc_ctx->surface_input_vebox != -1){
763         vebox_in_id = proc_ctx->surface_input_vebox;
765         vebox_in_id = proc_ctx->surface_input;
768     if(proc_ctx->surface_output_vebox != -1){
769         vebox_out_id = proc_ctx->surface_output_vebox;
771         vebox_out_id = proc_ctx->surface_output;
774     struct object_surface* obj_surf_in = SURFACE(vebox_in_id);
775     struct object_surface* obj_surf_out = SURFACE(vebox_out_id);
777     if(obj_surf_in->bo == NULL){
778         input_fourcc = VA_FOURCC('N','V','1','2');
779         input_sampling = SUBSAMPLE_YUV420;
781         i965_check_alloc_surface_bo(ctx, obj_surf_in, input_tiling, input_fourcc, input_sampling);
783         input_fourcc = obj_surf_in->fourcc;
784         input_sampling = obj_surf_in->subsampling;
785         dri_bo_get_tiling(obj_surf_in->bo, &input_tiling, &swizzle);
786         input_tiling = !!input_tiling;
789     if(obj_surf_out->bo == NULL){
790         output_fourcc = VA_FOURCC('N','V','1','2');
791         output_sampling = SUBSAMPLE_YUV420;
793         i965_check_alloc_surface_bo(ctx, obj_surf_out, output_tiling, output_fourcc, output_sampling);
795         output_fourcc = obj_surf_out->fourcc;
796         output_sampling = obj_surf_out->subsampling;
797         dri_bo_get_tiling(obj_surf_out->bo, &output_tiling, &swizzle);
798         output_tiling = !!output_tiling;
801     /* vebox pipelien input surface format info */
802     proc_ctx->fourcc_input = input_fourcc;
803     proc_ctx->fourcc_output = output_fourcc;
805     /* create pipeline surfaces */
806     VASurfaceID surfaces[FRAME_STORE_SUM];
807     va_status = i965_CreateSurfaces(ctx,
808                                     proc_ctx ->width_input,
809                                     proc_ctx ->height_input,
813     assert(va_status == VA_STATUS_SUCCESS);
815     for(i = 0; i < FRAME_STORE_SUM; i ++) {
816         if(proc_ctx->frame_store[i].bo){
817             continue; //refer external surface for vebox pipeline
820         VASurfaceID new_surface;
821         va_status = i965_CreateSurfaces(ctx,
822                                         proc_ctx ->width_input,
823                                         proc_ctx ->height_input,
827         assert(va_status == VA_STATUS_SUCCESS);
829         proc_ctx->frame_store[i].surface_id = new_surface;
830         struct object_surface* obj_surf = SURFACE(new_surface);
832         if( i <= FRAME_IN_PREVIOUS || i == FRAME_OUT_CURRENT_DN) {
833             i965_check_alloc_surface_bo(ctx, obj_surf, input_tiling, input_fourcc, input_sampling);
834         } else if( i == FRAME_IN_STMM || i == FRAME_OUT_STMM){
835             i965_check_alloc_surface_bo(ctx, obj_surf, 1, input_fourcc, input_sampling);
836         } else if( i >= FRAME_OUT_CURRENT){
837             i965_check_alloc_surface_bo(ctx, obj_surf, output_tiling, output_fourcc, output_sampling);
839         proc_ctx->frame_store[i].bo = obj_surf->bo;
840         dri_bo_reference(proc_ctx->frame_store[i].bo);
841         proc_ctx->frame_store[i].is_internal_surface = 1;
844     /* alloc dndi state table */
845     dri_bo_unreference(proc_ctx->dndi_state_table.bo);
846     bo = dri_bo_alloc(i965->intel.bufmgr,
847                       "vebox: dndi state Buffer",
849     proc_ctx->dndi_state_table.bo = bo;
850     dri_bo_reference(proc_ctx->dndi_state_table.bo);
852     /* alloc iecp state table */
853     dri_bo_unreference(proc_ctx->iecp_state_table.bo);
854     bo = dri_bo_alloc(i965->intel.bufmgr,
855                       "vebox: iecp state Buffer",
857     proc_ctx->iecp_state_table.bo = bo;
858     dri_bo_reference(proc_ctx->iecp_state_table.bo);
860     /* alloc gamut state table */
861     dri_bo_unreference(proc_ctx->gamut_state_table.bo);
862     bo = dri_bo_alloc(i965->intel.bufmgr,
863                       "vebox: gamut state Buffer",
865     proc_ctx->gamut_state_table.bo = bo;
866     dri_bo_reference(proc_ctx->gamut_state_table.bo);
868     /* alloc vertex state table */
869     dri_bo_unreference(proc_ctx->vertex_state_table.bo);
870     bo = dri_bo_alloc(i965->intel.bufmgr,
871                       "vertex: iecp state Buffer",
873     proc_ctx->vertex_state_table.bo = bo;
874     dri_bo_reference(proc_ctx->vertex_state_table.bo);
878 void hsw_veb_surface_reference(VADriverContextP ctx,
879 struct intel_vebox_context *proc_ctx)
881 struct object_surface * obj_surf;
882 struct i965_driver_data *i965 = i965_driver_data(ctx);
883 VAGenericID vebox_in_id, vebox_out_id;
885 if(proc_ctx->surface_input_vebox != -1){
886 vebox_in_id = proc_ctx->surface_input_vebox;
888 vebox_in_id = proc_ctx->surface_input;
891 if(proc_ctx->surface_output_vebox != -1){
892 vebox_out_id = proc_ctx->surface_output_vebox;
894 vebox_out_id = proc_ctx->surface_output;
897 /* update the input surface */
898 obj_surf = SURFACE(vebox_in_id);
899 dri_bo_unreference(proc_ctx->frame_store[FRAME_IN_CURRENT].bo);
900 proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id = vebox_in_id;
901 proc_ctx->frame_store[FRAME_IN_CURRENT].bo = obj_surf->bo;
902 proc_ctx->frame_store[FRAME_IN_CURRENT].is_internal_surface = 0;
903 dri_bo_reference(proc_ctx->frame_store[FRAME_IN_CURRENT].bo);
905 /* update the output surface */
906 obj_surf = SURFACE(vebox_out_id);
907 if(proc_ctx->filters_mask == VPP_DNDI_DN){
908 dri_bo_unreference(proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].bo);
909 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].surface_id = vebox_out_id;
910 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].bo = obj_surf->bo;
911 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].is_internal_surface = 0;
912 dri_bo_reference(proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].bo);
914 dri_bo_unreference(proc_ctx->frame_store[FRAME_OUT_CURRENT].bo);
915 proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = vebox_out_id;
916 proc_ctx->frame_store[FRAME_OUT_CURRENT].bo = obj_surf->bo;
917 proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
918 dri_bo_reference(proc_ctx->frame_store[FRAME_OUT_CURRENT].bo);
922 void hsw_veb_surface_unreference(VADriverContextP ctx,
923 struct intel_vebox_context *proc_ctx)
925 /* unreference the input surface */
926 dri_bo_unreference(proc_ctx->frame_store[FRAME_IN_CURRENT].bo);
927 proc_ctx->frame_store[FRAME_IN_CURRENT].surface_id = -1;
928 proc_ctx->frame_store[FRAME_IN_CURRENT].bo = NULL;
929 proc_ctx->frame_store[FRAME_IN_CURRENT].is_internal_surface = 0;
931 /* unreference the shared output surface */
932 if(proc_ctx->filters_mask == VPP_DNDI_DN){
933 dri_bo_unreference(proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].bo);
934 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].surface_id = -1;
935 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].bo = NULL;
936 proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].is_internal_surface = 0;
938 dri_bo_unreference(proc_ctx->frame_store[FRAME_OUT_CURRENT].bo);
939 proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id = -1;
940 proc_ctx->frame_store[FRAME_OUT_CURRENT].bo = NULL;
941 proc_ctx->frame_store[FRAME_OUT_CURRENT].is_internal_surface = 0;
/*
 * Decide which format/scaling conversions the pipeline needs and create
 * the temporary NV12 surfaces for them:
 *  - PRE_FORMAT_CONVERT  when the input is a planar YUV format (YV12,
 *    I420, IMC1, IMC3) the VEBOX cannot consume directly; the input is
 *    converted into surface_input_vebox here.
 *  - POST_FORMAT_CONVERT when the output is one of those planar formats.
 *  - POST_SCALING_CONVERT when input and output sizes differ; an extra
 *    output-sized surface (surface_output_scaled) is created.
 * Only full-frame processing is supported (asserted against the pipeline
 * parameter regions).
 *
 * NOTE(review): the declaration of va_status, the CreateSurfaces format
 * arguments, the unsupported-format branches and the final return are
 * missing from this extracted chunk -- confirm against the complete source.
 */
945 int hsw_veb_pre_format_convert(VADriverContextP ctx,
946                                struct intel_vebox_context *proc_ctx)
949     struct i965_driver_data *i965 = i965_driver_data(ctx);
950     struct object_surface* obj_surf_input = SURFACE(proc_ctx->surface_input);
951     struct object_surface* obj_surf_output = SURFACE(proc_ctx->surface_output);
952     struct object_surface* obj_surf_input_vebox;
953     struct object_surface* obj_surf_output_vebox;
955     proc_ctx->width_input = obj_surf_input->orig_width;
956     proc_ctx->height_input = obj_surf_input->orig_height;
957     proc_ctx->width_output = obj_surf_output->orig_width;
958     proc_ctx->height_output = obj_surf_output->orig_height;
960     /* only partial frame is not supported to be processed */
962     assert(proc_ctx->width_input == proc_ctx->pipeline_param->surface_region->width);
963     assert(proc_ctx->height_input == proc_ctx->pipeline_param->surface_region->height);
964     assert(proc_ctx->width_output == proc_ctx->pipeline_param->output_region->width);
965     assert(proc_ctx->height_output == proc_ctx->pipeline_param->output_region->height);
968     if(proc_ctx->width_output != proc_ctx->width_input ||
969        proc_ctx->height_output != proc_ctx->height_input){
970         proc_ctx->format_convert_flags |= POST_SCALING_CONVERT;
973     /* convert the following format to NV12 format */
974     if(obj_surf_input->fourcc == VA_FOURCC('Y','V','1','2') ||
975        obj_surf_input->fourcc == VA_FOURCC('I','4','2','0') ||
976        obj_surf_input->fourcc == VA_FOURCC('I','M','C','1') ||
977        obj_surf_input->fourcc == VA_FOURCC('I','M','C','3')){
979         proc_ctx->format_convert_flags |= PRE_FORMAT_CONVERT;
981     } else if(obj_surf_input->fourcc == VA_FOURCC('R','G','B','A') ||
982               obj_surf_input->fourcc == VA_FOURCC('A','Y','U','V') ||
983               obj_surf_input->fourcc == VA_FOURCC('Y','U','Y','2') ||
984               obj_surf_input->fourcc == VA_FOURCC('N','V','1','2')){
985         // nothing to do here
987         /* not support other format as input */
991     if(proc_ctx->format_convert_flags & PRE_FORMAT_CONVERT){
992         if(proc_ctx->surface_input_vebox == -1){
993             va_status = i965_CreateSurfaces(ctx,
994                                             proc_ctx->width_input,
995                                             proc_ctx->height_input,
998                                             &(proc_ctx->surface_input_vebox));
999             assert(va_status == VA_STATUS_SUCCESS);
1000             obj_surf_input_vebox = SURFACE(proc_ctx->surface_input_vebox);
1001             i965_check_alloc_surface_bo(ctx, obj_surf_input_vebox, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
1004         vpp_surface_convert(ctx, proc_ctx->surface_input_vebox, proc_ctx->surface_input);
1007     /* create one temporary NV12 surfaces for conversion*/
1008     if(obj_surf_output->fourcc == VA_FOURCC('Y','V','1','2') ||
1009        obj_surf_output->fourcc == VA_FOURCC('I','4','2','0') ||
1010        obj_surf_output->fourcc == VA_FOURCC('I','M','C','1') ||
1011        obj_surf_output->fourcc == VA_FOURCC('I','M','C','3')) {
1013         proc_ctx->format_convert_flags |= POST_FORMAT_CONVERT;
1014     } else if(obj_surf_output->fourcc == VA_FOURCC('R','G','B','A') ||
1015               obj_surf_output->fourcc == VA_FOURCC('A','Y','U','V') ||
1016               obj_surf_output->fourcc == VA_FOURCC('Y','U','Y','2') ||
1017               obj_surf_output->fourcc == VA_FOURCC('N','V','1','2')){
1018         /* Nothing to do here */
1020         /* not support other format as input */
1024     if(proc_ctx->format_convert_flags & POST_FORMAT_CONVERT ||
1025        proc_ctx->format_convert_flags & POST_SCALING_CONVERT){
1026         if(proc_ctx->surface_output_vebox == -1){
1027             va_status = i965_CreateSurfaces(ctx,
1028                                             proc_ctx->width_input,
1029                                             proc_ctx->height_input,
1030                                             VA_RT_FORMAT_YUV420,
1032                                             &(proc_ctx->surface_output_vebox));
1033             assert(va_status == VA_STATUS_SUCCESS);
1034             obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_vebox);
1035             i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
1039     if(proc_ctx->format_convert_flags & POST_SCALING_CONVERT){
1040         if(proc_ctx->surface_output_scaled == -1){
1041             va_status = i965_CreateSurfaces(ctx,
1042                                             proc_ctx->width_output,
1043                                             proc_ctx->height_output,
1044                                             VA_RT_FORMAT_YUV420,
1046                                             &(proc_ctx->surface_output_scaled));
1047             assert(va_status == VA_STATUS_SUCCESS);
1048             obj_surf_output_vebox = SURFACE(proc_ctx->surface_output_scaled);
1049             i965_check_alloc_surface_bo(ctx, obj_surf_output_vebox, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
/*
 * Post-stage of the VEBOX pipeline: copy/convert the picture produced by
 * the hardware pipe into the caller's output surface whenever the pipe
 * could not write the requested format/size directly.  The work required
 * is encoded in proc_ctx->format_convert_flags, which is set up by the
 * matching pre-convert stage before the pipe runs.
 */
int hsw_veb_post_format_convert(VADriverContextP ctx,
                                struct intel_vebox_context *proc_ctx)
    struct i965_driver_data *i965 = i965_driver_data(ctx); /* needed by SURFACE() lookups */
    VASurfaceID surf_id_pipe_out = 0;

    /* Pick the surface the pipe actually wrote: the dedicated DN frame
     * store when denoise is the only enabled filter, the regular
     * current-output slot otherwise.
     * NOTE(review): the two assignments below are the if/else arms; the
     * brace/else line is not visible in this excerpt — confirm in full file. */
    if(proc_ctx->filters_mask == VPP_DNDI_DN){
        surf_id_pipe_out = proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].surface_id;
        surf_id_pipe_out = proc_ctx->frame_store[FRAME_OUT_CURRENT].surface_id;

    if(!(proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
       !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)){
        /* Output surface format is covered by the vebox pipeline and the
         * processed picture is already stored in the output surface,
         * so nothing will be done here */
    } else if ((proc_ctx->format_convert_flags & POST_FORMAT_CONVERT) &&
               !(proc_ctx->format_convert_flags & POST_SCALING_CONVERT)){
        /* convert and copy NV12 to YV12/IMC3/IMC2 output */
        vpp_surface_convert(ctx,proc_ctx->surface_output, surf_id_pipe_out);

    } else if(proc_ctx->format_convert_flags & POST_SCALING_CONVERT) {
        /* scaling, convert and copy NV12 to YV12/IMC3/IMC2 output.
         * The pipe-out surface is always NV12 here (allocated as NV12 in
         * the pre-convert stage). */
        assert((SURFACE(surf_id_pipe_out))->fourcc == VA_FOURCC('N','V','1','2'));

        /* first step: scale pipe output into the intermediate NV12
         * surface_output_scaled (sized to the output region) */
        vpp_surface_scaling(ctx,proc_ctx->surface_output_scaled, surf_id_pipe_out);

        /* second step: color format convert and copy to output */
        struct object_surface *obj_surf = SURFACE(proc_ctx->surface_output);
        if(obj_surf->fourcc == VA_FOURCC('N','V','1','2') ||
           obj_surf->fourcc == VA_FOURCC('Y','V','1','2') ||
           obj_surf->fourcc == VA_FOURCC('I','4','2','0') ||
           obj_surf->fourcc == VA_FOURCC('Y','U','Y','2') ||
           obj_surf->fourcc == VA_FOURCC('I','M','C','1') ||
           obj_surf->fourcc == VA_FOURCC('I','M','C','3')) {
            vpp_surface_convert(ctx,proc_ctx->surface_output, proc_ctx->surface_output_scaled);
/*
 * Entry point for one VA-API video-processing pass on the Haswell VEBOX
 * engine.  Translates the VAProcFilterParameterBuffers attached to the
 * pipeline into the context's filter mask, performs any required input
 * format conversion, emits and flushes the VEBOX command sequence, then
 * converts the pipe result into the user's output surface.
 */
VAStatus gen75_vebox_process_picture(VADriverContextP ctx,
                                     struct intel_vebox_context *proc_ctx)
    struct i965_driver_data *i965 = i965_driver_data(ctx); /* needed by BUFFER() lookups */

    VAProcPipelineParameterBuffer *pipe = proc_ctx->pipeline_param;
    VAProcFilterParameterBuffer* filter = NULL;
    struct object_buffer *obj_buf = NULL;

    /* Build the filter mask and remember each filter's parameter buffer.
     * NOTE(review): loop index 'i' has no visible declaration in this
     * excerpt — presumably declared earlier in the function; confirm.
     * NOTE(review): obj_buf/buffer_store are dereferenced without a NULL
     * check — relies on the buffer IDs having been validated upstream. */
    for (i = 0; i < pipe->num_filters; i ++) {
        obj_buf = BUFFER(pipe->filters[i]);
        filter = (VAProcFilterParameterBuffer*)obj_buf-> buffer_store->buffer;

        if (filter->type == VAProcFilterNoiseReduction) {
            proc_ctx->filters_mask |= VPP_DNDI_DN;
            proc_ctx->filter_dn = filter;
        } else if (filter->type == VAProcFilterDeinterlacing) {
            proc_ctx->filters_mask |= VPP_DNDI_DI;
            proc_ctx->filter_di = filter;
        } else if (filter->type == VAProcFilterColorBalance) {
            proc_ctx->filters_mask |= VPP_IECP_PRO_AMP;
            proc_ctx->filter_iecp_amp = filter;
            /* ProAmp takes a variable number of elements (one per
             * colour-balance attribute) */
            proc_ctx->filter_iecp_amp_num_elements = obj_buf->num_elements;
        } else if (filter->type == VAProcFilterColorStandard){
            proc_ctx->filters_mask |= VPP_IECP_CSC;
            proc_ctx->filter_iecp_csc = filter;

    /* Convert the input to a pipe-consumable format (NV12) if needed and
     * take references on every surface the pipe will touch. */
    hsw_veb_pre_format_convert(ctx, proc_ctx);
    hsw_veb_surface_reference(ctx, proc_ctx);

    /* Heavy-weight allocation (frame stores, indirect state tables) is
     * done once, on the first frame processed by this context. */
    if(proc_ctx->is_first_frame){
        hsw_veb_resource_prepare(ctx, proc_ctx);

    /* Emit the VEBOX batch: surface states, state tables, then the VEB
     * state + DNDI/IECP commands, and kick it to the hardware. */
    intel_batchbuffer_start_atomic_veb(proc_ctx->batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(proc_ctx->batch);
    hsw_veb_surface_state(ctx, proc_ctx, INPUT_SURFACE);
    hsw_veb_surface_state(ctx, proc_ctx, OUTPUT_SURFACE);
    hsw_veb_state_table_setup(ctx, proc_ctx);

    hsw_veb_state_command(ctx, proc_ctx);
    hsw_veb_dndi_iecp_command(ctx, proc_ctx);
    intel_batchbuffer_end_atomic(proc_ctx->batch);
    intel_batchbuffer_flush(proc_ctx->batch);

    /* Move/convert the pipe result into the caller's output surface and
     * drop the references taken above. */
    hsw_veb_post_format_convert(ctx, proc_ctx);
    hsw_veb_surface_unreference(ctx, proc_ctx);

    if(proc_ctx->is_first_frame)
        proc_ctx->is_first_frame = 0;

    return VA_STATUS_SUCCESS;
1160 void gen75_vebox_context_destroy(VADriverContextP ctx,
1161 struct intel_vebox_context *proc_ctx)
1165 if(proc_ctx->surface_input_vebox != -1){
1166 i965_DestroySurfaces(ctx, &proc_ctx->surface_input_vebox, 1);
1167 proc_ctx->surface_input_vebox = -1;
1170 if(proc_ctx->surface_output_vebox != -1){
1171 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_vebox, 1);
1172 proc_ctx->surface_output_vebox = -1;
1175 if(proc_ctx->surface_output_scaled != -1){
1176 i965_DestroySurfaces(ctx, &proc_ctx->surface_output_scaled, 1);
1177 proc_ctx->surface_output_scaled = -1;
1180 for(i = 0; i < FRAME_STORE_SUM; i ++) {
1181 if(proc_ctx->frame_store[i].bo){
1182 dri_bo_unreference(proc_ctx->frame_store[i].bo);
1183 i965_DestroySurfaces(ctx, &proc_ctx->frame_store[i].surface_id, 1);
1186 proc_ctx->frame_store[i].surface_id = -1;
1187 proc_ctx->frame_store[i].bo = NULL;
1188 proc_ctx->frame_store[i].valid = 0;
1191 /* dndi state table */
1192 dri_bo_unreference(proc_ctx->dndi_state_table.bo);
1193 proc_ctx->dndi_state_table.bo = NULL;
1195 /* iecp state table */
1196 dri_bo_unreference(proc_ctx->iecp_state_table.bo);
1197 proc_ctx->dndi_state_table.bo = NULL;
1199 /* gamut statu table */
1200 dri_bo_unreference(proc_ctx->gamut_state_table.bo);
1201 proc_ctx->gamut_state_table.bo = NULL;
1203 /* vertex state table */
1204 dri_bo_unreference(proc_ctx->vertex_state_table.bo);
1205 proc_ctx->vertex_state_table.bo = NULL;
1207 intel_batchbuffer_free(proc_ctx->batch);
1212 struct intel_vebox_context * gen75_vebox_context_init(VADriverContextP ctx)
1214 struct intel_driver_data *intel = intel_driver_data(ctx);
1215 struct intel_vebox_context *proc_context = calloc(1, sizeof(struct intel_vebox_context));
1217 proc_context->batch = intel_batchbuffer_new(intel, I915_EXEC_VEBOX, 0);
1218 memset(proc_context->frame_store, 0, sizeof(VEBFrameStore)*FRAME_STORE_SUM);
1220 proc_context->filters_mask = 0;
1221 proc_context->is_first_frame = 1;
1222 proc_context->surface_input_vebox = -1;
1223 proc_context->surface_output_vebox = -1;
1224 proc_context->surface_output_scaled = -1;
1225 proc_context->filters_mask = 0;
1226 proc_context->format_convert_flags = 0;
1228 return proc_context;