src/gen8_post_processing.c (android-x86/hardware-intel-common-vaapi.git)
Commit: render: clear background using 3D pipeline on GEN8+

1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the
6  * "Software"), to deal in the Software without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sub license, and/or sell copies of the Software, and to
9  * permit persons to whom the Software is furnished to do so, subject to
10  * the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the
13  * next paragraph) shall be included in all copies or substantial portions
14  * of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19  * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Xiang Haihao <haihao.xiang@intel.com>
26  *    Zhao Yakui <yakui.zhao@intel.com>
27  *
28  */
29
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <assert.h>
34
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "i965_drv_video.h"
40 #include "i965_post_processing.h"
41 #include "i965_render.h"
42 #include "i965_yuv_coefs.h"
43 #include "intel_media.h"
44
45 #include "gen75_picture_process.h"
46 #include "intel_common_vpp_internal.h"
47
48 #define SURFACE_STATE_PADDED_SIZE               SURFACE_STATE_PADDED_SIZE_GEN8
49
50 #define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * index)
51 #define BINDING_TABLE_OFFSET                    SURFACE_STATE_OFFSET(MAX_PP_SURFACES)
52
53 #define GPU_ASM_BLOCK_WIDTH         16
54 #define GPU_ASM_BLOCK_HEIGHT        8
55 #define GPU_ASM_X_OFFSET_ALIGNMENT  4
56
57 #define VA_STATUS_SUCCESS_1                     0xFFFFFFFE
58
59 VAStatus pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
60                             const struct i965_surface *src_surface,
61                             const VARectangle *src_rect,
62                             struct i965_surface *dst_surface,
63                             const VARectangle *dst_rect,
64                             void *filter_param);
65
66 VAStatus gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
67                                     const struct i965_surface *src_surface,
68                                     const VARectangle *src_rect,
69                                     struct i965_surface *dst_surface,
70                                     const VARectangle *dst_rect,
71                                     void *filter_param);
72
73 /* TODO: Modify the shader and compile it again.
74  * Currently it is derived from the Haswell version. */
75 static const uint32_t pp_null_gen8[][4] = {
76 };
77
78 static const uint32_t pp_nv12_load_save_nv12_gen8[][4] = {
79 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
80 };
81
82 static const uint32_t pp_nv12_load_save_pl3_gen8[][4] = {
83 #include "shaders/post_processing/gen8/pl2_to_pl3.g8b"
84 };
85
86 static const uint32_t pp_pl3_load_save_nv12_gen8[][4] = {
87 #include "shaders/post_processing/gen8/pl3_to_pl2.g8b"
88 };
89
90 static const uint32_t pp_pl3_load_save_pl3_gen8[][4] = {
91 #include "shaders/post_processing/gen8/pl3_to_pl3.g8b"
92 };
93
94 static const uint32_t pp_nv12_scaling_gen8[][4] = {
95 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
96 };
97
98 static const uint32_t pp_nv12_avs_gen8[][4] = {
99 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
100 };
101
102 static const uint32_t pp_nv12_dndi_gen8[][4] = {
103 // #include "shaders/post_processing/gen7/dndi.g75b"
104 };
105
106 static const uint32_t pp_nv12_dn_gen8[][4] = {
107 // #include "shaders/post_processing/gen7/nv12_dn_nv12.g75b"
108 };
109 static const uint32_t pp_nv12_load_save_pa_gen8[][4] = {
110 #include "shaders/post_processing/gen8/pl2_to_pa.g8b"
111 };
112 static const uint32_t pp_pl3_load_save_pa_gen8[][4] = {
113 #include "shaders/post_processing/gen8/pl3_to_pa.g8b"
114 };
115 static const uint32_t pp_pa_load_save_nv12_gen8[][4] = {
116 #include "shaders/post_processing/gen8/pa_to_pl2.g8b"
117 };
118 static const uint32_t pp_pa_load_save_pl3_gen8[][4] = {
119 #include "shaders/post_processing/gen8/pa_to_pl3.g8b"
120 };
121 static const uint32_t pp_pa_load_save_pa_gen8[][4] = {
122 #include "shaders/post_processing/gen8/pa_to_pa.g8b"
123 };
124 static const uint32_t pp_rgbx_load_save_nv12_gen8[][4] = {
125 #include "shaders/post_processing/gen8/rgbx_to_nv12.g8b"
126 };
127 static const uint32_t pp_nv12_load_save_rgbx_gen8[][4] = {
128 #include "shaders/post_processing/gen8/pl2_to_rgbx.g8b"
129 };
130
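/*
 * GEN8 post-processing module table: each entry pairs a PP_* id and its
 * kernel binary (the .g8b dword arrays included above) with the initialize()
 * hook used to set up that module's surfaces and parameters.  The DNDI and
 * DN entries currently have no GEN8 kernels and fall back to
 * pp_null_initialize.
 */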
131 static struct pp_module pp_modules_gen8[] = {
132     {
133         {
134             "NULL module (for testing)",
135             PP_NULL,
136             pp_null_gen8,
137             sizeof(pp_null_gen8),
138             NULL,
139         },
140
141         pp_null_initialize,
142     },
143
144     {
145         {
146             "NV12_NV12",
147             PP_NV12_LOAD_SAVE_N12,
148             pp_nv12_load_save_nv12_gen8,
149             sizeof(pp_nv12_load_save_nv12_gen8),
150             NULL,
151         },
152
153         gen8_pp_plx_avs_initialize,
154     },
155
156     {
157         {
158             "NV12_PL3",
159             PP_NV12_LOAD_SAVE_PL3,
160             pp_nv12_load_save_pl3_gen8,
161             sizeof(pp_nv12_load_save_pl3_gen8),
162             NULL,
163         },
164         gen8_pp_plx_avs_initialize,
165     },
166
167     {
168         {
169             "PL3_NV12",
170             PP_PL3_LOAD_SAVE_N12,
171             pp_pl3_load_save_nv12_gen8,
172             sizeof(pp_pl3_load_save_nv12_gen8),
173             NULL,
174         },
175
176         gen8_pp_plx_avs_initialize,
177     },
178
179     {
180         {
181             "PL3_PL3",
182             PP_PL3_LOAD_SAVE_PL3,
183             pp_pl3_load_save_pl3_gen8,
184             sizeof(pp_pl3_load_save_pl3_gen8),
185             NULL,
186         },
187
188         gen8_pp_plx_avs_initialize,
189     },
190
191     {
192         {
193             "NV12 Scaling module",
194             PP_NV12_SCALING,
195             pp_nv12_scaling_gen8,
196             sizeof(pp_nv12_scaling_gen8),
197             NULL,
198         },
199
200         gen8_pp_plx_avs_initialize,
201     },
202
203     {
204         {
205             "NV12 AVS module",
206             PP_NV12_AVS,
207             pp_nv12_avs_gen8,
208             sizeof(pp_nv12_avs_gen8),
209             NULL,
210         },
211
212         gen8_pp_plx_avs_initialize,
213     },
214
215     {
216         {
217             "NV12 DNDI module",
218             PP_NV12_DNDI,
219             pp_nv12_dndi_gen8,
220             sizeof(pp_nv12_dndi_gen8),
221             NULL,
222         },
223
224         pp_null_initialize,
225     },
226
227     {
228         {
229             "NV12 DN module",
230             PP_NV12_DN,
231             pp_nv12_dn_gen8,
232             sizeof(pp_nv12_dn_gen8),
233             NULL,
234         },
235
236         pp_null_initialize,
237     },
238     {
239         {
240             "NV12_PA module",
241             PP_NV12_LOAD_SAVE_PA,
242             pp_nv12_load_save_pa_gen8,
243             sizeof(pp_nv12_load_save_pa_gen8),
244             NULL,
245         },
246
247         gen8_pp_plx_avs_initialize,
248     },
249
250     {
251         {
252             "PL3_PA module",
253             PP_PL3_LOAD_SAVE_PA,
254             pp_pl3_load_save_pa_gen8,
255             sizeof(pp_pl3_load_save_pa_gen8),
256             NULL,
257         },
258
259         gen8_pp_plx_avs_initialize,
260     },
261
262     {
263         {
264             "PA_NV12 module",
265             PP_PA_LOAD_SAVE_NV12,
266             pp_pa_load_save_nv12_gen8,
267             sizeof(pp_pa_load_save_nv12_gen8),
268             NULL,
269         },
270
271         gen8_pp_plx_avs_initialize,
272     },
273
274     {
275         {
276             "PA_PL3 module",
277             PP_PA_LOAD_SAVE_PL3,
278             pp_pa_load_save_pl3_gen8,
279             sizeof(pp_pa_load_save_pl3_gen8),
280             NULL,
281         },
282
283         gen8_pp_plx_avs_initialize,
284     },
285
286     {
287         {
288             "PA_PA module",
289             PP_PA_LOAD_SAVE_PA,
290             pp_pa_load_save_pa_gen8,
291             sizeof(pp_pa_load_save_pa_gen8),
292             NULL,
293         },
294
295         gen8_pp_plx_avs_initialize,
296     },
297
298     {
299         {
300             "RGBX_NV12 module",
301             PP_RGBX_LOAD_SAVE_NV12,
302             pp_rgbx_load_save_nv12_gen8,
303             sizeof(pp_rgbx_load_save_nv12_gen8),
304             NULL,
305         },
306
307         gen8_pp_plx_avs_initialize,
308     },
309
310     {
311         {
312             "NV12_RGBX module",
313             PP_NV12_LOAD_SAVE_RGBX,
314             pp_nv12_load_save_rgbx_gen8,
315             sizeof(pp_nv12_load_save_rgbx_gen8),
316             NULL,
317         },
318
319         gen8_pp_plx_avs_initialize,
320     },
321 };
322
323 #define MAX_SCALING_SURFACES    16
324
325 #define DEFAULT_MOCS    0
326
327 static const uint32_t pp_yuv420p8_scaling_gen8[][4] = {
328 #include "shaders/post_processing/gen8/conv_nv12.g8b"
329 };
330
331 static const uint32_t pp_8bit_420_rgb32_scaling_gen8[][4] = {
332 #include "shaders/post_processing/gen8/conv_8bit_420_rgb32.g8b"
333 };
334
335 struct i965_kernel pp_common_scaling_gen8[] = {
336     {
337         "8bit to 8bit",
338         0,
339         pp_yuv420p8_scaling_gen8,
340         sizeof(pp_yuv420p8_scaling_gen8),
341         NULL,
342     },
343
344     {
345         "8bit 420 to rgb32",
346         1,
347         pp_8bit_420_rgb32_scaling_gen8,
348         sizeof(pp_8bit_420_rgb32_scaling_gen8),
349         NULL,
350     },
351 };
352
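/* Program the tiling fields of a GEN8 SURFACE_STATE from the BO's I915_TILING_* mode. */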
353 static void
354 gen8_pp_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
355 {
356     switch (tiling) {
357     case I915_TILING_NONE:
358         ss->ss0.tiled_surface = 0;
359         ss->ss0.tile_walk = 0;
360         break;
361     case I915_TILING_X:
362         ss->ss0.tiled_surface = 1;
363         ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
364         break;
365     case I915_TILING_Y:
366         ss->ss0.tiled_surface = 1;
367         ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
368         break;
369     }
370 }
371
372 static void
373 gen8_pp_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
374 {
375     switch (tiling) {
376     case I915_TILING_NONE:
377         ss->ss2.tiled_surface = 0;
378         ss->ss2.tile_walk = 0;
379         break;
380     case I915_TILING_X:
381         ss->ss2.tiled_surface = 1;
382         ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
383         break;
384     case I915_TILING_Y:
385         ss->ss2.tiled_surface = 1;
386         ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
387         break;
388     }
389 }
390
391
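/*
 * Write a GEN8 SURFACE_STATE at SURFACE_STATE_OFFSET(index) inside the
 * surface-state/binding-table BO: a 2D surface of the given format with base
 * address surf_bo + surf_bo_offset (relocated against ss8), align-4 H/V
 * alignment and the BO's tiling.  The entry's offset is also recorded in the
 * binding table; is_target adds the render-write domain to the relocation.
 */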
392 static void
393 gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
394                           dri_bo *surf_bo, unsigned long surf_bo_offset,
395                           int width, int height, int pitch, int format,
396                           int index, int is_target)
397 {
398     struct i965_driver_data *i965 = i965_driver_data(ctx);
399     struct gen8_surface_state *ss;
400     dri_bo *ss_bo;
401     unsigned int tiling;
402     unsigned int swizzle;
403
404     dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
405     ss_bo = pp_context->surface_state_binding_table.bo;
406     assert(ss_bo);
407
408     dri_bo_map(ss_bo, True);
409     assert(ss_bo->virtual);
410     ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
411     memset(ss, 0, sizeof(*ss));
412
413     if (IS_GEN9(i965->intel.device_info) ||
414         IS_GEN10(i965->intel.device_info))
415         ss->ss1.surface_mocs = GEN9_CACHE_PTE;
416
417     ss->ss0.surface_type = I965_SURFACE_2D;
418     ss->ss0.surface_format = format;
419     ss->ss8.base_addr = surf_bo->offset + surf_bo_offset;
420     ss->ss2.width = width - 1;
421     ss->ss2.height = height - 1;
422     ss->ss3.pitch = pitch - 1;
423
424     /* Always set to 1 (align-4 mode) per the B-spec */
425     ss->ss0.vertical_alignment = 1;
426     ss->ss0.horizontal_alignment = 1;
427
428     gen8_pp_set_surface_tiling(ss, tiling);
429     gen8_render_set_surface_scs(ss);
430     dri_bo_emit_reloc(ss_bo,
431                       I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
432                       surf_bo_offset,
433                       SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state, ss8),
434                       surf_bo);
435     ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
436     dri_bo_unmap(ss_bo);
437 }
438
439
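/*
 * Write a GEN8 SURFACE_STATE2 entry (the media/sampler_8x8 surface layout),
 * used below for the AVS sampler input planes: width, height, pitch, chroma
 * offsets and the interleave_chroma flag.
 */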
440 static void
441 gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
442                            dri_bo *surf_bo, unsigned long surf_bo_offset,
443                            int width, int height, int wpitch,
444                            int xoffset, int yoffset,
445                            int format, int interleave_chroma,
446                            int index)
447 {
448     struct i965_driver_data *i965 = i965_driver_data(ctx);
449     struct gen8_surface_state2 *ss2;
450     dri_bo *ss2_bo;
451     unsigned int tiling;
452     unsigned int swizzle;
453
454     dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
455     ss2_bo = pp_context->surface_state_binding_table.bo;
456     assert(ss2_bo);
457
458     dri_bo_map(ss2_bo, True);
459     assert(ss2_bo->virtual);
460     ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
461     memset(ss2, 0, sizeof(*ss2));
462
463     if (IS_GEN9(i965->intel.device_info) ||
464         IS_GEN10(i965->intel.device_info))
465         ss2->ss5.surface_object_mocs = GEN9_CACHE_PTE;
466
467     ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
468     ss2->ss1.cbcr_pixel_offset_v_direction = 0;
469     ss2->ss1.width = width - 1;
470     ss2->ss1.height = height - 1;
471     ss2->ss2.pitch = wpitch - 1;
472     ss2->ss2.interleave_chroma = interleave_chroma;
473     ss2->ss2.surface_format = format;
474     ss2->ss3.x_offset_for_cb = xoffset;
475     ss2->ss3.y_offset_for_cb = yoffset;
476     gen8_pp_set_surface2_tiling(ss2, tiling);
477     dri_bo_emit_reloc(ss2_bo,
478                       I915_GEM_DOMAIN_RENDER, 0,
479                       surf_bo_offset,
480                       SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state2, ss6),
481                       surf_bo);
482     ((unsigned int *)((char *)ss2_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
483     dri_bo_unmap(ss2_bo);
484 }
485
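/*
 * Compute per-plane width/height/pitch/offset for a VA surface or VA image
 * and program the corresponding surface states: for the render target, media
 * read/write surfaces (R8/R8G8 integer formats); for the source,
 * SURFACE_STATE2 entries for the sampler plus read/write surfaces starting
 * at base_index + 3.  RGB sources/targets additionally set the rgb_swap
 * flags in the static (CURBE) parameters.
 */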
486 static void
487 gen8_pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
488                                      const struct i965_surface *surface,
489                                      int base_index, int is_target,
490                                      const VARectangle *rect,
491                                      int *width, int *height, int *pitch, int *offset)
492 {
493     struct object_surface *obj_surface;
494     struct object_image *obj_image;
495     dri_bo *bo;
496     int fourcc = pp_get_surface_fourcc(ctx, surface);
497     const i965_fourcc_info *fourcc_info = get_fourcc_info(fourcc);
498
499     if (fourcc_info == NULL)
500         return;
501
502     if (surface->type == I965_SURFACE_TYPE_SURFACE) {
503         obj_surface = (struct object_surface *)surface->base;
504         bo = obj_surface->bo;
505         width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
506         height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
507         pitch[0] = obj_surface->width;
508         offset[0] = 0;
509
510         if (fourcc_info->num_planes == 1 && is_target)
511             width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
512
513         width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
514         height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
515         pitch[1] = obj_surface->cb_cr_pitch;
516         offset[1] = obj_surface->y_cb_offset * obj_surface->width;
517
518         width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
519         height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
520         pitch[2] = obj_surface->cb_cr_pitch;
521         offset[2] = obj_surface->y_cr_offset * obj_surface->width;
522     } else {
523         int U = 0, V = 0;
524
525         /* FIXME: add support for ARGB/ABGR image */
526         obj_image = (struct object_image *)surface->base;
527         bo = obj_image->bo;
528         width[0] = MIN(rect->x + rect->width, obj_image->image.width);
529         height[0] = MIN(rect->y + rect->height, obj_image->image.height);
530         pitch[0] = obj_image->image.pitches[0];
531         offset[0] = obj_image->image.offsets[0];
532
533         if (fourcc_info->num_planes == 1) {
534             if (is_target)
535                 width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
536         } else if (fourcc_info->num_planes == 2) {
537             U = 1, V = 1;
538         } else {
539             assert(fourcc_info->num_components == 3);
540
541             U = fourcc_info->components[1].plane;
542             V = fourcc_info->components[2].plane;
543             assert((U == 1 && V == 2) ||
544                    (U == 2 && V == 1));
545         }
546
547         /* Always set width/height even though they aren't used when fourcc_info->num_planes == 1 */
548         width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
549         height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
550         pitch[1] = obj_image->image.pitches[U];
551         offset[1] = obj_image->image.offsets[U];
552
553         width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
554         height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
555         pitch[2] = obj_image->image.pitches[V];
556         offset[2] = obj_image->image.offsets[V];
557     }
558
559     if (is_target) {
560         gen8_pp_set_surface_state(ctx, pp_context,
561                                   bo, 0,
562                                   ALIGN(width[0], 4) / 4, height[0], pitch[0],
563                                   I965_SURFACEFORMAT_R8_UINT,
564                                   base_index, 1);
565
566         if (fourcc_info->num_planes == 2) {
567             gen8_pp_set_surface_state(ctx, pp_context,
568                                       bo, offset[1],
569                                       ALIGN(width[1], 2) / 2, height[1], pitch[1],
570                                       I965_SURFACEFORMAT_R8G8_SINT,
571                                       base_index + 1, 1);
572         } else if (fourcc_info->num_planes == 3) {
573             gen8_pp_set_surface_state(ctx, pp_context,
574                                       bo, offset[1],
575                                       ALIGN(width[1], 4) / 4, height[1], pitch[1],
576                                       I965_SURFACEFORMAT_R8_SINT,
577                                       base_index + 1, 1);
578             gen8_pp_set_surface_state(ctx, pp_context,
579                                       bo, offset[2],
580                                       ALIGN(width[2], 4) / 4, height[2], pitch[2],
581                                       I965_SURFACEFORMAT_R8_SINT,
582                                       base_index + 2, 1);
583         }
584
585         if (fourcc_info->format == I965_COLOR_RGB) {
586             struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
587             /* the format is MSB: X-B-G-R */
588             pp_static_parameter->grf2.save_avs_rgb_swap = 0;
589             if ((fourcc == VA_FOURCC_BGRA) ||
590                 (fourcc == VA_FOURCC_BGRX)) {
591                 /* It is stored as MSB: X-R-G-B */
592                 pp_static_parameter->grf2.save_avs_rgb_swap = 1;
593             }
594         }
595     } else {
596         int format0 = SURFACE_FORMAT_Y8_UNORM;
597
598         switch (fourcc) {
599         case VA_FOURCC_YUY2:
600             format0 = SURFACE_FORMAT_YCRCB_NORMAL;
601             break;
602
603         case VA_FOURCC_UYVY:
604             format0 = SURFACE_FORMAT_YCRCB_SWAPY;
605             break;
606
607         default:
608             break;
609         }
610
611         if (fourcc_info->format == I965_COLOR_RGB) {
612             struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
613             /* Only R8G8B8A8_UNORM is supported for BGRX or RGBX */
614             format0 = SURFACE_FORMAT_R8G8B8A8_UNORM;
615             pp_static_parameter->grf2.src_avs_rgb_swap = 0;
616             if ((fourcc == VA_FOURCC_BGRA) ||
617                 (fourcc == VA_FOURCC_BGRX)) {
618                 pp_static_parameter->grf2.src_avs_rgb_swap = 1;
619             }
620         }
621
622         gen8_pp_set_surface2_state(ctx, pp_context,
623                                    bo, offset[0],
624                                    width[0], height[0], pitch[0],
625                                    0, 0,
626                                    format0, 0,
627                                    base_index);
628
629         if (fourcc_info->num_planes == 2) {
630             gen8_pp_set_surface2_state(ctx, pp_context,
631                                        bo, offset[1],
632                                        width[1], height[1], pitch[1],
633                                        0, 0,
634                                        SURFACE_FORMAT_R8B8_UNORM, 0,
635                                        base_index + 1);
636         } else if (fourcc_info->num_planes == 3) {
637             gen8_pp_set_surface2_state(ctx, pp_context,
638                                        bo, offset[1],
639                                        width[1], height[1], pitch[1],
640                                        0, 0,
641                                        SURFACE_FORMAT_R8_UNORM, 0,
642                                        base_index + 1);
643             gen8_pp_set_surface2_state(ctx, pp_context,
644                                        bo, offset[2],
645                                        width[2], height[2], pitch[2],
646                                        0, 0,
647                                        SURFACE_FORMAT_R8_UNORM, 0,
648                                        base_index + 2);
649         }
650
651         gen8_pp_set_surface_state(ctx, pp_context,
652                                   bo, 0,
653                                   ALIGN(width[0], 4) / 4, height[0], pitch[0],
654                                   I965_SURFACEFORMAT_R8_UINT,
655                                   base_index + 3, 1);
656
657         if (fourcc_info->num_planes == 2) {
658             gen8_pp_set_surface_state(ctx, pp_context,
659                                       bo, offset[1],
660                                       ALIGN(width[1], 2) / 2, height[1], pitch[1],
661                                       I965_SURFACEFORMAT_R8G8_SINT,
662                                       base_index + 4, 1);
663         } else if (fourcc_info->num_planes == 3) {
664             gen8_pp_set_surface_state(ctx, pp_context,
665                                       bo, offset[1],
666                                       ALIGN(width[1], 4) / 4, height[1], pitch[1],
667                                       I965_SURFACEFORMAT_R8_SINT,
668                                       base_index + 4, 1);
669             gen8_pp_set_surface_state(ctx, pp_context,
670                                       bo, offset[2],
671                                       ALIGN(width[2], 4) / 4, height[2], pitch[2],
672                                       I965_SURFACEFORMAT_R8_SINT,
673                                       base_index + 5, 1);
674         }
675     }
676 }
677
678 static int
679 pp_null_x_steps(void *private_context)
680 {
681     return 1;
682 }
683
684 static int
685 pp_null_y_steps(void *private_context)
686 {
687     return 1;
688 }
689
690 static int
691 pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
692 {
693     return 0;
694 }
695
696 VAStatus
697 pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
698                    const struct i965_surface *src_surface,
699                    const VARectangle *src_rect,
700                    struct i965_surface *dst_surface,
701                    const VARectangle *dst_rect,
702                    void *filter_param)
703 {
704     /* private function & data */
705     pp_context->pp_x_steps = pp_null_x_steps;
706     pp_context->pp_y_steps = pp_null_y_steps;
707     pp_context->private_context = NULL;
708     pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
709
710     dst_surface->flags = src_surface->flags;
711
712     return VA_STATUS_SUCCESS;
713 }
714
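/*
 * The kernel walks the destination in GPU_ASM_BLOCK_WIDTH x
 * GPU_ASM_BLOCK_HEIGHT (16x8) blocks and the destination x offset must be
 * dword (4-pixel) aligned, so compute bit masks selecting which columns of
 * the leftmost/rightmost blocks and which rows of the bottom blocks actually
 * fall inside dst_rect.
 */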
715 static void calculate_boundary_block_mask(struct i965_post_processing_context *pp_context, const VARectangle *dst_rect)
716 {
717     int i, dst_width_adjust;
718     /* The x offset of the destination surface must be dword aligned, so we
719      * extend it on the left edge and mask out the pixels we are not interested in.
720      */
721     if (dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT) {
722         pp_context->block_horizontal_mask_left = 0;
723         for (i = dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT; i < GPU_ASM_BLOCK_WIDTH; i++) {
724             pp_context->block_horizontal_mask_left |= 1 << i;
725         }
726     } else {
727         pp_context->block_horizontal_mask_left = 0xffff;
728     }
729
730     dst_width_adjust = dst_rect->width + dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT;
731     if (dst_width_adjust % GPU_ASM_BLOCK_WIDTH) {
732         pp_context->block_horizontal_mask_right = (1 << (dst_width_adjust % GPU_ASM_BLOCK_WIDTH)) - 1;
733     } else {
734         pp_context->block_horizontal_mask_right = 0xffff;
735     }
736
737     if (dst_rect->height % GPU_ASM_BLOCK_HEIGHT) {
738         pp_context->block_vertical_mask_bottom = (1 << (dst_rect->height % GPU_ASM_BLOCK_HEIGHT)) - 1;
739     } else {
740         pp_context->block_vertical_mask_bottom = 0xff;
741     }
742
743 }
744
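/*
 * The AVS kernel processes the destination in 16x16 blocks; the step counts
 * come from the 16-aligned dest_w/dest_h set up in
 * gen8_pp_plx_avs_initialize(), and each block's origin and horizontal
 * scaling step are written per block below.
 */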
745 static int
746 gen7_pp_avs_x_steps(void *private_context)
747 {
748     struct pp_avs_context *pp_avs_context = private_context;
749
750     return pp_avs_context->dest_w / 16;
751 }
752
753 static int
754 gen7_pp_avs_y_steps(void *private_context)
755 {
756     struct pp_avs_context *pp_avs_context = private_context;
757
758     return pp_avs_context->dest_h / 16;
759 }
760
761 static int
762 gen7_pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
763 {
764     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)pp_context->private_context;
765     struct gen7_pp_inline_parameter *pp_inline_parameter = pp_context->pp_inline_parameter;
766
767     pp_inline_parameter->grf9.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
768     pp_inline_parameter->grf9.destination_block_vertical_origin = y * 16 + pp_avs_context->dest_y;
769     pp_inline_parameter->grf9.constant_0 = 0xffffffff;
770     pp_inline_parameter->grf9.sampler_load_main_video_x_scaling_step = pp_avs_context->horiz_range / pp_avs_context->src_w;
771
772     return 0;
773 }
774
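/*
 * For packed YUY2 (Y0 U Y1 V) and UYVY (U Y0 V Y1) surfaces, record the byte
 * position of the Y/U/V components within each 4-byte pixel pair
 * (di_destination_packed_*_component_offset) so the kernel can address the
 * packed YUV data correctly.
 */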
775 static void gen7_update_src_surface_uv_offset(VADriverContextP    ctx,
776                                               struct i965_post_processing_context *pp_context,
777                                               const struct i965_surface *surface)
778 {
779     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
780     int fourcc = pp_get_surface_fourcc(ctx, surface);
781
782     if (fourcc == VA_FOURCC_YUY2) {
783         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 0;
784         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 1;
785         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 3;
786     } else if (fourcc == VA_FOURCC_UYVY) {
787         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 1;
788         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 0;
789         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 2;
790     }
791 }
792
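/*
 * AVS scaler configuration for GEN8: 6 fractional bits per coefficient
 * (matching the intel_format_convert(..., 1, 6, 1) conversions below),
 * 16 phases, 8 luma taps and 4 chroma taps per phase, plus the legal
 * per-tap ranges used by avs_update_coefficients().
 */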
793 static const AVSConfig gen8_avs_config = {
794     .coeff_frac_bits = 6,
795     .coeff_epsilon = 1.0f / (1U << 6),
796     .num_phases = 16,
797     .num_luma_coeffs = 8,
798     .num_chroma_coeffs = 4,
799
800     .coeff_range = {
801         .lower_bound = {
802             .y_k_h = { -2, -2, -2, -2, -2, -2, -2, -2 },
803             .y_k_v = { -2, -2, -2, -2, -2, -2, -2, -2 },
804             .uv_k_h = { -1, -2, -2, -1 },
805             .uv_k_v = { -1, -2, -2, -1 },
806         },
807         .upper_bound = {
808             .y_k_h = { 2, 2, 2, 2, 2, 2, 2, 2 },
809             .y_k_v = { 2, 2, 2, 2, 2, 2, 2, 2 },
810             .uv_k_h = { 1, 2, 2, 1 },
811             .uv_k_v = { 1, 2, 2, 1 },
812         },
813     },
814 };
815
816 static int
817 gen8_pp_get_8tap_filter_mode(VADriverContextP ctx,
818                              const struct i965_surface *surface)
819 {
820     int fourcc = pp_get_surface_fourcc(ctx, surface);
821
822     if (fourcc == VA_FOURCC_YUY2 ||
823         fourcc == VA_FOURCC_UYVY)
824         return 1;
825     else
826         return 3;
827 }
828
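/*
 * Media-block reads (signalled through grf2.avs_wa_enable below) are only
 * usable when both source and destination are 4:2:0 and the source and
 * destination rectangles are identical, i.e. a straight copy with no scaling
 * or offset change.
 */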
829 static int
830 gen8_pp_kernel_use_media_read_msg(VADriverContextP ctx,
831                                   const struct i965_surface *src_surface,
832                                   const VARectangle *src_rect,
833                                   const struct i965_surface *dst_surface,
834                                   const VARectangle *dst_rect)
835 {
836     int src_fourcc = pp_get_surface_fourcc(ctx, src_surface);
837     int dst_fourcc = pp_get_surface_fourcc(ctx, dst_surface);
838     const i965_fourcc_info *src_fourcc_info = get_fourcc_info(src_fourcc);
839     const i965_fourcc_info *dst_fourcc_info = get_fourcc_info(dst_fourcc);
840
841     if (!src_fourcc_info ||
842         src_fourcc_info->subsampling != SUBSAMPLE_YUV420 ||
843         !dst_fourcc_info ||
844         dst_fourcc_info->subsampling != SUBSAMPLE_YUV420)
845         return 0;
846
847     if (src_rect->x == dst_rect->x &&
848         src_rect->y == dst_rect->y &&
849         src_rect->width == dst_rect->width &&
850         src_rect->height == dst_rect->height)
851         return 1;
852
853     return 0;
854 }
855
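/*
 * AVS (scaling) initialization: program the source (sampler) and destination
 * (media write) surface states, fill the single GEN8 sampler_8x8 AVS/IEF
 * state with fixed IEF defaults and the scaler coefficients generated by
 * avs_update_coefficients(), then compute the per-frame scaling steps and
 * frame origins passed to the kernel through the static (CURBE) parameters.
 */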
856 VAStatus
857 gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
858                            const struct i965_surface *src_surface,
859                            const VARectangle *src_rect,
860                            struct i965_surface *dst_surface,
861                            const VARectangle *dst_rect,
862                            void *filter_param)
863 {
864     /* TODO: Add the sampler_8x8 state */
865     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->pp_avs_context;
866     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
867     struct gen8_sampler_8x8_avs *sampler_8x8;
868     int i;
869     int width[3], height[3], pitch[3], offset[3];
870     int src_width, src_height;
871     unsigned char *cc_ptr;
872     AVSState * const avs = &pp_avs_context->state;
873     float sx, sy;
874     const float * yuv_to_rgb_coefs;
875     size_t yuv_to_rgb_coefs_size;
876
877     memset(pp_static_parameter, 0, sizeof(struct gen7_pp_static_parameter));
878
879     /* source surface */
880     gen8_pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 0, 0,
881                                          src_rect,
882                                          width, height, pitch, offset);
883     src_height = height[0];
884     src_width  = width[0];
885
886     /* destination surface */
887     gen8_pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 24, 1,
888                                          dst_rect,
889                                          width, height, pitch, offset);
890
891     /* sampler 8x8 state */
892     dri_bo_map(pp_context->dynamic_state.bo, True);
893     assert(pp_context->dynamic_state.bo->virtual);
894
895     cc_ptr = (unsigned char *) pp_context->dynamic_state.bo->virtual +
896              pp_context->sampler_offset;
897     /* Currently only one gen8 sampler_8x8 is initialized */
898     sampler_8x8 = (struct gen8_sampler_8x8_avs *) cc_ptr;
899     memset(sampler_8x8, 0, sizeof(*sampler_8x8));
900
901     sampler_8x8->dw0.gain_factor = 44;
902     sampler_8x8->dw0.weak_edge_threshold = 1;
903     sampler_8x8->dw0.strong_edge_threshold = 8;
904     /* Use the same value as on Ivybridge instead of the default
905      * sampler_8x8->dw0.r3x_coefficient = 5;
906      */
907     sampler_8x8->dw0.r3x_coefficient = 27;
908     sampler_8x8->dw0.r3c_coefficient = 5;
909
910     sampler_8x8->dw2.global_noise_estimation = 255;
911     sampler_8x8->dw2.non_edge_weight = 1;
912     sampler_8x8->dw2.regular_weight = 2;
913     sampler_8x8->dw2.strong_edge_weight = 7;
914     /* Use the same values as on Ivybridge instead of the defaults
915      * sampler_8x8->dw2.r5x_coefficient = 7;
916      * sampler_8x8->dw2.r5cx_coefficient = 7;
917      * sampler_8x8->dw2.r5c_coefficient = 7;
918      */
919     sampler_8x8->dw2.r5x_coefficient = 9;
920     sampler_8x8->dw2.r5cx_coefficient = 8;
921     sampler_8x8->dw2.r5c_coefficient = 3;
922
923     sampler_8x8->dw3.sin_alpha = 101; /* sin_alpha = 0 */
924     sampler_8x8->dw3.cos_alpha = 79; /* cos_alpha = 0 */
925     sampler_8x8->dw3.sat_max = 0x1f;
926     sampler_8x8->dw3.hue_max = 14;
927     /* The 8-tap filter setting determines whether the adaptive filter is
928      * applied for all channels (dw153).
929      * If the 8-tap filter is disabled, the adaptive filter must be disabled too;
930      * it can only be enabled when the 8-tap filter is enabled.
931      */
932     sampler_8x8->dw3.enable_8tap_filter = gen8_pp_get_8tap_filter_mode(ctx, src_surface);
933     sampler_8x8->dw3.ief4_smooth_enable = 0;
934
935     sampler_8x8->dw4.s3u = 0;
936     sampler_8x8->dw4.diamond_margin = 4;
937     sampler_8x8->dw4.vy_std_enable = 0;
938     sampler_8x8->dw4.umid = 110;
939     sampler_8x8->dw4.vmid = 154;
940
941     sampler_8x8->dw5.diamond_dv = 0;
942     sampler_8x8->dw5.diamond_th = 35;
943     sampler_8x8->dw5.diamond_alpha = 100; /* diamond_alpha = 0 */
944     sampler_8x8->dw5.hs_margin = 3;
945     sampler_8x8->dw5.diamond_du = 2;
946
947     sampler_8x8->dw6.y_point1 = 46;
948     sampler_8x8->dw6.y_point2 = 47;
949     sampler_8x8->dw6.y_point3 = 254;
950     sampler_8x8->dw6.y_point4 = 255;
951
952     sampler_8x8->dw7.inv_margin_vyl = 3300; /* inv_margin_vyl = 0 */
953
954     sampler_8x8->dw8.inv_margin_vyu = 1600; /* inv_margin_vyu = 0 */
955     sampler_8x8->dw8.p0l = 46;
956     sampler_8x8->dw8.p1l = 216;
957
958     sampler_8x8->dw9.p2l = 236;
959     sampler_8x8->dw9.p3l = 236;
960     sampler_8x8->dw9.b0l = 133;
961     sampler_8x8->dw9.b1l = 130;
962
963     sampler_8x8->dw10.b2l = 130;
964     sampler_8x8->dw10.b3l = 130;
965     /* s0l = -5 / 256. s2.8 */
966     sampler_8x8->dw10.s0l = 1029;    /* s0l = 0 */
967     sampler_8x8->dw10.y_slope2 = 31; /* y_slope2 = 0 */
968
969     sampler_8x8->dw11.s1l = 0;
970     sampler_8x8->dw11.s2l = 0;
971
972     sampler_8x8->dw12.s3l = 0;
973     sampler_8x8->dw12.p0u = 46;
974     sampler_8x8->dw12.p1u = 66;
975     sampler_8x8->dw12.y_slope1 = 31; /* y_slope1 = 0 */
976
977     sampler_8x8->dw13.p2u = 130;
978     sampler_8x8->dw13.p3u = 236;
979     sampler_8x8->dw13.b0u = 143;
980     sampler_8x8->dw13.b1u = 163;
981
982     sampler_8x8->dw14.b2u = 200;
983     sampler_8x8->dw14.b3u = 140;
984     sampler_8x8->dw14.s0u = 256;  /* s0u = 0 */
985
986     sampler_8x8->dw15.s1u = 113; /* s1u = 0 */
987     sampler_8x8->dw15.s2u = 1203; /* s2u = 0 */
988
989     sx = (float)dst_rect->width / src_rect->width;
990     sy = (float)dst_rect->height / src_rect->height;
991     avs_update_coefficients(avs, sx, sy, pp_context->filter_flags);
992
993     assert(avs->config->num_phases >= 16);
994     for (i = 0; i <= 16; i++) {
995         struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
996                     &sampler_8x8->coefficients[i];
997         const AVSCoeffs * const coeffs = &avs->coeffs[i];
998
999         sampler_8x8_state->dw0.table_0x_filter_c0 =
1000             intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
1001         sampler_8x8_state->dw0.table_0y_filter_c0 =
1002             intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
1003         sampler_8x8_state->dw0.table_0x_filter_c1 =
1004             intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
1005         sampler_8x8_state->dw0.table_0y_filter_c1 =
1006             intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
1007
1008         sampler_8x8_state->dw1.table_0x_filter_c2 =
1009             intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
1010         sampler_8x8_state->dw1.table_0y_filter_c2 =
1011             intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
1012         sampler_8x8_state->dw1.table_0x_filter_c3 =
1013             intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
1014         sampler_8x8_state->dw1.table_0y_filter_c3 =
1015             intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
1016
1017         sampler_8x8_state->dw2.table_0x_filter_c4 =
1018             intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
1019         sampler_8x8_state->dw2.table_0y_filter_c4 =
1020             intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
1021         sampler_8x8_state->dw2.table_0x_filter_c5 =
1022             intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
1023         sampler_8x8_state->dw2.table_0y_filter_c5 =
1024             intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
1025
1026         sampler_8x8_state->dw3.table_0x_filter_c6 =
1027             intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
1028         sampler_8x8_state->dw3.table_0y_filter_c6 =
1029             intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
1030         sampler_8x8_state->dw3.table_0x_filter_c7 =
1031             intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
1032         sampler_8x8_state->dw3.table_0y_filter_c7 =
1033             intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
1034
1035         sampler_8x8_state->dw4.pad0 = 0;
1036         sampler_8x8_state->dw5.pad0 = 0;
1037         sampler_8x8_state->dw4.table_1x_filter_c2 =
1038             intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
1039         sampler_8x8_state->dw4.table_1x_filter_c3 =
1040             intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
1041         sampler_8x8_state->dw5.table_1x_filter_c4 =
1042             intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
1043         sampler_8x8_state->dw5.table_1x_filter_c5 =
1044             intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
1045
1046         sampler_8x8_state->dw6.pad0 = 0;
1047         sampler_8x8_state->dw7.pad0 = 0;
1048         sampler_8x8_state->dw6.table_1y_filter_c2 =
1049             intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
1050         sampler_8x8_state->dw6.table_1y_filter_c3 =
1051             intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
1052         sampler_8x8_state->dw7.table_1y_filter_c4 =
1053             intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
1054         sampler_8x8_state->dw7.table_1y_filter_c5 =
1055             intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
1056     }
1057
1058     sampler_8x8->dw152.default_sharpness_level =
1059         -avs_is_needed(pp_context->filter_flags);
1060     sampler_8x8->dw153.adaptive_filter_for_all_channel = 1;
1061     sampler_8x8->dw153.bypass_y_adaptive_filtering = 1;
1062     sampler_8x8->dw153.bypass_x_adaptive_filtering = 1;
1063
1064     for (; i <= avs->config->num_phases; i++) {
1065         struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
1066                     &sampler_8x8->coefficients1[i - 17];
1067         const AVSCoeffs * const coeffs = &avs->coeffs[i];
1068
1069         sampler_8x8_state->dw0.table_0x_filter_c0 =
1070             intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
1071         sampler_8x8_state->dw0.table_0y_filter_c0 =
1072             intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
1073         sampler_8x8_state->dw0.table_0x_filter_c1 =
1074             intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
1075         sampler_8x8_state->dw0.table_0y_filter_c1 =
1076             intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
1077
1078         sampler_8x8_state->dw1.table_0x_filter_c2 =
1079             intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
1080         sampler_8x8_state->dw1.table_0y_filter_c2 =
1081             intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
1082         sampler_8x8_state->dw1.table_0x_filter_c3 =
1083             intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
1084         sampler_8x8_state->dw1.table_0y_filter_c3 =
1085             intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
1086
1087         sampler_8x8_state->dw2.table_0x_filter_c4 =
1088             intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
1089         sampler_8x8_state->dw2.table_0y_filter_c4 =
1090             intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
1091         sampler_8x8_state->dw2.table_0x_filter_c5 =
1092             intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
1093         sampler_8x8_state->dw2.table_0y_filter_c5 =
1094             intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
1095
1096         sampler_8x8_state->dw3.table_0x_filter_c6 =
1097             intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
1098         sampler_8x8_state->dw3.table_0y_filter_c6 =
1099             intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
1100         sampler_8x8_state->dw3.table_0x_filter_c7 =
1101             intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
1102         sampler_8x8_state->dw3.table_0y_filter_c7 =
1103             intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
1104
1105         sampler_8x8_state->dw4.pad0 = 0;
1106         sampler_8x8_state->dw5.pad0 = 0;
1107         sampler_8x8_state->dw4.table_1x_filter_c2 =
1108             intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
1109         sampler_8x8_state->dw4.table_1x_filter_c3 =
1110             intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
1111         sampler_8x8_state->dw5.table_1x_filter_c4 =
1112             intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
1113         sampler_8x8_state->dw5.table_1x_filter_c5 =
1114             intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
1115
1116         sampler_8x8_state->dw6.pad0 = 0;
1117         sampler_8x8_state->dw7.pad0 = 0;
1118         sampler_8x8_state->dw6.table_1y_filter_c2 =
1119             intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
1120         sampler_8x8_state->dw6.table_1y_filter_c3 =
1121             intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
1122         sampler_8x8_state->dw7.table_1y_filter_c4 =
1123             intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
1124         sampler_8x8_state->dw7.table_1y_filter_c5 =
1125             intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
1126     }
1127
1128     dri_bo_unmap(pp_context->dynamic_state.bo);
1129
1130
1131     /* private function & data */
1132     pp_context->pp_x_steps = gen7_pp_avs_x_steps;
1133     pp_context->pp_y_steps = gen7_pp_avs_y_steps;
1134     pp_context->private_context = &pp_context->pp_avs_context;
1135     pp_context->pp_set_block_parameter = gen7_pp_avs_set_block_parameter;
1136
1137     int dst_left_edge_extend = dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT;
1138     pp_avs_context->dest_x = dst_rect->x - dst_left_edge_extend;
1139     pp_avs_context->dest_y = dst_rect->y;
1140     pp_avs_context->dest_w = ALIGN(dst_rect->width + dst_left_edge_extend, 16);
1141     pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
1142     pp_avs_context->src_w = src_rect->width;
1143     pp_avs_context->src_h = src_rect->height;
1144     pp_avs_context->horiz_range = (float)src_rect->width / src_width;
1145
1146     int dw = (pp_avs_context->src_w - 1) / 16 + 1;
1147     dw = MAX(dw, dst_rect->width + dst_left_edge_extend);
1148
1149     pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
1150     pp_static_parameter->grf2.avs_wa_enable = gen8_pp_kernel_use_media_read_msg(ctx,
1151                                                                                 src_surface, src_rect,
1152                                                                                 dst_surface, dst_rect); /* reuse this flag for media block reading on gen8+ */
1153     pp_static_parameter->grf2.alpha = 255;
1154
1155     pp_static_parameter->grf3.sampler_load_horizontal_scaling_step_ratio = (float) pp_avs_context->src_w / dw;
1156     pp_static_parameter->grf4.sampler_load_vertical_scaling_step = (float) src_rect->height / src_height / dst_rect->height;
1157     pp_static_parameter->grf5.sampler_load_vertical_frame_origin = (float) src_rect->y / src_height -
1158                                                                    (float) pp_avs_context->dest_y * pp_static_parameter->grf4.sampler_load_vertical_scaling_step;
1159     pp_static_parameter->grf6.sampler_load_horizontal_frame_origin = (float) src_rect->x / src_width -
1160                                                                      (float) pp_avs_context->dest_x * pp_avs_context->horiz_range / dw;
1161
1162     gen7_update_src_surface_uv_offset(ctx, pp_context, dst_surface);
1163
1164     yuv_to_rgb_coefs = i915_color_standard_to_coefs(i915_filter_to_color_standard(src_surface->flags &
1165                                                                                   VA_SRC_COLOR_MASK),
1166                                                     &yuv_to_rgb_coefs_size);
1167     memcpy(&pp_static_parameter->grf7, yuv_to_rgb_coefs, yuv_to_rgb_coefs_size);
1168
1169     dst_surface->flags = src_surface->flags;
1170
1171     return VA_STATUS_SUCCESS;
1172 }
1173
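/*
 * Common GEN8 PP setup: (re)allocate the surface-state/binding-table BO and
 * the dynamic-state BO (CURBE, interface descriptors and sampler state at
 * 64-byte aligned offsets), clear the static/inline parameters, call the
 * selected module's initialize() hook and precompute the boundary block
 * masks for dst_rect.
 */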
1174 VAStatus
1175 gen8_pp_initialize(
1176     VADriverContextP   ctx,
1177     struct i965_post_processing_context *pp_context,
1178     const struct i965_surface *src_surface,
1179     const VARectangle *src_rect,
1180     struct i965_surface *dst_surface,
1181     const VARectangle *dst_rect,
1182     int                pp_index,
1183     void * filter_param
1184 )
1185 {
1186     VAStatus va_status;
1187     struct i965_driver_data *i965 = i965_driver_data(ctx);
1188     dri_bo *bo;
1189     int bo_size;
1190     unsigned int end_offset;
1191     struct pp_module *pp_module;
1192     int static_param_size, inline_param_size;
1193
1194     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1195     bo = dri_bo_alloc(i965->intel.bufmgr,
1196                       "surface state & binding table",
1197                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_PP_SURFACES,
1198                       4096);
1199     assert(bo);
1200     pp_context->surface_state_binding_table.bo = bo;
1201
1202     pp_context->idrt.num_interface_descriptors = 0;
1203
1204     pp_context->sampler_size = 4 * 4096;
1205
1206     bo_size = 4096 + pp_context->curbe_size + pp_context->sampler_size
1207               + pp_context->idrt_size;
1208
1209     dri_bo_unreference(pp_context->dynamic_state.bo);
1210     bo = dri_bo_alloc(i965->intel.bufmgr,
1211                       "dynamic_state",
1212                       bo_size,
1213                       4096);
1214
1215     assert(bo);
1216     pp_context->dynamic_state.bo = bo;
1217     pp_context->dynamic_state.bo_size = bo_size;
1218
1219     end_offset = 0;
1220     pp_context->dynamic_state.end_offset = 0;
1221
1222     /* Constant buffer offset */
1223     pp_context->curbe_offset = ALIGN(end_offset, 64);
1224     end_offset = pp_context->curbe_offset + pp_context->curbe_size;
1225
1226     /* Interface descriptor offset */
1227     pp_context->idrt_offset = ALIGN(end_offset, 64);
1228     end_offset = pp_context->idrt_offset + pp_context->idrt_size;
1229
1230     /* Sampler state offset */
1231     pp_context->sampler_offset = ALIGN(end_offset, 64);
1232     end_offset = pp_context->sampler_offset + pp_context->sampler_size;
1233
1234     /* update the end offset of dynamic_state */
1235     pp_context->dynamic_state.end_offset = ALIGN(end_offset, 64);
1236
1237     static_param_size = sizeof(struct gen7_pp_static_parameter);
1238     inline_param_size = sizeof(struct gen7_pp_inline_parameter);
1239
1240     memset(pp_context->pp_static_parameter, 0, static_param_size);
1241     memset(pp_context->pp_inline_parameter, 0, inline_param_size);
1242
1243     assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
1244     pp_context->current_pp = pp_index;
1245     pp_module = &pp_context->pp_modules[pp_index];
1246
1247     if (pp_module->initialize)
1248         va_status = pp_module->initialize(ctx, pp_context,
1249                                           src_surface,
1250                                           src_rect,
1251                                           dst_surface,
1252                                           dst_rect,
1253                                           filter_param);
1254     else
1255         va_status = VA_STATUS_ERROR_UNIMPLEMENTED;
1256
1257     calculate_boundary_block_mask(pp_context, dst_rect);
1258
1259     return va_status;
1260 }
1261
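/*
 * Append one GEN8 interface descriptor at idrt_offset: kernel start pointer
 * for the current module, sampler state and binding table pointers, and a
 * CURBE read length of 8 registers (grf 1-8).
 */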
1262 static void
1263 gen8_pp_interface_descriptor_table(VADriverContextP   ctx,
1264                                    struct i965_post_processing_context *pp_context)
1265 {
1266     struct gen8_interface_descriptor_data *desc;
1267     dri_bo *bo;
1268     int pp_index = pp_context->current_pp;
1269     unsigned char *cc_ptr;
1270
1271     bo = pp_context->dynamic_state.bo;
1272
1273     dri_bo_map(bo, 1);
1274     assert(bo->virtual);
1275     cc_ptr = (unsigned char *)bo->virtual + pp_context->idrt_offset;
1276
1277     desc = (struct gen8_interface_descriptor_data *) cc_ptr +
1278            pp_context->idrt.num_interface_descriptors;
1279
1280     memset(desc, 0, sizeof(*desc));
1281     desc->desc0.kernel_start_pointer =
1282         pp_context->pp_modules[pp_index].kernel.kernel_offset >> 6; /* reloc */
1283     desc->desc2.single_program_flow = 1;
1284     desc->desc2.floating_point_mode = FLOATING_POINT_IEEE_754;
1285     desc->desc3.sampler_count = 0;      /* 1 - 4 samplers used */
1286     desc->desc3.sampler_state_pointer = pp_context->sampler_offset >> 5;
1287     desc->desc4.binding_table_entry_count = 0;
1288     desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
1289     desc->desc5.constant_urb_entry_read_offset = 0;
1290
1291     desc->desc5.constant_urb_entry_read_length = 8; /* grf 1-8 */
1292
1293     dri_bo_unmap(bo);
1294     pp_context->idrt.num_interface_descriptors++;
1295 }
1296
1297
1298 static void
1299 gen8_pp_upload_constants(VADriverContextP ctx,
1300                          struct i965_post_processing_context *pp_context)
1301 {
1302     unsigned char *constant_buffer;
1303     int param_size;
1304
1305     assert(sizeof(struct gen7_pp_static_parameter) == 256);
1306
1307     param_size = sizeof(struct gen7_pp_static_parameter);
1308
1309     dri_bo_map(pp_context->dynamic_state.bo, 1);
1310     assert(pp_context->dynamic_state.bo->virtual);
1311     constant_buffer = (unsigned char *) pp_context->dynamic_state.bo->virtual +
1312                       pp_context->curbe_offset;
1313
1314     memcpy(constant_buffer, pp_context->pp_static_parameter, param_size);
1315     dri_bo_unmap(pp_context->dynamic_state.bo);
1316     return;
1317 }
1318
1319 void
1320 gen8_pp_states_setup(VADriverContextP ctx,
1321                      struct i965_post_processing_context *pp_context)
1322 {
1323     gen8_pp_interface_descriptor_table(ctx, pp_context);
1324     gen8_pp_upload_constants(ctx, pp_context);
1325 }
1326
1327 static void
1328 gen6_pp_pipeline_select(VADriverContextP ctx,
1329                         struct i965_post_processing_context *pp_context)
1330 {
1331     struct intel_batchbuffer *batch = pp_context->batch;
1332
1333     BEGIN_BATCH(batch, 1);
1334     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
1335     ADVANCE_BATCH(batch);
1336 }
1337
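/*
 * STATE_BASE_ADDRESS (16 dwords): general state, surface state (the
 * binding-table BO), dynamic state (CURBE/IDRT/sampler BO), indirect object
 * and instruction (kernel) base addresses, followed by the buffer-size
 * dwords.
 */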
1338 static void
1339 gen8_pp_state_base_address(VADriverContextP ctx,
1340                            struct i965_post_processing_context *pp_context)
1341 {
1342     struct intel_batchbuffer *batch = pp_context->batch;
1343
1344     BEGIN_BATCH(batch, 16);
1345     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));
1346     /* DW1 Generate state address */
1347     OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1348     OUT_BATCH(batch, 0);
1349     OUT_BATCH(batch, 0);
1350
1351     /* DW4-5. Surface state address */
1352     OUT_RELOC64(batch, pp_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1353
1354     /* DW6-7. Dynamic state address */
1355     OUT_RELOC64(batch, pp_context->dynamic_state.bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
1356                 0, 0 | BASE_ADDRESS_MODIFY);
1357
1358     /* DW8. Indirect object address */
1359     OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1360     OUT_BATCH(batch, 0);
1361
1362     /* DW10-11. Instruction base address */
1363     OUT_RELOC64(batch, pp_context->instruction_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
1364
1365     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1366     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1367     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1368     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1369     ADVANCE_BATCH(batch);
1370 }
1371
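/*
 * MEDIA_VFE_STATE: maximum thread count, number of URB entries, URB entry
 * size and CURBE allocation size (the latter two in 256-bit units) taken
 * from vfe_gpu_state.
 */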
1372 void
1373 gen8_pp_vfe_state(VADriverContextP ctx,
1374                   struct i965_post_processing_context *pp_context)
1375 {
1376     struct intel_batchbuffer *batch = pp_context->batch;
1377
1378     BEGIN_BATCH(batch, 9);
1379     OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
1380     OUT_BATCH(batch, 0);
1381     OUT_BATCH(batch, 0);
1382     OUT_BATCH(batch,
1383               (pp_context->vfe_gpu_state.max_num_threads - 1) << 16 |
1384               pp_context->vfe_gpu_state.num_urb_entries << 8);
1385     OUT_BATCH(batch, 0);
1386     OUT_BATCH(batch,
1387               (pp_context->vfe_gpu_state.urb_entry_size) << 16 |
1388               /* URB Entry Allocation Size, in 256-bit units */
1389               (pp_context->vfe_gpu_state.curbe_allocation_size));
1390     /* CURBE Allocation Size, in 256-bit units */
1391     OUT_BATCH(batch, 0);
1392     OUT_BATCH(batch, 0);
1393     OUT_BATCH(batch, 0);
1394     ADVANCE_BATCH(batch);
1395 }
1396
1397 void
1398 gen8_interface_descriptor_load(VADriverContextP ctx,
1399                                struct i965_post_processing_context *pp_context)
1400 {
1401     struct intel_batchbuffer *batch = pp_context->batch;
1402
1403     BEGIN_BATCH(batch, 6);
1404
1405     OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
1406     OUT_BATCH(batch, 0);
1407
1408     OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
1409     OUT_BATCH(batch, 0);
1410     OUT_BATCH(batch,
1411               pp_context->idrt.num_interface_descriptors * sizeof(struct gen8_interface_descriptor_data));
1412     OUT_BATCH(batch, pp_context->idrt_offset);
1413     ADVANCE_BATCH(batch);
1414 }
1415
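/* Load the static kernel parameters (CURBE) from their offset in the
 * dynamic state buffer. */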
1416 void
1417 gen8_pp_curbe_load(VADriverContextP ctx,
1418                    struct i965_post_processing_context *pp_context)
1419 {
1420     struct intel_batchbuffer *batch = pp_context->batch;
1421     int param_size;
1422
1423     param_size = sizeof(struct gen7_pp_static_parameter);
1424
1425     BEGIN_BATCH(batch, 4);
1426     OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
1427     OUT_BATCH(batch, 0);
1428     OUT_BATCH(batch,
1429               param_size);
1430     OUT_BATCH(batch, pp_context->curbe_offset);
1431     ADVANCE_BATCH(batch);
1432 }
1433
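/* Build a second-level batch buffer containing one MEDIA_OBJECT command
 * (with the inline parameters) plus a MEDIA_STATE_FLUSH per block, then
 * chain to it with MI_BATCH_BUFFER_START and flush immediately. */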
1434 void
1435 gen8_pp_object_walker(VADriverContextP ctx,
1436                       struct i965_post_processing_context *pp_context)
1437 {
1438     struct i965_driver_data *i965 = i965_driver_data(ctx);
1439     struct intel_batchbuffer *batch = pp_context->batch;
1440     int x, x_steps, y, y_steps;
1441     int param_size, command_length_in_dws, extra_cmd_in_dws;
1442     dri_bo *command_buffer;
1443     unsigned int *command_ptr;
1444
1445     param_size = sizeof(struct gen7_pp_inline_parameter);
1446
1447     x_steps = pp_context->pp_x_steps(pp_context->private_context);
1448     y_steps = pp_context->pp_y_steps(pp_context->private_context);
1449     command_length_in_dws = 6 + (param_size >> 2);
1450     extra_cmd_in_dws = 2;
1451     command_buffer = dri_bo_alloc(i965->intel.bufmgr,
1452                                   "command objects buffer",
1453                                   (command_length_in_dws + extra_cmd_in_dws) * 4 * x_steps * y_steps + 64,
1454                                   4096);
1455
1456     dri_bo_map(command_buffer, 1);
1457     command_ptr = command_buffer->virtual;
1458
1459     for (y = 0; y < y_steps; y++) {
1460         for (x = 0; x < x_steps; x++) {
1461             if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {
1462
1463                 *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
1464                 *command_ptr++ = 0;
1465                 *command_ptr++ = 0;
1466                 *command_ptr++ = 0;
1467                 *command_ptr++ = 0;
1468                 *command_ptr++ = 0;
1469                 memcpy(command_ptr, pp_context->pp_inline_parameter, param_size);
1470                 command_ptr += (param_size >> 2);
1471
1472                 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
1473                 *command_ptr++ = 0;
1474             }
1475         }
1476     }
1477
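    /* Emit one extra zero dword when the command dword count is even,
     * then terminate this second-level batch buffer. */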
1478     if ((command_length_in_dws + extra_cmd_in_dws) * x_steps * y_steps % 2 == 0)
1479         *command_ptr++ = 0;
1480
1481     *command_ptr++ = MI_BATCH_BUFFER_END;
1482     *command_ptr++ = 0;
1483
1484     dri_bo_unmap(command_buffer);
1485
1486     BEGIN_BATCH(batch, 3);
1487     OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
1488     OUT_RELOC64(batch, command_buffer,
1489                 I915_GEM_DOMAIN_COMMAND, 0, 0);
1490     ADVANCE_BATCH(batch);
1491
1492     dri_bo_unreference(command_buffer);
1493
1494     /* The batch buffer has to be executed here because MI_BATCH_BUFFER_END
1495      * passes control back to the ring buffer.
1496      */
1497     intel_batchbuffer_end_atomic(batch);
1498     intel_batchbuffer_flush(batch);
1499     intel_batchbuffer_start_atomic(batch, 0x1000);
1500 }
1501
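/* Emit the full media pipeline programming sequence for one post-processing
 * pass inside an atomic batch section: pipeline select, base addresses,
 * VFE state, CURBE, interface descriptors and the object walker. */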
1502 static void
1503 gen8_pp_pipeline_setup(VADriverContextP ctx,
1504                        struct i965_post_processing_context *pp_context)
1505 {
1506     struct intel_batchbuffer *batch = pp_context->batch;
1507
1508     intel_batchbuffer_start_atomic(batch, 0x1000);
1509     intel_batchbuffer_emit_mi_flush(batch);
1510     gen6_pp_pipeline_select(ctx, pp_context);
1511     gen8_pp_state_base_address(ctx, pp_context);
1512     gen8_pp_vfe_state(ctx, pp_context);
1513     gen8_pp_curbe_load(ctx, pp_context);
1514     gen8_interface_descriptor_load(ctx, pp_context);
1515     gen8_pp_vfe_state(ctx, pp_context);
1516     gen8_pp_object_walker(ctx, pp_context);
1517     intel_batchbuffer_end_atomic(batch);
1518 }
1519
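/* Top-level GEN8 post-processing entry: initialize the selected pp module,
 * then upload the states and build/submit the pipeline commands. */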
1520 static VAStatus
1521 gen8_post_processing(
1522     VADriverContextP   ctx,
1523     struct i965_post_processing_context *pp_context,
1524     const struct i965_surface *src_surface,
1525     const VARectangle *src_rect,
1526     struct i965_surface *dst_surface,
1527     const VARectangle *dst_rect,
1528     int                pp_index,
1529     void * filter_param
1530 )
1531 {
1532     VAStatus va_status;
1533
1534     va_status = gen8_pp_initialize(ctx, pp_context,
1535                                    src_surface,
1536                                    src_rect,
1537                                    dst_surface,
1538                                    dst_rect,
1539                                    pp_index,
1540                                    filter_param);
1541
1542     if (va_status == VA_STATUS_SUCCESS) {
1543         gen8_pp_states_setup(ctx, pp_context);
1544         gen8_pp_pipeline_setup(ctx, pp_context);
1545     }
1546
1547     return va_status;
1548 }
1549
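/* Release every buffer object and heap allocation owned by the
 * post-processing context. */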
1550 static void
1551 gen8_post_processing_context_finalize(VADriverContextP ctx,
1552                                       struct i965_post_processing_context *pp_context)
1553 {
1554     if (pp_context->scaling_gpe_context_initialized) {
1555         gen8_gpe_context_destroy(&pp_context->scaling_gpe_context);
1556         pp_context->scaling_gpe_context_initialized = 0;
1557     }
1558
1559     if (pp_context->vebox_proc_ctx) {
1560         gen75_vebox_context_destroy(ctx, pp_context->vebox_proc_ctx);
1561         pp_context->vebox_proc_ctx = NULL;
1562     }
1563
1564     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1565     pp_context->surface_state_binding_table.bo = NULL;
1566
1567     dri_bo_unreference(pp_context->pp_dn_context.stmm_bo);
1568     pp_context->pp_dn_context.stmm_bo = NULL;
1569
1570     if (pp_context->instruction_state.bo) {
1571         dri_bo_unreference(pp_context->instruction_state.bo);
1572         pp_context->instruction_state.bo = NULL;
1573     }
1574
1575     if (pp_context->indirect_state.bo) {
1576         dri_bo_unreference(pp_context->indirect_state.bo);
1577         pp_context->indirect_state.bo = NULL;
1578     }
1579
1580     if (pp_context->dynamic_state.bo) {
1581         dri_bo_unreference(pp_context->dynamic_state.bo);
1582         pp_context->dynamic_state.bo = NULL;
1583     }
1584
1585     free(pp_context->pp_static_parameter);
1586     free(pp_context->pp_inline_parameter);
1587     pp_context->pp_static_parameter = NULL;
1588     pp_context->pp_inline_parameter = NULL;
1589 }
1590
1591 #define VPP_CURBE_ALLOCATION_SIZE   32
1592
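/* Shared GEN8+ context setup: program the VFE defaults, copy every kernel
 * binary into a single instruction buffer (entries aligned to 64 bytes) and
 * allocate the static/inline parameter blocks. */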
1593 void
1594 gen8_post_processing_context_common_init(VADriverContextP ctx,
1595                                          void *data,
1596                                          struct pp_module *pp_modules,
1597                                          int num_pp_modules,
1598                                          struct intel_batchbuffer *batch)
1599 {
1600     struct i965_driver_data *i965 = i965_driver_data(ctx);
1601     int i, kernel_size;
1602     unsigned int kernel_offset, end_offset;
1603     unsigned char *kernel_ptr;
1604     struct pp_module *pp_module;
1605     struct i965_post_processing_context *pp_context = data;
1606
1607     if (i965->intel.eu_total > 0)
1608         pp_context->vfe_gpu_state.max_num_threads = 6 * i965->intel.eu_total;
1609     else
1610         pp_context->vfe_gpu_state.max_num_threads = 60;
1611     pp_context->vfe_gpu_state.num_urb_entries = 59;
1612     pp_context->vfe_gpu_state.gpgpu_mode = 0;
1613     pp_context->vfe_gpu_state.urb_entry_size = 16 - 1;
1614     pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
1615
1616     pp_context->intel_post_processing = gen8_post_processing;
1617     pp_context->finalize = gen8_post_processing_context_finalize;
1618
1619     assert(ARRAY_ELEMS(pp_context->pp_modules) == num_pp_modules);
1620
1621     memcpy(pp_context->pp_modules, pp_modules, sizeof(pp_context->pp_modules));
1622
1623     kernel_size = 4096;
1624
1625     for (i = 0; i < NUM_PP_MODULES; i++) {
1626         pp_module = &pp_context->pp_modules[i];
1627
1628         if (pp_module->kernel.bin && pp_module->kernel.size) {
1629             kernel_size += pp_module->kernel.size;
1630         }
1631     }
1632
1633     pp_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
1634                                                     "kernel shader",
1635                                                     kernel_size,
1636                                                     0x1000);
1637     if (pp_context->instruction_state.bo == NULL) {
1638         WARN_ONCE("failed to allocate buffer space for the VPP kernel shaders\n");
1639         return;
1640     }
1641
1642     assert(pp_context->instruction_state.bo);
1643
1644
1645     pp_context->instruction_state.bo_size = kernel_size;
1646     pp_context->instruction_state.end_offset = 0;
1647     end_offset = 0;
1648
1649     dri_bo_map(pp_context->instruction_state.bo, 1);
1650     kernel_ptr = (unsigned char *)(pp_context->instruction_state.bo->virtual);
1651
1652     for (i = 0; i < NUM_PP_MODULES; i++) {
1653         pp_module = &pp_context->pp_modules[i];
1654
1655         kernel_offset = ALIGN(end_offset, 64);
1656         pp_module->kernel.kernel_offset = kernel_offset;
1657
1658         if (pp_module->kernel.bin && pp_module->kernel.size) {
1659
1660             memcpy(kernel_ptr + kernel_offset, pp_module->kernel.bin, pp_module->kernel.size);
1661             end_offset = kernel_offset + pp_module->kernel.size;
1662         }
1663     }
1664
1665     pp_context->instruction_state.end_offset = ALIGN(end_offset, 64);
1666
1667     dri_bo_unmap(pp_context->instruction_state.bo);
1668
1669     /* static & inline parameters */
1670     pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
1671     pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
1672
1673     pp_context->batch = batch;
1674
1675     pp_context->idrt_size = 5 * sizeof(struct gen8_interface_descriptor_data);
1676     pp_context->curbe_size = 256;
1677
1678 }
1679
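/* GEN8 entry point: run the common initialization, then prepare the GPE
 * context used by the YUV420 8-bit and 420-to-RGB32 scaling kernels. */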
1680 void
1681 gen8_post_processing_context_init(VADriverContextP ctx,
1682                                   void *data,
1683                                   struct intel_batchbuffer *batch)
1684 {
1685     struct i965_driver_data *i965 = i965_driver_data(ctx);
1686     struct i965_post_processing_context *pp_context = data;
1687     struct i965_gpe_context *gpe_context;
1688
1689     gen8_post_processing_context_common_init(ctx, data, pp_modules_gen8, ARRAY_ELEMS(pp_modules_gen8), batch);
1690     avs_init_state(&pp_context->pp_avs_context.state, &gen8_avs_config);
1691
1692     /* Initialize the YUV420 8-bit scaling context. The following conversions are supported:
1693      * NV12 -> NV12
1694      * NV12 -> I420
1695      * I420 -> I420
1696      * I420 -> NV12
1697      */
1698     gpe_context = &pp_context->scaling_gpe_context;
1699     gen8_gpe_load_kernels(ctx, gpe_context, pp_common_scaling_gen8, ARRAY_ELEMS(pp_common_scaling_gen8));
1700     gpe_context->idrt.entry_size = ALIGN(sizeof(struct gen8_interface_descriptor_data), 64);
1701     gpe_context->idrt.max_entries = ALIGN(ARRAY_ELEMS(pp_common_scaling_gen8), 2);
1702     gpe_context->sampler.entry_size = ALIGN(sizeof(struct gen8_sampler_state), 64);
1703     gpe_context->sampler.max_entries = 1;
1704     gpe_context->curbe.length = ALIGN(sizeof(struct scaling_input_parameter), 32);
1705
1706     gpe_context->surface_state_binding_table.max_entries = MAX_SCALING_SURFACES;
1707     gpe_context->surface_state_binding_table.binding_table_offset = 0;
1708     gpe_context->surface_state_binding_table.surface_state_offset = ALIGN(MAX_SCALING_SURFACES * 4, 64);
1709     gpe_context->surface_state_binding_table.length = ALIGN(MAX_SCALING_SURFACES * 4, 64) + ALIGN(MAX_SCALING_SURFACES * SURFACE_STATE_PADDED_SIZE_GEN8, 64);
1710
1711     if (i965->intel.eu_total > 0) {
1712         gpe_context->vfe_state.max_num_threads = i965->intel.eu_total * 6;
1713     } else {
1714         if (i965->intel.has_bsd2)
1715             gpe_context->vfe_state.max_num_threads = 300;
1716         else
1717             gpe_context->vfe_state.max_num_threads = 60;
1718     }
1719
1720     gpe_context->vfe_state.curbe_allocation_size = 37;
1721     gpe_context->vfe_state.urb_entry_size = 16;
1722     if (i965->intel.has_bsd2)
1723         gpe_context->vfe_state.num_urb_entries = 127;
1724     else
1725         gpe_context->vfe_state.num_urb_entries = 64;
1726
1727     gpe_context->vfe_state.gpgpu_mode = 0;
1728
1729     gen8_gpe_context_init(ctx, gpe_context);
1730     pp_context->scaling_gpe_context_initialized |= (VPPGPE_8BIT_8BIT | VPPGPE_8BIT_420_RGB32);
1731
1732     return;
1733 }
1734
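/* Dispatch one GPE kernel via MEDIA_OBJECT_WALKER inside an atomic batch
 * section and submit the batch. */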
1735 static void
1736 gen8_run_kernel_media_object_walker(VADriverContextP ctx,
1737                                     struct intel_batchbuffer *batch,
1738                                     struct i965_gpe_context *gpe_context,
1739                                     struct gpe_media_object_walker_parameter *param)
1740 {
1741     if (!batch || !gpe_context || !param)
1742         return;
1743
1744     intel_batchbuffer_start_atomic(batch, 0x1000);
1745
1746     intel_batchbuffer_emit_mi_flush(batch);
1747
1748     gen8_gpe_pipeline_setup(ctx, gpe_context, batch);
1749     gen8_gpe_media_object_walker(ctx, gpe_context, batch, param);
1750     gen8_gpe_media_state_flush(ctx, gpe_context, batch);
1751
1752
1753     intel_batchbuffer_end_atomic(batch);
1754
1755     intel_batchbuffer_flush(batch);
1756     return;
1757 }
1758
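/* Describe a dri_bo (or one plane of it, selected via bo_offset) as a 2D GPE
 * surface and add it to the binding table at the given index. */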
1759 static void
1760 gen8_add_dri_buffer_2d_gpe_surface(VADriverContextP ctx,
1761                                    struct i965_gpe_context *gpe_context,
1762                                    dri_bo *bo,
1763                                    unsigned int bo_offset,
1764                                    unsigned int width,
1765                                    unsigned int height,
1766                                    unsigned int pitch,
1767                                    int is_media_block_rw,
1768                                    unsigned int format,
1769                                    int index,
1770                                    int is_10bit)
1771 {
1772     struct i965_gpe_resource gpe_resource;
1773     struct i965_gpe_surface gpe_surface;
1774
1775     i965_dri_object_to_2d_gpe_resource(&gpe_resource, bo, width, height, pitch);
1776     memset(&gpe_surface, 0, sizeof(gpe_surface));
1777     gpe_surface.gpe_resource = &gpe_resource;
1778     gpe_surface.is_2d_surface = 1;
1779     gpe_surface.is_media_block_rw = !!is_media_block_rw;
1780     gpe_surface.cacheability_control = DEFAULT_MOCS;
1781     gpe_surface.format = format;
1782     gpe_surface.is_override_offset = 1;
1783     gpe_surface.offset = bo_offset;
1784     gpe_surface.is_16bpp = is_10bit;
1785
1786     gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
1787
1788     i965_free_gpe_resource(&gpe_resource);
1789 }
1790
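/* Set up the sampler state for scaling: nearest filtering for 1:1 blits,
 * bilinear filtering when scaling, with all texture coordinates clamped. */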
1791 static void
1792 gen8_vpp_scaling_sample_state(VADriverContextP ctx,
1793                               struct i965_gpe_context *gpe_context,
1794                               VARectangle *src_rect,
1795                               VARectangle *dst_rect)
1796 {
1797     struct gen8_sampler_state *sampler_state;
1798
1799     if (gpe_context == NULL || !src_rect || !dst_rect)
1800         return;
1801     dri_bo_map(gpe_context->sampler.bo, 1);
1802
1803     if (gpe_context->sampler.bo->virtual == NULL)
1804         return;
1805
1806     assert(gpe_context->sampler.bo->virtual);
1807
1808     sampler_state = (struct gen8_sampler_state *)
1809                     (gpe_context->sampler.bo->virtual + gpe_context->sampler.offset);
1810
1811     memset(sampler_state, 0, sizeof(*sampler_state));
1812
1813     if ((src_rect->width == dst_rect->width) &&
1814         (src_rect->height == dst_rect->height)) {
1815         sampler_state->ss0.min_filter = I965_MAPFILTER_NEAREST;
1816         sampler_state->ss0.mag_filter = I965_MAPFILTER_NEAREST;
1817     } else {
1818         sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
1819         sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
1820     }
1821
1822     sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1823     sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1824     sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1825
1826     dri_bo_unmap(gpe_context->sampler.bo);
1827 }
1828
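/* Fill the CURBE for the YUV420 8-bit scaling kernel: destination origin,
 * normalized source origin and scaling factors, and whether each surface
 * uses the packed (NV12) chroma layout. */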
1829 static void
1830 gen8_gpe_context_yuv420p8_scaling_curbe(VADriverContextP ctx,
1831                                         struct i965_gpe_context *gpe_context,
1832                                         VARectangle *src_rect,
1833                                         struct i965_surface *src_surface,
1834                                         VARectangle *dst_rect,
1835                                         struct i965_surface *dst_surface)
1836 {
1837     struct scaling_input_parameter *scaling_curbe;
1838     float src_width, src_height;
1839     float coeff;
1840     unsigned int fourcc;
1841
1842     if ((gpe_context == NULL) ||
1843         (src_rect == NULL) || (src_surface == NULL) ||
1844         (dst_rect == NULL) || (dst_surface == NULL))
1845         return;
1846
1847     scaling_curbe = i965_gpe_context_map_curbe(gpe_context);
1848
1849     if (!scaling_curbe)
1850         return;
1851
1852     memset(scaling_curbe, 0, sizeof(struct scaling_input_parameter));
1853
1854     scaling_curbe->bti_input = BTI_SCALING_INPUT_Y;
1855     scaling_curbe->bti_output = BTI_SCALING_OUTPUT_Y;
1856
1857     /* src_rect/dst_rect have already been validated by the caller, so they are not re-checked here. */
1858     scaling_curbe->x_dst     = dst_rect->x;
1859     scaling_curbe->y_dst     = dst_rect->y;
1860
1861     src_width = src_rect->x + src_rect->width;
1862     src_height = src_rect->y + src_rect->height;
1863
1864     scaling_curbe->inv_width = 1 / src_width;
1865     scaling_curbe->inv_height = 1 / src_height;
1866
1867     coeff = (float)(src_rect->width) / dst_rect->width;
1868     scaling_curbe->x_factor = coeff / src_width;
1869     scaling_curbe->x_orig = (float)(src_rect->x) / src_width;
1870
1871     coeff = (float)(src_rect->height) / dst_rect->height;
1872     scaling_curbe->y_factor = coeff / src_height;
1873     scaling_curbe->y_orig = (float)(src_rect->y) / src_height;
1874
1875     fourcc = pp_get_surface_fourcc(ctx, src_surface);
1876     if (fourcc == VA_FOURCC_NV12) {
1877         scaling_curbe->dw2.src_packed = 1;
1878     }
1879
1880     fourcc = pp_get_surface_fourcc(ctx, dst_surface);
1881
1882     if (fourcc == VA_FOURCC_NV12) {
1883         scaling_curbe->dw2.dst_packed = 1;
1884     }
1885
1886     i965_gpe_context_unmap_curbe(gpe_context);
1887 }
1888
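/* Return the per-plane width/height/pitch/offset for a VA surface or image,
 * clipping the region against the surface dimensions; the chroma entries are
 * ordered so that index 1 is always the U (Cb) plane. */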
1889 static bool
1890 gen8_pp_context_get_surface_conf(VADriverContextP ctx,
1891                                  struct i965_surface *surface,
1892                                  VARectangle *rect,
1893                                  int *width,
1894                                  int *height,
1895                                  int *pitch,
1896                                  int *bo_offset)
1897 {
1898     unsigned int fourcc;
1899     if (!rect || !surface || !width || !height || !pitch || !bo_offset)
1900         return false;
1901
1902     if (surface->base == NULL)
1903         return false;
1904
1905     fourcc = pp_get_surface_fourcc(ctx, surface);
1906     if (surface->type == I965_SURFACE_TYPE_SURFACE) {
1907         struct object_surface *obj_surface;
1908
1909         obj_surface = (struct object_surface *)surface->base;
1910         width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
1911         height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
1912         pitch[0] = obj_surface->width;
1913         bo_offset[0] = 0;
1914
1915         if (fourcc == VA_FOURCC_RGBX ||
1916             fourcc == VA_FOURCC_RGBA ||
1917             fourcc == VA_FOURCC_BGRX ||
1918             fourcc == VA_FOURCC_BGRA) {
1919             /* nothing to do here */
1920         } else if (fourcc == VA_FOURCC_P010 || fourcc == VA_FOURCC_NV12) {
1921             width[1] = width[0] / 2;
1922             height[1] = height[0] / 2;
1923             pitch[1] = obj_surface->cb_cr_pitch;
1924             bo_offset[1] = obj_surface->width * obj_surface->y_cb_offset;
1925         } else {
1926             width[1] = width[0] / 2;
1927             height[1] = height[0] / 2;
1928             pitch[1] = obj_surface->cb_cr_pitch;
1929             bo_offset[1] = obj_surface->width * obj_surface->y_cb_offset;
1930             width[2] = width[0] / 2;
1931             height[2] = height[0] / 2;
1932             pitch[2] = obj_surface->cb_cr_pitch;
1933             bo_offset[2] = obj_surface->width * obj_surface->y_cr_offset;
1934         }
1935
1936     } else {
1937         struct object_image *obj_image;
1938
1939         obj_image = (struct object_image *)surface->base;
1940
1941         width[0] = MIN(rect->x + rect->width, obj_image->image.width);
1942         height[0] = MIN(rect->y + rect->height, obj_image->image.height);
1943         pitch[0] = obj_image->image.pitches[0];
1944         bo_offset[0] = obj_image->image.offsets[0];
1945
1946         if (fourcc == VA_FOURCC_RGBX ||
1947             fourcc == VA_FOURCC_RGBA ||
1948             fourcc == VA_FOURCC_BGRX ||
1949             fourcc == VA_FOURCC_BGRA) {
1950             /* nothing to do here */
1951         } else if (fourcc == VA_FOURCC_P010 || fourcc == VA_FOURCC_NV12) {
1952             width[1] = width[0] / 2;
1953             height[1] = height[0] / 2;
1954             pitch[1] = obj_image->image.pitches[1];
1955             bo_offset[1] = obj_image->image.offsets[1];
1956         } else {
1957             int u = 1, v = 2;
1958
1959             if (fourcc == VA_FOURCC_YV12 || fourcc == VA_FOURCC_IMC1)
1960                 u = 2, v = 1;
1961
1962             width[1] = width[0] / 2;
1963             height[1] = height[0] / 2;
1964             pitch[1] = obj_image->image.pitches[u];
1965             bo_offset[1] = obj_image->image.offsets[u];
1966             width[2] = width[0] / 2;
1967             height[2] = height[0] / 2;
1968             pitch[2] = obj_image->image.pitches[v];
1969             bo_offset[2] = obj_image->image.offsets[v];
1970         }
1971
1972     }
1973     return true;
1974 }
1975
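/* Bind the source planes as sampled (UNORM) surfaces and the destination
 * planes as media-block (UINT) read/write surfaces for the YUV420 8-bit
 * scaling kernel. */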
1976 static void
1977 gen8_gpe_context_yuv420p8_scaling_surfaces(VADriverContextP ctx,
1978                                            struct i965_gpe_context *gpe_context,
1979                                            VARectangle *src_rect,
1980                                            struct i965_surface *src_surface,
1981                                            VARectangle *dst_rect,
1982                                            struct i965_surface *dst_surface)
1983 {
1984     unsigned int fourcc;
1985     int width[3], height[3], pitch[3], bo_offset[3];
1986     dri_bo *bo;
1987     struct object_surface *obj_surface;
1988     struct object_image *obj_image;
1989     int bti;
1990
1991     if ((gpe_context == NULL) ||
1992         (src_rect == NULL) || (src_surface == NULL) ||
1993         (dst_rect == NULL) || (dst_surface == NULL))
1994         return;
1995
1996     if (src_surface->base == NULL || dst_surface->base == NULL)
1997         return;
1998
1999     fourcc = pp_get_surface_fourcc(ctx, src_surface);
2000
2001     if (src_surface->type == I965_SURFACE_TYPE_SURFACE) {
2002         obj_surface = (struct object_surface *)src_surface->base;
2003         bo = obj_surface->bo;
2004     } else {
2005         obj_image = (struct object_image *)src_surface->base;
2006         bo = obj_image->bo;
2007     }
2008
2009     bti = 0;
2010     if (gen8_pp_context_get_surface_conf(ctx, src_surface, src_rect,
2011                                          width, height, pitch,
2012                                          bo_offset)) {
2013         bti = BTI_SCALING_INPUT_Y;
2014         /* Input surface */
2015         gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2016                                            bo_offset[0],
2017                                            width[0], height[0],
2018                                            pitch[0], 0,
2019                                            I965_SURFACEFORMAT_R8_UNORM,
2020                                            bti, 0);
2021         if (fourcc == VA_FOURCC_NV12) {
2022             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2023                                                bo_offset[1],
2024                                                width[1], height[1],
2025                                                pitch[1], 0,
2026                                                I965_SURFACEFORMAT_R8G8_UNORM,
2027                                                bti + 1, 0);
2028         } else {
2029             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2030                                                bo_offset[1],
2031                                                width[1], height[1],
2032                                                pitch[1], 0,
2033                                                I965_SURFACEFORMAT_R8_UNORM,
2034                                                bti + 1, 0);
2035
2036             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2037                                                bo_offset[2],
2038                                                width[2], height[2],
2039                                                pitch[2], 0,
2040                                                I965_SURFACEFORMAT_R8_UNORM,
2041                                                bti + 2, 0);
2042         }
2043     }
2044
2045     fourcc = pp_get_surface_fourcc(ctx, dst_surface);
2046
2047     if (dst_surface->type == I965_SURFACE_TYPE_SURFACE) {
2048         obj_surface = (struct object_surface *)dst_surface->base;
2049         bo = obj_surface->bo;
2050     } else {
2051         obj_image = (struct object_image *)dst_surface->base;
2052         bo = obj_image->bo;
2053     }
2054
2055     if (gen8_pp_context_get_surface_conf(ctx, dst_surface, dst_rect,
2056                                          width, height, pitch,
2057                                          bo_offset)) {
2058         bti = BTI_SCALING_OUTPUT_Y;
2059         /* Output surface */
2060         gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2061                                            bo_offset[0],
2062                                            width[0], height[0],
2063                                            pitch[0], 1,
2064                                            I965_SURFACEFORMAT_R8_UINT,
2065                                            bti, 0);
2066         if (fourcc == VA_FOURCC_NV12) {
2067             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2068                                                bo_offset[1],
2069                                                width[1] * 2, height[1],
2070                                                pitch[1], 1,
2071                                                I965_SURFACEFORMAT_R16_UINT,
2072                                                bti + 1, 0);
2073         } else {
2074             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2075                                                bo_offset[1],
2076                                                width[1], height[1],
2077                                                pitch[1], 1,
2078                                                I965_SURFACEFORMAT_R8_UINT,
2079                                                bti + 1, 0);
2080
2081             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2082                                                bo_offset[2],
2083                                                width[2], height[2],
2084                                                pitch[2], 1,
2085                                                I965_SURFACEFORMAT_R8_UINT,
2086                                                bti + 2, 0);
2087         }
2088     }
2089
2090     return;
2091 }
2092
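/* Entry point for NV12/I420 scaling: program the sampler, CURBE and surface
 * bindings, then launch the kernel with one walker unit per 16x16 output block. */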
2093 VAStatus
2094 gen8_yuv420p8_scaling_post_processing(
2095     VADriverContextP   ctx,
2096     struct i965_post_processing_context *pp_context,
2097     struct i965_surface *src_surface,
2098     VARectangle *src_rect,
2099     struct i965_surface *dst_surface,
2100     VARectangle *dst_rect)
2101 {
2102     struct i965_gpe_context *gpe_context;
2103     struct gpe_media_object_walker_parameter media_object_walker_param;
2104     struct intel_vpp_kernel_walker_parameter kernel_walker_param;
2105
2106     if (!pp_context || !src_surface || !src_rect || !dst_surface || !dst_rect)
2107         return VA_STATUS_ERROR_INVALID_PARAMETER;
2108
2109     if (!(pp_context->scaling_gpe_context_initialized & VPPGPE_8BIT_8BIT))
2110         return VA_STATUS_ERROR_UNIMPLEMENTED;
2111
2112     gpe_context = &pp_context->scaling_gpe_context;
2113
2114     gen8_gpe_context_init(ctx, gpe_context);
2115     gen8_vpp_scaling_sample_state(ctx, gpe_context, src_rect, dst_rect);
2116     gen8_gpe_reset_binding_table(ctx, gpe_context);
2117     gen8_gpe_context_yuv420p8_scaling_curbe(ctx, gpe_context,
2118                                             src_rect, src_surface,
2119                                             dst_rect, dst_surface);
2120
2121     gen8_gpe_context_yuv420p8_scaling_surfaces(ctx, gpe_context,
2122                                                src_rect, src_surface,
2123                                                dst_rect, dst_surface);
2124
2125     gen8_gpe_setup_interface_data(ctx, gpe_context);
2126
2127     memset(&kernel_walker_param, 0, sizeof(kernel_walker_param));
2128     kernel_walker_param.resolution_x = ALIGN(dst_rect->width, 16) >> 4;
2129     kernel_walker_param.resolution_y = ALIGN(dst_rect->height, 16) >> 4;
2130     kernel_walker_param.no_dependency = 1;
2131
2132     intel_vpp_init_media_object_walker_parameter(&kernel_walker_param, &media_object_walker_param);
2133     media_object_walker_param.interface_offset = 0;
2134     gen8_run_kernel_media_object_walker(ctx, pp_context->batch,
2135                                         gpe_context,
2136                                         &media_object_walker_param);
2137
2138     return VA_STATUS_SUCCESS;
2139 }
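
/* Fill the CURBE for the YUV420-to-RGB32 kernel: scaling parameters, the
 * source/destination format codes and the YUV-to-RGB coefficients selected
 * from the source surface's colour standard. */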
2140 static void
2141 gen8_gpe_context_8bit_420_rgb32_scaling_curbe(VADriverContextP ctx,
2142                                               struct i965_gpe_context *gpe_context,
2143                                               VARectangle *src_rect,
2144                                               struct i965_surface *src_surface,
2145                                               VARectangle *dst_rect,
2146                                               struct i965_surface *dst_surface)
2147 {
2148     struct scaling_input_parameter *scaling_curbe;
2149     float src_width, src_height;
2150     float coeff;
2151     unsigned int fourcc;
2152     int src_format = SRC_FORMAT_I420, dst_format = DST_FORMAT_RGBX;
2153     const float * yuv_to_rgb_coefs;
2154     size_t yuv_to_rgb_coefs_size;
2155
2156     if ((gpe_context == NULL) ||
2157         (src_rect == NULL) || (src_surface == NULL) ||
2158         (dst_rect == NULL) || (dst_surface == NULL))
2159         return;
2160
2161     scaling_curbe = i965_gpe_context_map_curbe(gpe_context);
2162
2163     if (!scaling_curbe)
2164         return;
2165
2166     memset(scaling_curbe, 0, sizeof(struct scaling_input_parameter));
2167
2168     scaling_curbe->bti_input = BTI_SCALING_INPUT_Y;
2169     scaling_curbe->bti_output = BTI_SCALING_OUTPUT_Y;
2170
2171     /* src_rect/dst_rect have already been validated by the caller, so they are not re-checked here. */
2172     scaling_curbe->x_dst     = dst_rect->x;
2173     scaling_curbe->y_dst     = dst_rect->y;
2174
2175     src_width = src_rect->x + src_rect->width;
2176     src_height = src_rect->y + src_rect->height;
2177
2178     scaling_curbe->inv_width = 1 / src_width;
2179     scaling_curbe->inv_height = 1 / src_height;
2180
2181     coeff = (float)(src_rect->width) / dst_rect->width;
2182     scaling_curbe->x_factor = coeff / src_width;
2183     scaling_curbe->x_orig = (float)(src_rect->x) / src_width;
2184
2185     coeff = (float)(src_rect->height) / dst_rect->height;
2186     scaling_curbe->y_factor = coeff / src_height;
2187     scaling_curbe->y_orig = (float)(src_rect->y) / src_height;
2188
2189     fourcc = pp_get_surface_fourcc(ctx, src_surface);
2190
2191     switch (fourcc) {
2192     case VA_FOURCC_I420:
2193     case VA_FOURCC_IMC3: /* pitch / base address is set via surface_state */
2194         src_format = SRC_FORMAT_I420;
2195         break;
2196
2197     case VA_FOURCC_NV12:
2198         src_format = SRC_FORMAT_NV12;
2199         break;
2200
2201     case VA_FOURCC_YV12:
2202     case VA_FOURCC_IMC1: /* pitch / base address is set via surface_state */
2203         src_format = SRC_FORMAT_YV12;
2204         break;
2205
2206     default:
2207         break;
2208     }
2209
2210     fourcc = pp_get_surface_fourcc(ctx, dst_surface);
2211
2212     switch (fourcc) {
2213     case VA_FOURCC_RGBX:
2214         dst_format = DST_FORMAT_RGBX;
2215         break;
2216
2217     case VA_FOURCC_RGBA:
2218         dst_format = DST_FORMAT_RGBA;
2219         break;
2220
2221     case VA_FOURCC_BGRX:
2222         dst_format = DST_FORMAT_BGRX;
2223         break;
2224
2225     case VA_FOURCC_BGRA:
2226         dst_format = DST_FORMAT_BGRA;
2227         break;
2228
2229     default:
2230         break;
2231     }
2232
2233     scaling_curbe->dw2.src_format = src_format;
2234     scaling_curbe->dw2.dst_format = dst_format;
2235
2236     yuv_to_rgb_coefs = i915_color_standard_to_coefs(i915_filter_to_color_standard(src_surface->flags & VA_SRC_COLOR_MASK), &yuv_to_rgb_coefs_size);
2237     memcpy(&scaling_curbe->coef_ry, yuv_to_rgb_coefs, yuv_to_rgb_coefs_size);
2238
2239     i965_gpe_context_unmap_curbe(gpe_context);
2240 }
2241
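/* Bind the YUV source planes and the RGB32 destination for the
 * YUV420-to-RGB32 kernel; the destination is described as an R8_UINT
 * media-block surface four times the pixel width. */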
2242 static void
2243 gen8_gpe_context_8bit_420_rgb32_scaling_surfaces(VADriverContextP ctx,
2244                                                  struct i965_gpe_context *gpe_context,
2245                                                  VARectangle *src_rect,
2246                                                  struct i965_surface *src_surface,
2247                                                  VARectangle *dst_rect,
2248                                                  struct i965_surface *dst_surface)
2249 {
2250     unsigned int fourcc;
2251     int width[3], height[3], pitch[3], bo_offset[3];
2252     dri_bo *bo;
2253     struct object_surface *obj_surface;
2254     struct object_image *obj_image;
2255     int bti;
2256
2257     if ((gpe_context == NULL) ||
2258         (src_rect == NULL) || (src_surface == NULL) ||
2259         (dst_rect == NULL) || (dst_surface == NULL))
2260         return;
2261
2262     if (src_surface->base == NULL || dst_surface->base == NULL)
2263         return;
2264
2265     fourcc = pp_get_surface_fourcc(ctx, src_surface);
2266
2267     if (src_surface->type == I965_SURFACE_TYPE_SURFACE) {
2268         obj_surface = (struct object_surface *)src_surface->base;
2269         bo = obj_surface->bo;
2270     } else {
2271         obj_image = (struct object_image *)src_surface->base;
2272         bo = obj_image->bo;
2273     }
2274
2275     if (gen8_pp_context_get_surface_conf(ctx, src_surface, src_rect,
2276                                          width, height, pitch,
2277                                          bo_offset)) {
2278         /* Input surface */
2279         bti = BTI_SCALING_INPUT_Y;
2280         gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2281                                            bo_offset[0],
2282                                            width[0], height[0],
2283                                            pitch[0], 0,
2284                                            I965_SURFACEFORMAT_R8_UNORM,
2285                                            bti, 0);
2286
2287         if (fourcc == VA_FOURCC_NV12) {
2288             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2289                                                bo_offset[1],
2290                                                width[1], height[1],
2291                                                pitch[1], 0,
2292                                                I965_SURFACEFORMAT_R8G8_UNORM,
2293                                                bti + 1, 0);
2294         } else {
2295             /* The shader expects the U plane at bti + 1 and the V plane at bti + 2 */
2296             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2297                                                bo_offset[1],
2298                                                width[1], height[1],
2299                                                pitch[1], 0,
2300                                                I965_SURFACEFORMAT_R8_UNORM,
2301                                                bti + 1, 0);
2302
2303             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2304                                                bo_offset[2],
2305                                                width[2], height[2],
2306                                                pitch[2], 0,
2307                                                I965_SURFACEFORMAT_R8_UNORM,
2308                                                bti + 2, 0);
2309         }
2310     }
2311
2312     fourcc = pp_get_surface_fourcc(ctx, dst_surface);
2313
2314     if (dst_surface->type == I965_SURFACE_TYPE_SURFACE) {
2315         obj_surface = (struct object_surface *)dst_surface->base;
2316         bo = obj_surface->bo;
2317     } else {
2318         obj_image = (struct object_image *)dst_surface->base;
2319         bo = obj_image->bo;
2320     }
2321
2322     if (gen8_pp_context_get_surface_conf(ctx, dst_surface, dst_rect,
2323                                          width, height, pitch,
2324                                          bo_offset)) {
2325         assert(fourcc == VA_FOURCC_RGBX ||
2326                fourcc == VA_FOURCC_RGBA ||
2327                fourcc == VA_FOURCC_BGRX ||
2328                fourcc == VA_FOURCC_BGRA);
2329         assert(width[0] * 4 <= pitch[0]);
2330
2331         /* Output surface */
2332         bti = BTI_SCALING_OUTPUT_Y;
2333         gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2334                                            bo_offset[0],
2335                                            width[0] * 4, height[0],
2336                                            pitch[0], 1,
2337                                            I965_SURFACEFORMAT_R8_UINT,
2338                                            bti, 0);
2339     }
2340 }
2341
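/* Entry point for YUV420-to-RGB32 conversion with scaling; it uses the
 * second interface descriptor (interface_offset = 1) of the scaling GPE
 * context. */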
2342 VAStatus
2343 gen8_8bit_420_rgb32_scaling_post_processing(VADriverContextP   ctx,
2344                                             struct i965_post_processing_context *pp_context,
2345                                             struct i965_surface *src_surface,
2346                                             VARectangle *src_rect,
2347                                             struct i965_surface *dst_surface,
2348                                             VARectangle *dst_rect)
2349 {
2350     struct i965_gpe_context *gpe_context;
2351     struct gpe_media_object_walker_parameter media_object_walker_param;
2352     struct intel_vpp_kernel_walker_parameter kernel_walker_param;
2353
2354     if (!pp_context || !src_surface || !src_rect || !dst_surface || !dst_rect)
2355         return VA_STATUS_ERROR_INVALID_PARAMETER;
2356
2357     if (!(pp_context->scaling_gpe_context_initialized & VPPGPE_8BIT_420_RGB32))
2358         return VA_STATUS_ERROR_UNIMPLEMENTED;
2359
2360     gpe_context = &pp_context->scaling_gpe_context;
2361
2362     gen8_gpe_context_init(ctx, gpe_context);
2363     gen8_vpp_scaling_sample_state(ctx, gpe_context, src_rect, dst_rect);
2364     gen8_gpe_reset_binding_table(ctx, gpe_context);
2365     gen8_gpe_context_8bit_420_rgb32_scaling_curbe(ctx, gpe_context,
2366                                                   src_rect, src_surface,
2367                                                   dst_rect, dst_surface);
2368
2369     gen8_gpe_context_8bit_420_rgb32_scaling_surfaces(ctx, gpe_context,
2370                                                      src_rect, src_surface,
2371                                                      dst_rect, dst_surface);
2372
2373     gen8_gpe_setup_interface_data(ctx, gpe_context);
2374
2375     memset(&kernel_walker_param, 0, sizeof(kernel_walker_param));
2376     kernel_walker_param.resolution_x = ALIGN(dst_rect->width, 16) >> 4;
2377     kernel_walker_param.resolution_y = ALIGN(dst_rect->height, 16) >> 4;
2378     kernel_walker_param.no_dependency = 1;
2379
2380     intel_vpp_init_media_object_walker_parameter(&kernel_walker_param, &media_object_walker_param);
2381     media_object_walker_param.interface_offset = 1;
2382     gen8_run_kernel_media_object_walker(ctx, pp_context->batch,
2383                                         gpe_context,
2384                                         &media_object_walker_param);
2385
2386     return VA_STATUS_SUCCESS;
2387 }