
Use single GPE context for the optimization for 8bit/10bit scaling/CSC
[android-x86/hardware-intel-common-vaapi.git] / src / gen8_post_processing.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the
6  * "Software"), to deal in the Software without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sub license, and/or sell copies of the Software, and to
9  * permit persons to whom the Software is furnished to do so, subject to
10  * the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the
13  * next paragraph) shall be included in all copies or substantial portions
14  * of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19  * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Xiang Haihao <haihao.xiang@intel.com>
26  *    Zhao Yakui <yakui.zhao@intel.com>
27  *
28  */
29
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <assert.h>
34
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "i965_drv_video.h"
40 #include "i965_post_processing.h"
41 #include "i965_render.h"
42 #include "i965_yuv_coefs.h"
43 #include "intel_media.h"
44
45 #include "gen75_picture_process.h"
46 #include "intel_common_vpp_internal.h"
47
48 #define SURFACE_STATE_PADDED_SIZE               SURFACE_STATE_PADDED_SIZE_GEN8
49
50 #define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * index)
51 #define BINDING_TABLE_OFFSET                    SURFACE_STATE_OFFSET(MAX_PP_SURFACES)
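/*
 * Layout of the surface_state_binding_table BO (see gen8_pp_initialize()):
 * MAX_PP_SURFACES padded surface states, followed by the binding table whose
 * dword entries hold the byte offset of the corresponding surface state
 * (written by gen8_pp_set_surface_state()/gen8_pp_set_surface2_state()).
 */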
52
53 #define GPU_ASM_BLOCK_WIDTH         16
54 #define GPU_ASM_BLOCK_HEIGHT        8
55 #define GPU_ASM_X_OFFSET_ALIGNMENT  4
56
57 #define VA_STATUS_SUCCESS_1                     0xFFFFFFFE
58
59 VAStatus pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
60                             const struct i965_surface *src_surface,
61                             const VARectangle *src_rect,
62                             struct i965_surface *dst_surface,
63                             const VARectangle *dst_rect,
64                             void *filter_param);
65
66 VAStatus gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
67                                     const struct i965_surface *src_surface,
68                                     const VARectangle *src_rect,
69                                     struct i965_surface *dst_surface,
70                                     const VARectangle *dst_rect,
71                                     void *filter_param);
72
73 /* TODO: Modify the shader and then compile it again.
74  * Currently it is derived from Haswell. */
75 static const uint32_t pp_null_gen8[][4] = {
76 };
77
78 static const uint32_t pp_nv12_load_save_nv12_gen8[][4] = {
79 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
80 };
81
82 static const uint32_t pp_nv12_load_save_pl3_gen8[][4] = {
83 #include "shaders/post_processing/gen8/pl2_to_pl3.g8b"
84 };
85
86 static const uint32_t pp_pl3_load_save_nv12_gen8[][4] = {
87 #include "shaders/post_processing/gen8/pl3_to_pl2.g8b"
88 };
89
90 static const uint32_t pp_pl3_load_save_pl3_gen8[][4] = {
91 #include "shaders/post_processing/gen8/pl3_to_pl3.g8b"
92 };
93
94 static const uint32_t pp_nv12_scaling_gen8[][4] = {
95 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
96 };
97
98 static const uint32_t pp_nv12_avs_gen8[][4] = {
99 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
100 };
101
102 static const uint32_t pp_nv12_dndi_gen8[][4] = {
103 // #include "shaders/post_processing/gen7/dndi.g75b"
104 };
105
106 static const uint32_t pp_nv12_dn_gen8[][4] = {
107 // #include "shaders/post_processing/gen7/nv12_dn_nv12.g75b"
108 };
109 static const uint32_t pp_nv12_load_save_pa_gen8[][4] = {
110 #include "shaders/post_processing/gen8/pl2_to_pa.g8b"
111 };
112 static const uint32_t pp_pl3_load_save_pa_gen8[][4] = {
113 #include "shaders/post_processing/gen8/pl3_to_pa.g8b"
114 };
115 static const uint32_t pp_pa_load_save_nv12_gen8[][4] = {
116 #include "shaders/post_processing/gen8/pa_to_pl2.g8b"
117 };
118 static const uint32_t pp_pa_load_save_pl3_gen8[][4] = {
119 #include "shaders/post_processing/gen8/pa_to_pl3.g8b"
120 };
121 static const uint32_t pp_pa_load_save_pa_gen8[][4] = {
122 #include "shaders/post_processing/gen8/pa_to_pa.g8b"
123 };
124 static const uint32_t pp_rgbx_load_save_nv12_gen8[][4] = {
125 #include "shaders/post_processing/gen8/rgbx_to_nv12.g8b"
126 };
127 static const uint32_t pp_nv12_load_save_rgbx_gen8[][4] = {
128 #include "shaders/post_processing/gen8/pl2_to_rgbx.g8b"
129 };
130
131 static struct pp_module pp_modules_gen8[] = {
132     {
133         {
134             "NULL module (for testing)",
135             PP_NULL,
136             pp_null_gen8,
137             sizeof(pp_null_gen8),
138             NULL,
139         },
140
141         pp_null_initialize,
142     },
143
144     {
145         {
146             "NV12_NV12",
147             PP_NV12_LOAD_SAVE_N12,
148             pp_nv12_load_save_nv12_gen8,
149             sizeof(pp_nv12_load_save_nv12_gen8),
150             NULL,
151         },
152
153         gen8_pp_plx_avs_initialize,
154     },
155
156     {
157         {
158             "NV12_PL3",
159             PP_NV12_LOAD_SAVE_PL3,
160             pp_nv12_load_save_pl3_gen8,
161             sizeof(pp_nv12_load_save_pl3_gen8),
162             NULL,
163         },
164         gen8_pp_plx_avs_initialize,
165     },
166
167     {
168         {
169             "PL3_NV12",
170             PP_PL3_LOAD_SAVE_N12,
171             pp_pl3_load_save_nv12_gen8,
172             sizeof(pp_pl3_load_save_nv12_gen8),
173             NULL,
174         },
175
176         gen8_pp_plx_avs_initialize,
177     },
178
179     {
180         {
181             "PL3_PL3",
182             PP_PL3_LOAD_SAVE_PL3,
183             pp_pl3_load_save_pl3_gen8,
184             sizeof(pp_pl3_load_save_pl3_gen8),
185             NULL,
186         },
187
188         gen8_pp_plx_avs_initialize,
189     },
190
191     {
192         {
193             "NV12 Scaling module",
194             PP_NV12_SCALING,
195             pp_nv12_scaling_gen8,
196             sizeof(pp_nv12_scaling_gen8),
197             NULL,
198         },
199
200         gen8_pp_plx_avs_initialize,
201     },
202
203     {
204         {
205             "NV12 AVS module",
206             PP_NV12_AVS,
207             pp_nv12_avs_gen8,
208             sizeof(pp_nv12_avs_gen8),
209             NULL,
210         },
211
212         gen8_pp_plx_avs_initialize,
213     },
214
215     {
216         {
217             "NV12 DNDI module",
218             PP_NV12_DNDI,
219             pp_nv12_dndi_gen8,
220             sizeof(pp_nv12_dndi_gen8),
221             NULL,
222         },
223
224         pp_null_initialize,
225     },
226
227     {
228         {
229             "NV12 DN module",
230             PP_NV12_DN,
231             pp_nv12_dn_gen8,
232             sizeof(pp_nv12_dn_gen8),
233             NULL,
234         },
235
236         pp_null_initialize,
237     },
238     {
239         {
240             "NV12_PA module",
241             PP_NV12_LOAD_SAVE_PA,
242             pp_nv12_load_save_pa_gen8,
243             sizeof(pp_nv12_load_save_pa_gen8),
244             NULL,
245         },
246
247         gen8_pp_plx_avs_initialize,
248     },
249
250     {
251         {
252             "PL3_PA module",
253             PP_PL3_LOAD_SAVE_PA,
254             pp_pl3_load_save_pa_gen8,
255             sizeof(pp_pl3_load_save_pa_gen8),
256             NULL,
257         },
258
259         gen8_pp_plx_avs_initialize,
260     },
261
262     {
263         {
264             "PA_NV12 module",
265             PP_PA_LOAD_SAVE_NV12,
266             pp_pa_load_save_nv12_gen8,
267             sizeof(pp_pa_load_save_nv12_gen8),
268             NULL,
269         },
270
271         gen8_pp_plx_avs_initialize,
272     },
273
274     {
275         {
276             "PA_PL3 module",
277             PP_PA_LOAD_SAVE_PL3,
278             pp_pa_load_save_pl3_gen8,
279             sizeof(pp_pa_load_save_pl3_gen8),
280             NULL,
281         },
282
283         gen8_pp_plx_avs_initialize,
284     },
285
286     {
287         {
288             "PA_PA module",
289             PP_PA_LOAD_SAVE_PA,
290             pp_pa_load_save_pa_gen8,
291             sizeof(pp_pa_load_save_pa_gen8),
292             NULL,
293         },
294
295         gen8_pp_plx_avs_initialize,
296     },
297
298     {
299         {
300             "RGBX_NV12 module",
301             PP_RGBX_LOAD_SAVE_NV12,
302             pp_rgbx_load_save_nv12_gen8,
303             sizeof(pp_rgbx_load_save_nv12_gen8),
304             NULL,
305         },
306
307         gen8_pp_plx_avs_initialize,
308     },
309
310     {
311         {
312             "NV12_RGBX module",
313             PP_NV12_LOAD_SAVE_RGBX,
314             pp_nv12_load_save_rgbx_gen8,
315             sizeof(pp_nv12_load_save_rgbx_gen8),
316             NULL,
317         },
318
319         gen8_pp_plx_avs_initialize,
320     },
321 };
322
323 #define MAX_SCALING_SURFACES    16
324
325 #define DEFAULT_MOCS    0
326
327 static const uint32_t pp_yuv420p8_scaling_gen8[][4] = {
328 #include "shaders/post_processing/gen8/conv_nv12.g8b"
329 };
330
331 static void
332 gen8_pp_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
333 {
334     switch (tiling) {
335     case I915_TILING_NONE:
336         ss->ss0.tiled_surface = 0;
337         ss->ss0.tile_walk = 0;
338         break;
339     case I915_TILING_X:
340         ss->ss0.tiled_surface = 1;
341         ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
342         break;
343     case I915_TILING_Y:
344         ss->ss0.tiled_surface = 1;
345         ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
346         break;
347     }
348 }
349
350 static void
351 gen8_pp_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
352 {
353     switch (tiling) {
354     case I915_TILING_NONE:
355         ss->ss2.tiled_surface = 0;
356         ss->ss2.tile_walk = 0;
357         break;
358     case I915_TILING_X:
359         ss->ss2.tiled_surface = 1;
360         ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
361         break;
362     case I915_TILING_Y:
363         ss->ss2.tiled_surface = 1;
364         ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
365         break;
366     }
367 }
368
369
370 static void
371 gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
372                           dri_bo *surf_bo, unsigned long surf_bo_offset,
373                           int width, int height, int pitch, int format,
374                           int index, int is_target)
375 {
376     struct i965_driver_data *i965 = i965_driver_data(ctx);
377     struct gen8_surface_state *ss;
378     dri_bo *ss_bo;
379     unsigned int tiling;
380     unsigned int swizzle;
381
382     dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
383     ss_bo = pp_context->surface_state_binding_table.bo;
384     assert(ss_bo);
385
386     dri_bo_map(ss_bo, True);
387     assert(ss_bo->virtual);
388     ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
389     memset(ss, 0, sizeof(*ss));
390
391     if (IS_GEN9(i965->intel.device_info))
392         ss->ss1.surface_mocs = GEN9_CACHE_PTE;
393
394     ss->ss0.surface_type = I965_SURFACE_2D;
395     ss->ss0.surface_format = format;
396     ss->ss8.base_addr = surf_bo->offset + surf_bo_offset;
397     ss->ss2.width = width - 1;
398     ss->ss2.height = height - 1;
399     ss->ss3.pitch = pitch - 1;
400
401     /* Always set to 1 (align-4 mode) per B-spec */
402     ss->ss0.vertical_alignment = 1;
403     ss->ss0.horizontal_alignment = 1;
404
405     gen8_pp_set_surface_tiling(ss, tiling);
406     gen8_render_set_surface_scs(ss);
407     dri_bo_emit_reloc(ss_bo,
408                       I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
409                       surf_bo_offset,
410                       SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state, ss8),
411                       surf_bo);
412     ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
413     dri_bo_unmap(ss_bo);
414 }
415
416
417 static void
418 gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
419                            dri_bo *surf_bo, unsigned long surf_bo_offset,
420                            int width, int height, int wpitch,
421                            int xoffset, int yoffset,
422                            int format, int interleave_chroma,
423                            int index)
424 {
425     struct i965_driver_data *i965 = i965_driver_data(ctx);
426     struct gen8_surface_state2 *ss2;
427     dri_bo *ss2_bo;
428     unsigned int tiling;
429     unsigned int swizzle;
430
431     dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
432     ss2_bo = pp_context->surface_state_binding_table.bo;
433     assert(ss2_bo);
434
435     dri_bo_map(ss2_bo, True);
436     assert(ss2_bo->virtual);
437     ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
438     memset(ss2, 0, sizeof(*ss2));
439
440     if (IS_GEN9(i965->intel.device_info))
441         ss2->ss5.surface_object_mocs = GEN9_CACHE_PTE;
442
443     ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
444     ss2->ss1.cbcr_pixel_offset_v_direction = 0;
445     ss2->ss1.width = width - 1;
446     ss2->ss1.height = height - 1;
447     ss2->ss2.pitch = wpitch - 1;
448     ss2->ss2.interleave_chroma = interleave_chroma;
449     ss2->ss2.surface_format = format;
450     ss2->ss3.x_offset_for_cb = xoffset;
451     ss2->ss3.y_offset_for_cb = yoffset;
452     gen8_pp_set_surface2_tiling(ss2, tiling);
453     dri_bo_emit_reloc(ss2_bo,
454                       I915_GEM_DOMAIN_RENDER, 0,
455                       surf_bo_offset,
456                       SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state2, ss6),
457                       surf_bo);
458     ((unsigned int *)((char *)ss2_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
459     dri_bo_unmap(ss2_bo);
460 }
461
462 static void
463 gen8_pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
464                                      const struct i965_surface *surface,
465                                      int base_index, int is_target,
466                                      const VARectangle *rect,
467                                      int *width, int *height, int *pitch, int *offset)
468 {
469     struct object_surface *obj_surface;
470     struct object_image *obj_image;
471     dri_bo *bo;
472     int fourcc = pp_get_surface_fourcc(ctx, surface);
473     const i965_fourcc_info *fourcc_info = get_fourcc_info(fourcc);
474
475     if (fourcc_info == NULL)
476         return;
477
478     if (surface->type == I965_SURFACE_TYPE_SURFACE) {
479         obj_surface = (struct object_surface *)surface->base;
480         bo = obj_surface->bo;
481         width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
482         height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
483         pitch[0] = obj_surface->width;
484         offset[0] = 0;
485
486         if (fourcc_info->num_planes == 1 && is_target)
487             width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
488
489         width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
490         height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
491         pitch[1] = obj_surface->cb_cr_pitch;
492         offset[1] = obj_surface->y_cb_offset * obj_surface->width;
493
494         width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
495         height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
496         pitch[2] = obj_surface->cb_cr_pitch;
497         offset[2] = obj_surface->y_cr_offset * obj_surface->width;
498     } else {
499         int U = 0, V = 0;
500
501         /* FIXME: add support for ARGB/ABGR image */
502         obj_image = (struct object_image *)surface->base;
503         bo = obj_image->bo;
504         width[0] = MIN(rect->x + rect->width, obj_image->image.width);
505         height[0] = MIN(rect->y + rect->height, obj_image->image.height);
506         pitch[0] = obj_image->image.pitches[0];
507         offset[0] = obj_image->image.offsets[0];
508
509         if (fourcc_info->num_planes == 1) {
510             if (is_target)
511                 width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
512         } else if (fourcc_info->num_planes == 2) {
513             U = 1, V = 1;
514         } else {
515             assert(fourcc_info->num_components == 3);
516
517             U = fourcc_info->components[1].plane;
518             V = fourcc_info->components[2].plane;
519             assert((U == 1 && V == 2) ||
520                    (U == 2 && V == 1));
521         }
522
523         /* Always set width/height although they aren't used for fourcc_info->num_planes == 1 */
524         width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
525         height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
526         pitch[1] = obj_image->image.pitches[U];
527         offset[1] = obj_image->image.offsets[U];
528
529         width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
530         height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
531         pitch[2] = obj_image->image.pitches[V];
532         offset[2] = obj_image->image.offsets[V];
533     }
534
535     if (is_target) {
536         gen8_pp_set_surface_state(ctx, pp_context,
537                                   bo, 0,
538                                   ALIGN(width[0], 4) / 4, height[0], pitch[0],
539                                   I965_SURFACEFORMAT_R8_UINT,
540                                   base_index, 1);
541
542         if (fourcc_info->num_planes == 2) {
543             gen8_pp_set_surface_state(ctx, pp_context,
544                                       bo, offset[1],
545                                       ALIGN(width[1], 2) / 2, height[1], pitch[1],
546                                       I965_SURFACEFORMAT_R8G8_SINT,
547                                       base_index + 1, 1);
548         } else if (fourcc_info->num_planes == 3) {
549             gen8_pp_set_surface_state(ctx, pp_context,
550                                       bo, offset[1],
551                                       ALIGN(width[1], 4) / 4, height[1], pitch[1],
552                                       I965_SURFACEFORMAT_R8_SINT,
553                                       base_index + 1, 1);
554             gen8_pp_set_surface_state(ctx, pp_context,
555                                       bo, offset[2],
556                                       ALIGN(width[2], 4) / 4, height[2], pitch[2],
557                                       I965_SURFACEFORMAT_R8_SINT,
558                                       base_index + 2, 1);
559         }
560
561         if (fourcc_info->format == I965_COLOR_RGB) {
562             struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
563             /* the format is MSB: X-B-G-R */
564             pp_static_parameter->grf2.save_avs_rgb_swap = 0;
565             if ((fourcc == VA_FOURCC_BGRA) ||
566                 (fourcc == VA_FOURCC_BGRX)) {
567                 /* It is stored as MSB: X-R-G-B */
568                 pp_static_parameter->grf2.save_avs_rgb_swap = 1;
569             }
570         }
571     } else {
572         int format0 = SURFACE_FORMAT_Y8_UNORM;
573
574         switch (fourcc) {
575         case VA_FOURCC_YUY2:
576             format0 = SURFACE_FORMAT_YCRCB_NORMAL;
577             break;
578
579         case VA_FOURCC_UYVY:
580             format0 = SURFACE_FORMAT_YCRCB_SWAPY;
581             break;
582
583         default:
584             break;
585         }
586
587         if (fourcc_info->format == I965_COLOR_RGB) {
588             struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
589             /* Only R8G8B8A8_UNORM is supported for BGRX or RGBX */
590             format0 = SURFACE_FORMAT_R8G8B8A8_UNORM;
591             pp_static_parameter->grf2.src_avs_rgb_swap = 0;
592             if ((fourcc == VA_FOURCC_BGRA) ||
593                 (fourcc == VA_FOURCC_BGRX)) {
594                 pp_static_parameter->grf2.src_avs_rgb_swap = 1;
595             }
596         }
597
598         gen8_pp_set_surface2_state(ctx, pp_context,
599                                    bo, offset[0],
600                                    width[0], height[0], pitch[0],
601                                    0, 0,
602                                    format0, 0,
603                                    base_index);
604
605         if (fourcc_info->num_planes == 2) {
606             gen8_pp_set_surface2_state(ctx, pp_context,
607                                        bo, offset[1],
608                                        width[1], height[1], pitch[1],
609                                        0, 0,
610                                        SURFACE_FORMAT_R8B8_UNORM, 0,
611                                        base_index + 1);
612         } else if (fourcc_info->num_planes == 3) {
613             gen8_pp_set_surface2_state(ctx, pp_context,
614                                        bo, offset[1],
615                                        width[1], height[1], pitch[1],
616                                        0, 0,
617                                        SURFACE_FORMAT_R8_UNORM, 0,
618                                        base_index + 1);
619             gen8_pp_set_surface2_state(ctx, pp_context,
620                                        bo, offset[2],
621                                        width[2], height[2], pitch[2],
622                                        0, 0,
623                                        SURFACE_FORMAT_R8_UNORM, 0,
624                                        base_index + 2);
625         }
626
627         gen8_pp_set_surface_state(ctx, pp_context,
628                                   bo, 0,
629                                   ALIGN(width[0], 4) / 4, height[0], pitch[0],
630                                   I965_SURFACEFORMAT_R8_UINT,
631                                   base_index + 3, 1);
632
633         if (fourcc_info->num_planes == 2) {
634             gen8_pp_set_surface_state(ctx, pp_context,
635                                       bo, offset[1],
636                                       ALIGN(width[1], 2) / 2, height[1], pitch[1],
637                                       I965_SURFACEFORMAT_R8G8_SINT,
638                                       base_index + 4, 1);
639         } else if (fourcc_info->num_planes == 3) {
640             gen8_pp_set_surface_state(ctx, pp_context,
641                                       bo, offset[1],
642                                       ALIGN(width[1], 4) / 4, height[1], pitch[1],
643                                       I965_SURFACEFORMAT_R8_SINT,
644                                       base_index + 4, 1);
645             gen8_pp_set_surface_state(ctx, pp_context,
646                                       bo, offset[2],
647                                       ALIGN(width[2], 4) / 4, height[2], pitch[2],
648                                       I965_SURFACEFORMAT_R8_SINT,
649                                       base_index + 5, 1);
650         }
651     }
652 }
653
654 static int
655 pp_null_x_steps(void *private_context)
656 {
657     return 1;
658 }
659
660 static int
661 pp_null_y_steps(void *private_context)
662 {
663     return 1;
664 }
665
666 static int
667 pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
668 {
669     return 0;
670 }
671
672 VAStatus
673 pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
674                    const struct i965_surface *src_surface,
675                    const VARectangle *src_rect,
676                    struct i965_surface *dst_surface,
677                    const VARectangle *dst_rect,
678                    void *filter_param)
679 {
680     /* private function & data */
681     pp_context->pp_x_steps = pp_null_x_steps;
682     pp_context->pp_y_steps = pp_null_y_steps;
683     pp_context->private_context = NULL;
684     pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
685
686     dst_surface->flags = src_surface->flags;
687
688     return VA_STATUS_SUCCESS;
689 }
690
691 static void calculate_boundary_block_mask(struct i965_post_processing_context *pp_context, const VARectangle *dst_rect)
692 {
693     int i, dst_width_adjust;
694     /* The x offset of the dest surface must be dword aligned,
695      * so we extend the dst surface on the left edge and mask out the pixels we are not interested in.
696      */
697     if (dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT) {
698         pp_context->block_horizontal_mask_left = 0;
699         for (i = dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT; i < GPU_ASM_BLOCK_WIDTH; i++) {
700             pp_context->block_horizontal_mask_left |= 1 << i;
701         }
702     } else {
703         pp_context->block_horizontal_mask_left = 0xffff;
704     }
705
706     dst_width_adjust = dst_rect->width + dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT;
707     if (dst_width_adjust % GPU_ASM_BLOCK_WIDTH) {
708         pp_context->block_horizontal_mask_right = (1 << (dst_width_adjust % GPU_ASM_BLOCK_WIDTH)) - 1;
709     } else {
710         pp_context->block_horizontal_mask_right = 0xffff;
711     }
712
713     if (dst_rect->height % GPU_ASM_BLOCK_HEIGHT) {
714         pp_context->block_vertical_mask_bottom = (1 << (dst_rect->height % GPU_ASM_BLOCK_HEIGHT)) - 1;
715     } else {
716         pp_context->block_vertical_mask_bottom = 0xff;
717     }
718
719 }
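/*
 * Worked example of the mask math above (hypothetical values): with
 * dst_rect->x == 6 and dst_rect->width == 70, the destination is extended
 * left by 6 % 4 == 2 pixels, so block_horizontal_mask_left == 0xfffc (the
 * two left-most pixels of the first 16-pixel block are masked out).  The
 * adjusted width is 72 and 72 % 16 == 8, giving block_horizontal_mask_right
 * == 0x00ff.  With dst_rect->height == 70, 70 % 8 == 6 gives
 * block_vertical_mask_bottom == 0x3f.
 */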
720
721 static int
722 gen7_pp_avs_x_steps(void *private_context)
723 {
724     struct pp_avs_context *pp_avs_context = private_context;
725
726     return pp_avs_context->dest_w / 16;
727 }
728
729 static int
730 gen7_pp_avs_y_steps(void *private_context)
731 {
732     struct pp_avs_context *pp_avs_context = private_context;
733
734     return pp_avs_context->dest_h / 16;
735 }
736
737 static int
738 gen7_pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
739 {
740     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)pp_context->private_context;
741     struct gen7_pp_inline_parameter *pp_inline_parameter = pp_context->pp_inline_parameter;
742
743     pp_inline_parameter->grf9.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
744     pp_inline_parameter->grf9.destination_block_vertical_origin = y * 16 + pp_avs_context->dest_y;
745     pp_inline_parameter->grf9.constant_0 = 0xffffffff;
746     pp_inline_parameter->grf9.sampler_load_main_video_x_scaling_step = pp_avs_context->horiz_range / pp_avs_context->src_w;
747
748     return 0;
749 }
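/*
 * Each MEDIA_OBJECT emitted by gen8_pp_object_walker() handles one 16x16
 * destination block: gen7_pp_avs_x_steps()/gen7_pp_avs_y_steps() give the
 * block counts (dest_w/dest_h are 16-aligned in gen8_pp_plx_avs_initialize()),
 * and the inline parameter above carries the block's pixel origin within the
 * destination surface.
 */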
750
751 static void gen7_update_src_surface_uv_offset(VADriverContextP    ctx,
752                                               struct i965_post_processing_context *pp_context,
753                                               const struct i965_surface *surface)
754 {
755     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
756     int fourcc = pp_get_surface_fourcc(ctx, surface);
757
758     if (fourcc == VA_FOURCC_YUY2) {
759         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 0;
760         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 1;
761         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 3;
762     } else if (fourcc == VA_FOURCC_UYVY) {
763         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 1;
764         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 0;
765         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 2;
766     }
767 }
768
769 static const AVSConfig gen8_avs_config = {
770     .coeff_frac_bits = 6,
771     .coeff_epsilon = 1.0f / (1U << 6),
772     .num_phases = 16,
773     .num_luma_coeffs = 8,
774     .num_chroma_coeffs = 4,
775
776     .coeff_range = {
777         .lower_bound = {
778             .y_k_h = { -2, -2, -2, -2, -2, -2, -2, -2 },
779             .y_k_v = { -2, -2, -2, -2, -2, -2, -2, -2 },
780             .uv_k_h = { -1, -2, -2, -1 },
781             .uv_k_v = { -1, -2, -2, -1 },
782         },
783         .upper_bound = {
784             .y_k_h = { 2, 2, 2, 2, 2, 2, 2, 2 },
785             .y_k_v = { 2, 2, 2, 2, 2, 2, 2, 2 },
786             .uv_k_h = { 1, 2, 2, 1 },
787             .uv_k_v = { 1, 2, 2, 1 },
788         },
789     },
790 };
791
792 static int
793 gen8_pp_get_8tap_filter_mode(VADriverContextP ctx,
794                              const struct i965_surface *surface)
795 {
796     int fourcc = pp_get_surface_fourcc(ctx, surface);
797
798     if (fourcc == VA_FOURCC_YUY2 ||
799         fourcc == VA_FOURCC_UYVY)
800         return 1;
801     else
802         return 3;
803 }
804
805 static int
806 gen8_pp_kernel_use_media_read_msg(VADriverContextP ctx,
807                                   const struct i965_surface *src_surface,
808                                   const VARectangle *src_rect,
809                                   const struct i965_surface *dst_surface,
810                                   const VARectangle *dst_rect)
811 {
812     int src_fourcc = pp_get_surface_fourcc(ctx, src_surface);
813     int dst_fourcc = pp_get_surface_fourcc(ctx, dst_surface);
814     const i965_fourcc_info *src_fourcc_info = get_fourcc_info(src_fourcc);
815     const i965_fourcc_info *dst_fourcc_info = get_fourcc_info(dst_fourcc);
816
817     if (!src_fourcc_info ||
818         src_fourcc_info->subsampling != SUBSAMPLE_YUV420 ||
819         !dst_fourcc_info ||
820         dst_fourcc_info->subsampling != SUBSAMPLE_YUV420)
821         return 0;
822
823     if (src_rect->x == dst_rect->x &&
824         src_rect->y == dst_rect->y &&
825         src_rect->width == dst_rect->width &&
826         src_rect->height == dst_rect->height)
827         return 1;
828
829     return 0;
830 }
831
832 VAStatus
833 gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
834                            const struct i965_surface *src_surface,
835                            const VARectangle *src_rect,
836                            struct i965_surface *dst_surface,
837                            const VARectangle *dst_rect,
838                            void *filter_param)
839 {
840     /* TODO: Add the sampler_8x8 state */
841     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->pp_avs_context;
842     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
843     struct gen8_sampler_8x8_avs *sampler_8x8;
844     int i;
845     int width[3], height[3], pitch[3], offset[3];
846     int src_width, src_height;
847     unsigned char *cc_ptr;
848     AVSState * const avs = &pp_avs_context->state;
849     float sx, sy;
850     const float * yuv_to_rgb_coefs;
851     size_t yuv_to_rgb_coefs_size;
852
853     memset(pp_static_parameter, 0, sizeof(struct gen7_pp_static_parameter));
854
855     /* source surface */
856     gen8_pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 0, 0,
857                                          src_rect,
858                                          width, height, pitch, offset);
859     src_height = height[0];
860     src_width  = width[0];
861
862     /* destination surface */
863     gen8_pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 24, 1,
864                                          dst_rect,
865                                          width, height, pitch, offset);
866
867     /* sampler 8x8 state */
868     dri_bo_map(pp_context->dynamic_state.bo, True);
869     assert(pp_context->dynamic_state.bo->virtual);
870
871     cc_ptr = (unsigned char *) pp_context->dynamic_state.bo->virtual +
872              pp_context->sampler_offset;
873     /* Currently only one gen8 sampler_8x8 is initialized */
874     sampler_8x8 = (struct gen8_sampler_8x8_avs *) cc_ptr;
875     memset(sampler_8x8, 0, sizeof(*sampler_8x8));
876
877     sampler_8x8->dw0.gain_factor = 44;
878     sampler_8x8->dw0.weak_edge_threshold = 1;
879     sampler_8x8->dw0.strong_edge_threshold = 8;
880     /* Use the same value as on Ivybridge instead of the default:
881      * sampler_8x8->dw0.r3x_coefficient = 5;
882      */
883     sampler_8x8->dw0.r3x_coefficient = 27;
884     sampler_8x8->dw0.r3c_coefficient = 5;
885
886     sampler_8x8->dw2.global_noise_estimation = 255;
887     sampler_8x8->dw2.non_edge_weight = 1;
888     sampler_8x8->dw2.regular_weight = 2;
889     sampler_8x8->dw2.strong_edge_weight = 7;
890     /* Use the same values as on Ivybridge instead of the defaults:
891      * sampler_8x8->dw2.r5x_coefficient = 7;
892      * sampler_8x8->dw2.r5cx_coefficient = 7;
893      * sampler_8x8->dw2.r5c_coefficient = 7;
894      */
895     sampler_8x8->dw2.r5x_coefficient = 9;
896     sampler_8x8->dw2.r5cx_coefficient = 8;
897     sampler_8x8->dw2.r5c_coefficient = 3;
898
899     sampler_8x8->dw3.sin_alpha = 101; /* sin_alpha = 0 */
900     sampler_8x8->dw3.cos_alpha = 79; /* cos_alpha = 0 */
901     sampler_8x8->dw3.sat_max = 0x1f;
902     sampler_8x8->dw3.hue_max = 14;
903     /* The 8-tap filter mode determines whether the adaptive filter is
904      * applied for all channels (dw153).
905      * If the 8-tap filter is disabled, the adaptive filter must also be disabled;
906      * it may be enabled only when the 8-tap filter is enabled.
907      */
908     sampler_8x8->dw3.enable_8tap_filter = gen8_pp_get_8tap_filter_mode(ctx, src_surface);
909     sampler_8x8->dw3.ief4_smooth_enable = 0;
910
911     sampler_8x8->dw4.s3u = 0;
912     sampler_8x8->dw4.diamond_margin = 4;
913     sampler_8x8->dw4.vy_std_enable = 0;
914     sampler_8x8->dw4.umid = 110;
915     sampler_8x8->dw4.vmid = 154;
916
917     sampler_8x8->dw5.diamond_dv = 0;
918     sampler_8x8->dw5.diamond_th = 35;
919     sampler_8x8->dw5.diamond_alpha = 100; /* diamond_alpha = 0 */
920     sampler_8x8->dw5.hs_margin = 3;
921     sampler_8x8->dw5.diamond_du = 2;
922
923     sampler_8x8->dw6.y_point1 = 46;
924     sampler_8x8->dw6.y_point2 = 47;
925     sampler_8x8->dw6.y_point3 = 254;
926     sampler_8x8->dw6.y_point4 = 255;
927
928     sampler_8x8->dw7.inv_margin_vyl = 3300; /* inv_margin_vyl = 0 */
929
930     sampler_8x8->dw8.inv_margin_vyu = 1600; /* inv_margin_vyu = 0 */
931     sampler_8x8->dw8.p0l = 46;
932     sampler_8x8->dw8.p1l = 216;
933
934     sampler_8x8->dw9.p2l = 236;
935     sampler_8x8->dw9.p3l = 236;
936     sampler_8x8->dw9.b0l = 133;
937     sampler_8x8->dw9.b1l = 130;
938
939     sampler_8x8->dw10.b2l = 130;
940     sampler_8x8->dw10.b3l = 130;
941     /* s0l = -5 / 256. s2.8 */
942     sampler_8x8->dw10.s0l = 1029;    /* s0l = 0 */
943     sampler_8x8->dw10.y_slope2 = 31; /* y_slope2 = 0 */
944
945     sampler_8x8->dw11.s1l = 0;
946     sampler_8x8->dw11.s2l = 0;
947
948     sampler_8x8->dw12.s3l = 0;
949     sampler_8x8->dw12.p0u = 46;
950     sampler_8x8->dw12.p1u = 66;
951     sampler_8x8->dw12.y_slope1 = 31; /* y_slope1 = 0 */
952
953     sampler_8x8->dw13.p2u = 130;
954     sampler_8x8->dw13.p3u = 236;
955     sampler_8x8->dw13.b0u = 143;
956     sampler_8x8->dw13.b1u = 163;
957
958     sampler_8x8->dw14.b2u = 200;
959     sampler_8x8->dw14.b3u = 140;
960     sampler_8x8->dw14.s0u = 256;  /* s0u = 0 */
961
962     sampler_8x8->dw15.s1u = 113; /* s1u = 0 */
963     sampler_8x8->dw15.s2u = 1203; /* s2u = 0 */
964
965     sx = (float)dst_rect->width / src_rect->width;
966     sy = (float)dst_rect->height / src_rect->height;
967     avs_update_coefficients(avs, sx, sy, pp_context->filter_flags);
968
969     assert(avs->config->num_phases >= 16);
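    /*
     * The loops below convert the generic floating-point AVS coefficients to
     * the hardware layout: intel_format_convert(v, 1, 6, 1) packs each value
     * as signed 1.6 fixed point (matching coeff_frac_bits == 6 in
     * gen8_avs_config).  Phases 0..16 go into coefficients[]; any remaining
     * phases (on configs with more than 16 phases) go into coefficients1[].
     */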
970     for (i = 0; i <= 16; i++) {
971         struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
972                     &sampler_8x8->coefficients[i];
973         const AVSCoeffs * const coeffs = &avs->coeffs[i];
974
975         sampler_8x8_state->dw0.table_0x_filter_c0 =
976             intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
977         sampler_8x8_state->dw0.table_0y_filter_c0 =
978             intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
979         sampler_8x8_state->dw0.table_0x_filter_c1 =
980             intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
981         sampler_8x8_state->dw0.table_0y_filter_c1 =
982             intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
983
984         sampler_8x8_state->dw1.table_0x_filter_c2 =
985             intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
986         sampler_8x8_state->dw1.table_0y_filter_c2 =
987             intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
988         sampler_8x8_state->dw1.table_0x_filter_c3 =
989             intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
990         sampler_8x8_state->dw1.table_0y_filter_c3 =
991             intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
992
993         sampler_8x8_state->dw2.table_0x_filter_c4 =
994             intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
995         sampler_8x8_state->dw2.table_0y_filter_c4 =
996             intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
997         sampler_8x8_state->dw2.table_0x_filter_c5 =
998             intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
999         sampler_8x8_state->dw2.table_0y_filter_c5 =
1000             intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
1001
1002         sampler_8x8_state->dw3.table_0x_filter_c6 =
1003             intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
1004         sampler_8x8_state->dw3.table_0y_filter_c6 =
1005             intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
1006         sampler_8x8_state->dw3.table_0x_filter_c7 =
1007             intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
1008         sampler_8x8_state->dw3.table_0y_filter_c7 =
1009             intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
1010
1011         sampler_8x8_state->dw4.pad0 = 0;
1012         sampler_8x8_state->dw5.pad0 = 0;
1013         sampler_8x8_state->dw4.table_1x_filter_c2 =
1014             intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
1015         sampler_8x8_state->dw4.table_1x_filter_c3 =
1016             intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
1017         sampler_8x8_state->dw5.table_1x_filter_c4 =
1018             intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
1019         sampler_8x8_state->dw5.table_1x_filter_c5 =
1020             intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
1021
1022         sampler_8x8_state->dw6.pad0 =
1023             sampler_8x8_state->dw7.pad0 =
1024                 sampler_8x8_state->dw6.table_1y_filter_c2 =
1025                     intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
1026         sampler_8x8_state->dw6.table_1y_filter_c3 =
1027             intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
1028         sampler_8x8_state->dw7.table_1y_filter_c4 =
1029             intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
1030         sampler_8x8_state->dw7.table_1y_filter_c5 =
1031             intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
1032     }
1033
1034     sampler_8x8->dw152.default_sharpness_level =
1035         -avs_is_needed(pp_context->filter_flags);
1036     sampler_8x8->dw153.adaptive_filter_for_all_channel = 1;
1037     sampler_8x8->dw153.bypass_y_adaptive_filtering = 1;
1038     sampler_8x8->dw153.bypass_x_adaptive_filtering = 1;
1039
1040     for (; i <= avs->config->num_phases; i++) {
1041         struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
1042                     &sampler_8x8->coefficients1[i - 17];
1043         const AVSCoeffs * const coeffs = &avs->coeffs[i];
1044
1045         sampler_8x8_state->dw0.table_0x_filter_c0 =
1046             intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
1047         sampler_8x8_state->dw0.table_0y_filter_c0 =
1048             intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
1049         sampler_8x8_state->dw0.table_0x_filter_c1 =
1050             intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
1051         sampler_8x8_state->dw0.table_0y_filter_c1 =
1052             intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
1053
1054         sampler_8x8_state->dw1.table_0x_filter_c2 =
1055             intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
1056         sampler_8x8_state->dw1.table_0y_filter_c2 =
1057             intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
1058         sampler_8x8_state->dw1.table_0x_filter_c3 =
1059             intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
1060         sampler_8x8_state->dw1.table_0y_filter_c3 =
1061             intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
1062
1063         sampler_8x8_state->dw2.table_0x_filter_c4 =
1064             intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
1065         sampler_8x8_state->dw2.table_0y_filter_c4 =
1066             intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
1067         sampler_8x8_state->dw2.table_0x_filter_c5 =
1068             intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
1069         sampler_8x8_state->dw2.table_0y_filter_c5 =
1070             intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
1071
1072         sampler_8x8_state->dw3.table_0x_filter_c6 =
1073             intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
1074         sampler_8x8_state->dw3.table_0y_filter_c6 =
1075             intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
1076         sampler_8x8_state->dw3.table_0x_filter_c7 =
1077             intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
1078         sampler_8x8_state->dw3.table_0y_filter_c7 =
1079             intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
1080
1081         sampler_8x8_state->dw4.pad0 = 0;
1082         sampler_8x8_state->dw5.pad0 = 0;
1083         sampler_8x8_state->dw4.table_1x_filter_c2 =
1084             intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
1085         sampler_8x8_state->dw4.table_1x_filter_c3 =
1086             intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
1087         sampler_8x8_state->dw5.table_1x_filter_c4 =
1088             intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
1089         sampler_8x8_state->dw5.table_1x_filter_c5 =
1090             intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
1091
1092         sampler_8x8_state->dw6.pad0 =
1093             sampler_8x8_state->dw7.pad0 =
1094                 sampler_8x8_state->dw6.table_1y_filter_c2 =
1095                     intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
1096         sampler_8x8_state->dw6.table_1y_filter_c3 =
1097             intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
1098         sampler_8x8_state->dw7.table_1y_filter_c4 =
1099             intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
1100         sampler_8x8_state->dw7.table_1y_filter_c5 =
1101             intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
1102     }
1103
1104     dri_bo_unmap(pp_context->dynamic_state.bo);
1105
1106
1107     /* private function & data */
1108     pp_context->pp_x_steps = gen7_pp_avs_x_steps;
1109     pp_context->pp_y_steps = gen7_pp_avs_y_steps;
1110     pp_context->private_context = &pp_context->pp_avs_context;
1111     pp_context->pp_set_block_parameter = gen7_pp_avs_set_block_parameter;
1112
1113     int dst_left_edge_extend = dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT;
1114     pp_avs_context->dest_x = dst_rect->x - dst_left_edge_extend;
1115     pp_avs_context->dest_y = dst_rect->y;
1116     pp_avs_context->dest_w = ALIGN(dst_rect->width + dst_left_edge_extend, 16);
1117     pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
1118     pp_avs_context->src_w = src_rect->width;
1119     pp_avs_context->src_h = src_rect->height;
1120     pp_avs_context->horiz_range = (float)src_rect->width / src_width;
1121
1122     int dw = (pp_avs_context->src_w - 1) / 16 + 1;
1123     dw = MAX(dw, dst_rect->width + dst_left_edge_extend);
1124
1125     pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
1126     pp_static_parameter->grf2.avs_wa_enable = gen8_pp_kernel_use_media_read_msg(ctx,
1127                                                                                 src_surface, src_rect,
1128                                                                                 dst_surface, dst_rect); /* reuse this flag for media block reading on gen8+ */
1129     pp_static_parameter->grf2.alpha = 255;
1130
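    /*
     * Rough sketch of the sampler math below: the source is addressed in
     * normalized [0,1] coordinates of the bound surface, so the vertical
     * scaling step is the normalized source-rect height divided by the
     * destination height (one step per destination row), and the frame
     * origins subtract the destination offset scaled back into source
     * coordinates so that dest_x/dest_y map onto src_rect.
     */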
1131     pp_static_parameter->grf3.sampler_load_horizontal_scaling_step_ratio = (float) pp_avs_context->src_w / dw;
1132     pp_static_parameter->grf4.sampler_load_vertical_scaling_step = (float) src_rect->height / src_height / dst_rect->height;
1133     pp_static_parameter->grf5.sampler_load_vertical_frame_origin = (float) src_rect->y / src_height -
1134                                                                    (float) pp_avs_context->dest_y * pp_static_parameter->grf4.sampler_load_vertical_scaling_step;
1135     pp_static_parameter->grf6.sampler_load_horizontal_frame_origin = (float) src_rect->x / src_width -
1136                                                                      (float) pp_avs_context->dest_x * pp_avs_context->horiz_range / dw;
1137
1138     gen7_update_src_surface_uv_offset(ctx, pp_context, dst_surface);
1139
1140     yuv_to_rgb_coefs = i915_color_standard_to_coefs(i915_filter_to_color_standard(src_surface->flags &
1141                                                                                   VA_SRC_COLOR_MASK),
1142                                                     &yuv_to_rgb_coefs_size);
1143     memcpy(&pp_static_parameter->grf7, yuv_to_rgb_coefs, yuv_to_rgb_coefs_size);
1144
1145     dst_surface->flags = src_surface->flags;
1146
1147     return VA_STATUS_SUCCESS;
1148 }
1149
1150 VAStatus
1151 gen8_pp_initialize(
1152     VADriverContextP   ctx,
1153     struct i965_post_processing_context *pp_context,
1154     const struct i965_surface *src_surface,
1155     const VARectangle *src_rect,
1156     struct i965_surface *dst_surface,
1157     const VARectangle *dst_rect,
1158     int                pp_index,
1159     void * filter_param
1160 )
1161 {
1162     VAStatus va_status;
1163     struct i965_driver_data *i965 = i965_driver_data(ctx);
1164     dri_bo *bo;
1165     int bo_size;
1166     unsigned int end_offset;
1167     struct pp_module *pp_module;
1168     int static_param_size, inline_param_size;
1169
1170     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1171     bo = dri_bo_alloc(i965->intel.bufmgr,
1172                       "surface state & binding table",
1173                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_PP_SURFACES,
1174                       4096);
1175     assert(bo);
1176     pp_context->surface_state_binding_table.bo = bo;
1177
1178     pp_context->idrt.num_interface_descriptors = 0;
1179
1180     pp_context->sampler_size = 4 * 4096;
1181
1182     bo_size = 4096 + pp_context->curbe_size + pp_context->sampler_size
1183               + pp_context->idrt_size;
1184
1185     dri_bo_unreference(pp_context->dynamic_state.bo);
1186     bo = dri_bo_alloc(i965->intel.bufmgr,
1187                       "dynamic_state",
1188                       bo_size,
1189                       4096);
1190
1191     assert(bo);
1192     pp_context->dynamic_state.bo = bo;
1193     pp_context->dynamic_state.bo_size = bo_size;
1194
1195     end_offset = 0;
1196     pp_context->dynamic_state.end_offset = 0;
1197
1198     /* Constant buffer offset */
1199     pp_context->curbe_offset = ALIGN(end_offset, 64);
1200     end_offset = pp_context->curbe_offset + pp_context->curbe_size;
1201
1202     /* Interface descriptor offset */
1203     pp_context->idrt_offset = ALIGN(end_offset, 64);
1204     end_offset = pp_context->idrt_offset + pp_context->idrt_size;
1205
1206     /* Sampler state offset */
1207     pp_context->sampler_offset = ALIGN(end_offset, 64);
1208     end_offset = pp_context->sampler_offset + pp_context->sampler_size;
1209
1210     /* update the end offset of dynamic_state */
1211     pp_context->dynamic_state.end_offset = ALIGN(end_offset, 64);
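    /*
     * Resulting dynamic_state layout (each section 64-byte aligned):
     *   [curbe_offset,   +curbe_size)    constant buffer (CURBE)
     *   [idrt_offset,    +idrt_size)     interface descriptor table
     *   [sampler_offset, +sampler_size)  sampler_8x8 / AVS state
     */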
1212
1213     static_param_size = sizeof(struct gen7_pp_static_parameter);
1214     inline_param_size = sizeof(struct gen7_pp_inline_parameter);
1215
1216     memset(pp_context->pp_static_parameter, 0, static_param_size);
1217     memset(pp_context->pp_inline_parameter, 0, inline_param_size);
1218
1219     assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
1220     pp_context->current_pp = pp_index;
1221     pp_module = &pp_context->pp_modules[pp_index];
1222
1223     if (pp_module->initialize)
1224         va_status = pp_module->initialize(ctx, pp_context,
1225                                           src_surface,
1226                                           src_rect,
1227                                           dst_surface,
1228                                           dst_rect,
1229                                           filter_param);
1230     else
1231         va_status = VA_STATUS_ERROR_UNIMPLEMENTED;
1232
1233     calculate_boundary_block_mask(pp_context, dst_rect);
1234
1235     return va_status;
1236 }
1237
1238 static void
1239 gen8_pp_interface_descriptor_table(VADriverContextP   ctx,
1240                                    struct i965_post_processing_context *pp_context)
1241 {
1242     struct gen8_interface_descriptor_data *desc;
1243     dri_bo *bo;
1244     int pp_index = pp_context->current_pp;
1245     unsigned char *cc_ptr;
1246
1247     bo = pp_context->dynamic_state.bo;
1248
1249     dri_bo_map(bo, 1);
1250     assert(bo->virtual);
1251     cc_ptr = (unsigned char *)bo->virtual + pp_context->idrt_offset;
1252
1253     desc = (struct gen8_interface_descriptor_data *) cc_ptr +
1254            pp_context->idrt.num_interface_descriptors;
1255
1256     memset(desc, 0, sizeof(*desc));
1257     desc->desc0.kernel_start_pointer =
1258         pp_context->pp_modules[pp_index].kernel.kernel_offset >> 6; /* reloc */
1259     desc->desc2.single_program_flow = 1;
1260     desc->desc2.floating_point_mode = FLOATING_POINT_IEEE_754;
1261     desc->desc3.sampler_count = 0;      /* 1 - 4 samplers used */
1262     desc->desc3.sampler_state_pointer = pp_context->sampler_offset >> 5;
1263     desc->desc4.binding_table_entry_count = 0;
1264     desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
1265     desc->desc5.constant_urb_entry_read_offset = 0;
1266
1267     desc->desc5.constant_urb_entry_read_length = 8; /* grf 1-8 */
1268
1269     dri_bo_unmap(bo);
1270     pp_context->idrt.num_interface_descriptors++;
1271 }
1272
1273
1274 static void
1275 gen8_pp_upload_constants(VADriverContextP ctx,
1276                          struct i965_post_processing_context *pp_context)
1277 {
1278     unsigned char *constant_buffer;
1279     int param_size;
1280
1281     assert(sizeof(struct gen7_pp_static_parameter) == 256);
1282
1283     param_size = sizeof(struct gen7_pp_static_parameter);
1284
1285     dri_bo_map(pp_context->dynamic_state.bo, 1);
1286     assert(pp_context->dynamic_state.bo->virtual);
1287     constant_buffer = (unsigned char *) pp_context->dynamic_state.bo->virtual +
1288                       pp_context->curbe_offset;
1289
1290     memcpy(constant_buffer, pp_context->pp_static_parameter, param_size);
1291     dri_bo_unmap(pp_context->dynamic_state.bo);
1292     return;
1293 }
1294
1295 void
1296 gen8_pp_states_setup(VADriverContextP ctx,
1297                      struct i965_post_processing_context *pp_context)
1298 {
1299     gen8_pp_interface_descriptor_table(ctx, pp_context);
1300     gen8_pp_upload_constants(ctx, pp_context);
1301 }
1302
1303 static void
1304 gen6_pp_pipeline_select(VADriverContextP ctx,
1305                         struct i965_post_processing_context *pp_context)
1306 {
1307     struct intel_batchbuffer *batch = pp_context->batch;
1308
1309     BEGIN_BATCH(batch, 1);
1310     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
1311     ADVANCE_BATCH(batch);
1312 }
1313
1314 static void
1315 gen8_pp_state_base_address(VADriverContextP ctx,
1316                            struct i965_post_processing_context *pp_context)
1317 {
1318     struct intel_batchbuffer *batch = pp_context->batch;
1319
1320     BEGIN_BATCH(batch, 16);
1321     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));
1322     /* DW1-3. General state base address */
1323     OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1324     OUT_BATCH(batch, 0);
1325     OUT_BATCH(batch, 0);
1326
1327     /* DW4-5. Surface state address */
1328     OUT_RELOC64(batch, pp_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1329
1330     /* DW6-7. Dynamic state address */
1331     OUT_RELOC64(batch, pp_context->dynamic_state.bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
1332                 0, 0 | BASE_ADDRESS_MODIFY);
1333
1334     /* DW8. Indirect object address */
1335     OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1336     OUT_BATCH(batch, 0);
1337
1338     /* DW10-11. Instruction base address */
1339     OUT_RELOC64(batch, pp_context->instruction_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
1340
1341     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1342     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1343     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1344     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1345     ADVANCE_BATCH(batch);
1346 }
1347
1348 void
1349 gen8_pp_vfe_state(VADriverContextP ctx,
1350                   struct i965_post_processing_context *pp_context)
1351 {
1352     struct intel_batchbuffer *batch = pp_context->batch;
1353
1354     BEGIN_BATCH(batch, 9);
1355     OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
1356     OUT_BATCH(batch, 0);
1357     OUT_BATCH(batch, 0);
1358     OUT_BATCH(batch,
1359               (pp_context->vfe_gpu_state.max_num_threads - 1) << 16 |
1360               pp_context->vfe_gpu_state.num_urb_entries << 8);
1361     OUT_BATCH(batch, 0);
1362     OUT_BATCH(batch,
1363               (pp_context->vfe_gpu_state.urb_entry_size) << 16 |
1364               /* URB Entry Allocation Size, in 256-bit units */
1365               (pp_context->vfe_gpu_state.curbe_allocation_size));
1366     /* CURBE Allocation Size, in 256-bit units */
1367     OUT_BATCH(batch, 0);
1368     OUT_BATCH(batch, 0);
1369     OUT_BATCH(batch, 0);
1370     ADVANCE_BATCH(batch);
1371 }
1372
1373 void
1374 gen8_interface_descriptor_load(VADriverContextP ctx,
1375                                struct i965_post_processing_context *pp_context)
1376 {
1377     struct intel_batchbuffer *batch = pp_context->batch;
1378
1379     BEGIN_BATCH(batch, 6);
1380
1381     OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
1382     OUT_BATCH(batch, 0);
1383
1384     OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
1385     OUT_BATCH(batch, 0);
1386     OUT_BATCH(batch,
1387               pp_context->idrt.num_interface_descriptors * sizeof(struct gen8_interface_descriptor_data));
1388     OUT_BATCH(batch, pp_context->idrt_offset);
1389     ADVANCE_BATCH(batch);
1390 }
1391
1392 void
1393 gen8_pp_curbe_load(VADriverContextP ctx,
1394                    struct i965_post_processing_context *pp_context)
1395 {
1396     struct intel_batchbuffer *batch = pp_context->batch;
1397     int param_size = 64;
1398
1399     param_size = sizeof(struct gen7_pp_static_parameter);
1400
1401     BEGIN_BATCH(batch, 4);
1402     OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
1403     OUT_BATCH(batch, 0);
1404     OUT_BATCH(batch,
1405               param_size);
1406     OUT_BATCH(batch, pp_context->curbe_offset);
1407     ADVANCE_BATCH(batch);
1408 }
1409
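/* gen8_pp_object_walker() builds a second-level command buffer rather than
 * emitting the walk into the main batch: for every (x, y) block it writes a
 * MEDIA_OBJECT command whose inline data is the current gen7_pp_inline_parameter
 * (updated by pp_set_block_parameter), followed by a MEDIA_STATE_FLUSH, and the
 * whole buffer is terminated with MI_BATCH_BUFFER_END and chained from the main
 * batch via MI_BATCH_BUFFER_START. Each MEDIA_OBJECT is therefore
 * 6 + sizeof(struct gen7_pp_inline_parameter) / 4 DWords long, plus 2 DWords for
 * the flush, which is exactly the command_length_in_dws / extra_cmd_in_dws math
 * used for the allocation below.
 */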
1410 void
1411 gen8_pp_object_walker(VADriverContextP ctx,
1412                       struct i965_post_processing_context *pp_context)
1413 {
1414     struct i965_driver_data *i965 = i965_driver_data(ctx);
1415     struct intel_batchbuffer *batch = pp_context->batch;
1416     int x, x_steps, y, y_steps;
1417     int param_size, command_length_in_dws, extra_cmd_in_dws;
1418     dri_bo *command_buffer;
1419     unsigned int *command_ptr;
1420
1421     param_size = sizeof(struct gen7_pp_inline_parameter);
1422
1423     x_steps = pp_context->pp_x_steps(pp_context->private_context);
1424     y_steps = pp_context->pp_y_steps(pp_context->private_context);
1425     command_length_in_dws = 6 + (param_size >> 2);
1426     extra_cmd_in_dws = 2;
1427     command_buffer = dri_bo_alloc(i965->intel.bufmgr,
1428                                   "command objects buffer",
1429                                   (command_length_in_dws + extra_cmd_in_dws) * 4 * x_steps * y_steps + 64,
1430                                   4096);
1431
1432     dri_bo_map(command_buffer, 1);
1433     command_ptr = command_buffer->virtual;
1434
1435     for (y = 0; y < y_steps; y++) {
1436         for (x = 0; x < x_steps; x++) {
1437             if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {
1438
1439                 *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
1440                 *command_ptr++ = 0;
1441                 *command_ptr++ = 0;
1442                 *command_ptr++ = 0;
1443                 *command_ptr++ = 0;
1444                 *command_ptr++ = 0;
1445                 memcpy(command_ptr, pp_context->pp_inline_parameter, param_size);
1446                 command_ptr += (param_size >> 2);
1447
1448                 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
1449                 *command_ptr++ = 0;
1450             }
1451         }
1452     }
1453
1454     if ((command_length_in_dws + extra_cmd_in_dws) * x_steps * y_steps % 2 == 0)
1455         *command_ptr++ = 0;
1456
1457     *command_ptr++ = MI_BATCH_BUFFER_END;
1458     *command_ptr++ = 0;
1459
1460     dri_bo_unmap(command_buffer);
1461
1462     BEGIN_BATCH(batch, 3);
1463     OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
1464     OUT_RELOC64(batch, command_buffer,
1465                 I915_GEM_DOMAIN_COMMAND, 0, 0);
1466     ADVANCE_BATCH(batch);
1467
1468     dri_bo_unreference(command_buffer);
1469
1470     /* The batch buffer has to be executed here because the MI_BATCH_BUFFER_END
1471      * above causes control to pass back to the ring buffer.
1472      */
1473     intel_batchbuffer_end_atomic(batch);
1474     intel_batchbuffer_flush(batch);
1475     intel_batchbuffer_start_atomic(batch, 0x1000);
1476 }
1477
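/* The full media pipeline programming sequence for one post-processing pass:
 * flush, select the media pipe, program base addresses, VFE state, CURBE and
 * interface descriptors, then dispatch the per-block MEDIA_OBJECTs through the
 * object walker above. Everything is emitted inside one atomic batch section.
 */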
1478 static void
1479 gen8_pp_pipeline_setup(VADriverContextP ctx,
1480                        struct i965_post_processing_context *pp_context)
1481 {
1482     struct intel_batchbuffer *batch = pp_context->batch;
1483
1484     intel_batchbuffer_start_atomic(batch, 0x1000);
1485     intel_batchbuffer_emit_mi_flush(batch);
1486     gen6_pp_pipeline_select(ctx, pp_context);
1487     gen8_pp_state_base_address(ctx, pp_context);
1488     gen8_pp_vfe_state(ctx, pp_context);
1489     gen8_pp_curbe_load(ctx, pp_context);
1490     gen8_interface_descriptor_load(ctx, pp_context);
1491     gen8_pp_vfe_state(ctx, pp_context);
1492     gen8_pp_object_walker(ctx, pp_context);
1493     intel_batchbuffer_end_atomic(batch);
1494 }
1495
1496 static VAStatus
1497 gen8_post_processing(
1498     VADriverContextP   ctx,
1499     struct i965_post_processing_context *pp_context,
1500     const struct i965_surface *src_surface,
1501     const VARectangle *src_rect,
1502     struct i965_surface *dst_surface,
1503     const VARectangle *dst_rect,
1504     int                pp_index,
1505     void * filter_param
1506 )
1507 {
1508     VAStatus va_status;
1509
1510     va_status = gen8_pp_initialize(ctx, pp_context,
1511                                    src_surface,
1512                                    src_rect,
1513                                    dst_surface,
1514                                    dst_rect,
1515                                    pp_index,
1516                                    filter_param);
1517
1518     if (va_status == VA_STATUS_SUCCESS) {
1519         gen8_pp_states_setup(ctx, pp_context);
1520         gen8_pp_pipeline_setup(ctx, pp_context);
1521     }
1522
1523     return va_status;
1524 }
1525
1526 static void
1527 gen8_post_processing_context_finalize(VADriverContextP ctx,
1528                                       struct i965_post_processing_context *pp_context)
1529 {
1530     if (pp_context->scaling_gpe_context_initialized) {
1531         gen8_gpe_context_destroy(&pp_context->scaling_gpe_context);
1532         pp_context->scaling_gpe_context_initialized = 0;
1533     }
1534
1535     if (pp_context->vebox_proc_ctx) {
1536         gen75_vebox_context_destroy(ctx, pp_context->vebox_proc_ctx);
1537         pp_context->vebox_proc_ctx = NULL;
1538     }
1539
1540     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1541     pp_context->surface_state_binding_table.bo = NULL;
1542
1543     dri_bo_unreference(pp_context->pp_dn_context.stmm_bo);
1544     pp_context->pp_dn_context.stmm_bo = NULL;
1545
1546     if (pp_context->instruction_state.bo) {
1547         dri_bo_unreference(pp_context->instruction_state.bo);
1548         pp_context->instruction_state.bo = NULL;
1549     }
1550
1551     if (pp_context->indirect_state.bo) {
1552         dri_bo_unreference(pp_context->indirect_state.bo);
1553         pp_context->indirect_state.bo = NULL;
1554     }
1555
1556     if (pp_context->dynamic_state.bo) {
1557         dri_bo_unreference(pp_context->dynamic_state.bo);
1558         pp_context->dynamic_state.bo = NULL;
1559     }
1560
1561     free(pp_context->pp_static_parameter);
1562     free(pp_context->pp_inline_parameter);
1563     pp_context->pp_static_parameter = NULL;
1564     pp_context->pp_inline_parameter = NULL;
1565 }
1566
1567 #define VPP_CURBE_ALLOCATION_SIZE   32
1568
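/* Common Gen8 PP context setup: pick thread limits (6 threads per EU when the
 * EU count is known, otherwise a conservative 60), copy every PP kernel binary
 * into a single instruction_state BO with each kernel aligned to a 64-byte
 * offset, allocate the static/inline parameter blocks, and record the IDRT and
 * CURBE sizes used elsewhere by the PP state setup.
 */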
1569 void
1570 gen8_post_processing_context_common_init(VADriverContextP ctx,
1571                                          void *data,
1572                                          struct pp_module *pp_modules,
1573                                          int num_pp_modules,
1574                                          struct intel_batchbuffer *batch)
1575 {
1576     struct i965_driver_data *i965 = i965_driver_data(ctx);
1577     int i, kernel_size;
1578     unsigned int kernel_offset, end_offset;
1579     unsigned char *kernel_ptr;
1580     struct pp_module *pp_module;
1581     struct i965_post_processing_context *pp_context = data;
1582
1583     if (i965->intel.eu_total > 0)
1584         pp_context->vfe_gpu_state.max_num_threads = 6 * i965->intel.eu_total;
1585     else
1586         pp_context->vfe_gpu_state.max_num_threads = 60;
1587     pp_context->vfe_gpu_state.num_urb_entries = 59;
1588     pp_context->vfe_gpu_state.gpgpu_mode = 0;
1589     pp_context->vfe_gpu_state.urb_entry_size = 16 - 1;
1590     pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
1591
1592     pp_context->intel_post_processing = gen8_post_processing;
1593     pp_context->finalize = gen8_post_processing_context_finalize;
1594
1595     assert(ARRAY_ELEMS(pp_context->pp_modules) == num_pp_modules);
1596
1597     memcpy(pp_context->pp_modules, pp_modules, sizeof(pp_context->pp_modules));
1598
1599     kernel_size = 4096;
1600
1601     for (i = 0; i < NUM_PP_MODULES; i++) {
1602         pp_module = &pp_context->pp_modules[i];
1603
1604         if (pp_module->kernel.bin && pp_module->kernel.size) {
1605             kernel_size += pp_module->kernel.size;
1606         }
1607     }
1608
1609     pp_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
1610                                                     "kernel shader",
1611                                                     kernel_size,
1612                                                     0x1000);
1613     if (pp_context->instruction_state.bo == NULL) {
1614         WARN_ONCE("failed to allocate the buffer for the VPP kernel shaders\n");
1615         return;
1616     }
1617
1618     assert(pp_context->instruction_state.bo);
1619
1620
1621     pp_context->instruction_state.bo_size = kernel_size;
1622     pp_context->instruction_state.end_offset = 0;
1623     end_offset = 0;
1624
1625     dri_bo_map(pp_context->instruction_state.bo, 1);
1626     kernel_ptr = (unsigned char *)(pp_context->instruction_state.bo->virtual);
1627
1628     for (i = 0; i < NUM_PP_MODULES; i++) {
1629         pp_module = &pp_context->pp_modules[i];
1630
1631         kernel_offset = ALIGN(end_offset, 64);
1632         pp_module->kernel.kernel_offset = kernel_offset;
1633
1634         if (pp_module->kernel.bin && pp_module->kernel.size) {
1635
1636             memcpy(kernel_ptr + kernel_offset, pp_module->kernel.bin, pp_module->kernel.size);
1637             end_offset = kernel_offset + pp_module->kernel.size;
1638         }
1639     }
1640
1641     pp_context->instruction_state.end_offset = ALIGN(end_offset, 64);
1642
1643     dri_bo_unmap(pp_context->instruction_state.bo);
1644
1645     /* static & inline parameters */
1646     pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
1647     pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
1648
1649     pp_context->batch = batch;
1650
1651     pp_context->idrt_size = 5 * sizeof(struct gen8_interface_descriptor_data);
1652     pp_context->curbe_size = 256;
1653
1654 }
1655
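/* Gen8 entry point for PP context creation: it runs the common init above with
 * the Gen8 kernel table, initializes the AVS state, and configures the shared
 * scaling GPE context (kernel, IDRT/sampler/CURBE sizing, binding table layout
 * and VFE limits) used by the YUV420 8-bit scaling path below. The
 * scaling_gpe_context_initialized bitmask records which bit-depth combinations
 * have a usable context; only VPPGPE_8BIT_8BIT is set up here.
 */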
1656 void
1657 gen8_post_processing_context_init(VADriverContextP ctx,
1658                                   void *data,
1659                                   struct intel_batchbuffer *batch)
1660 {
1661     struct i965_driver_data *i965 = i965_driver_data(ctx);
1662     struct i965_post_processing_context *pp_context = data;
1663     struct i965_gpe_context *gpe_context;
1664     struct i965_kernel scaling_kernel;
1665
1666     gen8_post_processing_context_common_init(ctx, data, pp_modules_gen8, ARRAY_ELEMS(pp_modules_gen8), batch);
1667     avs_init_state(&pp_context->pp_avs_context.state, &gen8_avs_config);
1668
1669     /* Initialize the YUV420 8-bit scaling context. The following conversions are supported:
1670      * NV12 -> NV12
1671      * NV12 -> I420
1672      * I420 -> I420
1673      * I420 -> NV12
1674      */
1675     gpe_context = &pp_context->scaling_gpe_context;
1676     memset(&scaling_kernel, 0, sizeof(scaling_kernel));
1677     scaling_kernel.bin = pp_yuv420p8_scaling_gen8;
1678     scaling_kernel.size = sizeof(pp_yuv420p8_scaling_gen8);
1679     gen8_gpe_load_kernels(ctx, gpe_context, &scaling_kernel, 1);
1680     gpe_context->idrt.entry_size = ALIGN(sizeof(struct gen8_interface_descriptor_data), 64);
1681     gpe_context->idrt.max_entries = 1;
1682     gpe_context->sampler.entry_size = ALIGN(sizeof(struct gen8_sampler_state), 64);
1683     gpe_context->sampler.max_entries = 1;
1684     gpe_context->curbe.length = ALIGN(sizeof(struct scaling_input_parameter), 32);
1685
1686     gpe_context->surface_state_binding_table.max_entries = MAX_SCALING_SURFACES;
1687     gpe_context->surface_state_binding_table.binding_table_offset = 0;
1688     gpe_context->surface_state_binding_table.surface_state_offset = ALIGN(MAX_SCALING_SURFACES * 4, 64);
1689     gpe_context->surface_state_binding_table.length = ALIGN(MAX_SCALING_SURFACES * 4, 64) + ALIGN(MAX_SCALING_SURFACES * SURFACE_STATE_PADDED_SIZE_GEN8, 64);
1690
1691     if (i965->intel.eu_total > 0) {
1692         gpe_context->vfe_state.max_num_threads = i965->intel.eu_total * 6;
1693     } else {
1694         if (i965->intel.has_bsd2)
1695             gpe_context->vfe_state.max_num_threads = 300;
1696         else
1697             gpe_context->vfe_state.max_num_threads = 60;
1698     }
1699
1700     gpe_context->vfe_state.curbe_allocation_size = 37;
1701     gpe_context->vfe_state.urb_entry_size = 16;
1702     if (i965->intel.has_bsd2)
1703         gpe_context->vfe_state.num_urb_entries = 127;
1704     else
1705         gpe_context->vfe_state.num_urb_entries = 64;
1706
1707     gpe_context->vfe_state.gpgpu_mode = 0;
1708
1709     gen8_gpe_context_init(ctx, gpe_context);
1710     pp_context->scaling_gpe_context_initialized |= VPPGPE_8BIT_8BIT;
1711
1712     return;
1713 }
1714
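/* Small helper that submits one media-object-walker dispatch on the given GPE
 * context: pipeline setup, the walker command itself and a media state flush,
 * all wrapped in an atomic batch section and flushed immediately.
 */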
1715 static void
1716 gen8_run_kernel_media_object_walker(VADriverContextP ctx,
1717                                     struct intel_batchbuffer *batch,
1718                                     struct i965_gpe_context *gpe_context,
1719                                     struct gpe_media_object_walker_parameter *param)
1720 {
1721     if (!batch || !gpe_context || !param)
1722         return;
1723
1724     intel_batchbuffer_start_atomic(batch, 0x1000);
1725
1726     intel_batchbuffer_emit_mi_flush(batch);
1727
1728     gen8_gpe_pipeline_setup(ctx, gpe_context, batch);
1729     gen8_gpe_media_object_walker(ctx, gpe_context, batch, param);
1730     gen8_gpe_media_state_flush(ctx, gpe_context, batch);
1731
1732
1733     intel_batchbuffer_end_atomic(batch);
1734
1735     intel_batchbuffer_flush(batch);
1736     return;
1737 }
1738
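/* Wraps a dri_bo as a 2D GPE resource and adds it to the context's binding
 * table at the given index. bo_offset is applied as an explicit surface offset
 * (is_override_offset), which is how the chroma planes of NV12/I420 surfaces
 * sharing a single BO are addressed; is_10bit is passed through as the is_16bpp
 * flag for P010/I010 data.
 */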
1739 static void
1740 gen8_add_dri_buffer_2d_gpe_surface(VADriverContextP ctx,
1741                                    struct i965_gpe_context *gpe_context,
1742                                    dri_bo *bo,
1743                                    unsigned int bo_offset,
1744                                    unsigned int width,
1745                                    unsigned int height,
1746                                    unsigned int pitch,
1747                                    int is_media_block_rw,
1748                                    unsigned int format,
1749                                    int index,
1750                                    int is_10bit)
1751 {
1752     struct i965_gpe_resource gpe_resource;
1753     struct i965_gpe_surface gpe_surface;
1754
1755     i965_dri_object_to_2d_gpe_resource(&gpe_resource, bo, width, height, pitch);
1756     memset(&gpe_surface, 0, sizeof(gpe_surface));
1757     gpe_surface.gpe_resource = &gpe_resource;
1758     gpe_surface.is_2d_surface = 1;
1759     gpe_surface.is_media_block_rw = !!is_media_block_rw;
1760     gpe_surface.cacheability_control = DEFAULT_MOCS;
1761     gpe_surface.format = format;
1762     gpe_surface.is_override_offset = 1;
1763     gpe_surface.offset = bo_offset;
1764     gpe_surface.is_16bpp = is_10bit;
1765
1766     gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
1767
1768     i965_free_gpe_resource(&gpe_resource);
1769 }
1770
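/* Programs the single sampler used by the scaling kernel: nearest filtering
 * when the source and destination rectangles have identical dimensions (a pure
 * copy/CSC), bilinear filtering otherwise, with clamp addressing on all three
 * texture coordinates.
 */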
1771 static void
1772 gen8_vpp_scaling_sample_state(VADriverContextP ctx,
1773                               struct i965_gpe_context *gpe_context,
1774                               VARectangle *src_rect,
1775                               VARectangle *dst_rect)
1776 {
1777     struct gen8_sampler_state *sampler_state;
1778
1779     if (gpe_context == NULL || !src_rect || !dst_rect)
1780         return;
1781     dri_bo_map(gpe_context->sampler.bo, 1);
1782
1783     if (gpe_context->sampler.bo->virtual == NULL)
1784         return;
1785
1786     assert(gpe_context->sampler.bo->virtual);
1787
1788     sampler_state = (struct gen8_sampler_state *)
1789                     (gpe_context->sampler.bo->virtual + gpe_context->sampler.offset);
1790
1791     memset(sampler_state, 0, sizeof(*sampler_state));
1792
1793     if ((src_rect->width == dst_rect->width) &&
1794         (src_rect->height == dst_rect->height)) {
1795         sampler_state->ss0.min_filter = I965_MAPFILTER_NEAREST;
1796         sampler_state->ss0.mag_filter = I965_MAPFILTER_NEAREST;
1797     } else {
1798         sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
1799         sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
1800     }
1801
1802     sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1803     sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1804     sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1805
1806     dri_bo_unmap(gpe_context->sampler.bo);
1807 }
1808
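/* Fills the CURBE (struct scaling_input_parameter) for the 8-bit scaling
 * kernel. The x/y factors and origins are normalized against the configured
 * surface extent (src_rect->x + width, src_rect->y + height), so -- assuming
 * the kernel consumes them as sampler coordinates -- a destination pixel
 * (x_dst + i, y_dst + j) is expected to sample the source at roughly
 *
 *     u = x_orig + i * x_factor = (src_rect->x + i * src_w / dst_w) / src_width
 *     v = y_orig + j * y_factor = (src_rect->y + j * src_h / dst_h) / src_height
 *
 * i.e. a plain linear mapping of the destination rectangle back onto the source
 * rectangle. dw7.src_packed/dst_packed tell the kernel whether the chroma data
 * is interleaved (NV12) or planar (I420).
 */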
1809 static void
1810 gen8_gpe_context_yuv420p8_scaling_curbe(VADriverContextP ctx,
1811                                         struct i965_gpe_context *gpe_context,
1812                                         VARectangle *src_rect,
1813                                         struct i965_surface *src_surface,
1814                                         VARectangle *dst_rect,
1815                                         struct i965_surface *dst_surface)
1816 {
1817     struct scaling_input_parameter *scaling_curbe;
1818     float src_width, src_height;
1819     float coeff;
1820     unsigned int fourcc;
1821
1822     if ((gpe_context == NULL) ||
1823         (src_rect == NULL) || (src_surface == NULL) ||
1824         (dst_rect == NULL) || (dst_surface == NULL))
1825         return;
1826
1827     scaling_curbe = i965_gpe_context_map_curbe(gpe_context);
1828
1829     if (!scaling_curbe)
1830         return;
1831
1832     memset(scaling_curbe, 0, sizeof(struct scaling_input_parameter));
1833
1834     scaling_curbe->bti_input = BTI_SCALING_INPUT_Y;
1835     scaling_curbe->bti_output = BTI_SCALING_OUTPUT_Y;
1836
1837     /* src_rect/dst_rect have already been validated, so no further checks are needed here. */
1838     scaling_curbe->x_dst     = dst_rect->x;
1839     scaling_curbe->y_dst     = dst_rect->y;
1840
1841     src_width = src_rect->x + src_rect->width;
1842     src_height = src_rect->y + src_rect->height;
1843
1844     scaling_curbe->inv_width = 1 / src_width;
1845     scaling_curbe->inv_height = 1 / src_height;
1846
1847     coeff = (float)(src_rect->width) / dst_rect->width;
1848     scaling_curbe->x_factor = coeff / src_width;
1849     scaling_curbe->x_orig = (float)(src_rect->x) / src_width;
1850
1851     coeff = (float)(src_rect->height) / dst_rect->height;
1852     scaling_curbe->y_factor = coeff / src_height;
1853     scaling_curbe->y_orig = (float)(src_rect->y) / src_height;
1854
1855     fourcc = pp_get_surface_fourcc(ctx, src_surface);
1856     if (fourcc == VA_FOURCC_NV12) {
1857         scaling_curbe->dw7.src_packed = 1;
1858     }
1859
1860     fourcc = pp_get_surface_fourcc(ctx, dst_surface);
1861
1862     if (fourcc == VA_FOURCC_NV12) {
1863         scaling_curbe->dw7.dst_packed = 1;
1864     }
1865
1866     i965_gpe_context_unmap_curbe(gpe_context);
1867 }
1868
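/* Collects the per-plane layout of a surface or image: width/height are clipped
 * to the rectangle extent, pitch comes from the object, and bo_offset is the
 * byte offset of each plane inside the BO. Index 0 is always the Y plane;
 * NV12/P010 get a single interleaved UV plane at index 1, while I420/I010 get
 * separate U and V planes at indices 1 and 2. For VASurfaces the chroma offsets
 * are derived from y_cb_offset/y_cr_offset multiplied by the surface pitch; for
 * VAImages they are taken directly from image.offsets[].
 */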
1869 static bool
1870 gen8_pp_context_get_surface_conf(VADriverContextP ctx,
1871                                  struct i965_surface *surface,
1872                                  VARectangle *rect,
1873                                  int *width,
1874                                  int *height,
1875                                  int *pitch,
1876                                  int *bo_offset)
1877 {
1878     unsigned int fourcc;
1879     if (!rect || !surface || !width || !height || !pitch || !bo_offset)
1880         return false;
1881
1882     if (surface->base == NULL)
1883         return false;
1884
1885     fourcc = pp_get_surface_fourcc(ctx, surface);
1886     if (surface->type == I965_SURFACE_TYPE_SURFACE) {
1887         struct object_surface *obj_surface;
1888
1889         obj_surface = (struct object_surface *)surface->base;
1890         width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
1891         height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
1892         pitch[0] = obj_surface->width;
1893         bo_offset[0] = 0;
1894
1895         if (fourcc == VA_FOURCC_P010 || fourcc == VA_FOURCC_NV12) {
1896             width[1] = width[0] / 2;
1897             height[1] = height[0] / 2;
1898             pitch[1] = obj_surface->cb_cr_pitch;
1899             bo_offset[1] = obj_surface->width * obj_surface->y_cb_offset;
1900         } else {
1901             /* I010/I420 format */
1902             width[1] = width[0] / 2;
1903             height[1] = height[0] / 2;
1904             pitch[1] = obj_surface->cb_cr_pitch;
1905             bo_offset[1] = obj_surface->width * obj_surface->y_cb_offset;
1906             width[2] = width[0] / 2;
1907             height[2] = height[0] / 2;
1908             pitch[2] = obj_surface->cb_cr_pitch;
1909             bo_offset[2] = obj_surface->width * obj_surface->y_cr_offset;
1910         }
1911
1912     } else {
1913         struct object_image *obj_image;
1914
1915         obj_image = (struct object_image *)surface->base;
1916
1917         width[0] = MIN(rect->x + rect->width, obj_image->image.width);
1918         height[0] = MIN(rect->y + rect->height, obj_image->image.height);
1919         pitch[0] = obj_image->image.pitches[0];
1920         bo_offset[0] = obj_image->image.offsets[0];
1921
1922         if (fourcc == VA_FOURCC_P010 || fourcc == VA_FOURCC_NV12) {
1923             width[1] = width[0] / 2;
1924             height[1] = height[0] / 2;
1925             pitch[1] = obj_image->image.pitches[1];
1926             bo_offset[1] = obj_image->image.offsets[1];
1927         } else {
1928             /* I010/I420 format */
1929             /* YV12 is TBD */
1930             width[1] = width[0] / 2;
1931             height[1] = height[0] / 2;
1932             pitch[1] = obj_image->image.pitches[1];
1933             bo_offset[1] = obj_image->image.offsets[1];
1934             width[2] = width[0] / 2;
1935             height[2] = height[0] / 2;
1936             pitch[2] = obj_image->image.pitches[2];
1937             bo_offset[2] = obj_image->image.offsets[2];
1938         }
1939
1940     }
1941     return true;
1942 }
1943
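/* Binds the source planes as sampled surfaces starting at BTI_SCALING_INPUT_Y
 * (R8_UNORM for Y and planar chroma, R8G8_UNORM for interleaved NV12 chroma)
 * and the destination planes as media-block-write surfaces starting at
 * BTI_SCALING_OUTPUT_Y (R8_UINT, or R16_UINT with width[1] * 2 for the NV12
 * chroma plane so that each element carries one UV pair). The element-width
 * convention for the block-write surfaces follows what the common GPE code
 * expects; the values here mirror the rest of the driver.
 */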
1944 static void
1945 gen8_gpe_context_yuv420p8_scaling_surfaces(VADriverContextP ctx,
1946                                            struct i965_gpe_context *gpe_context,
1947                                            VARectangle *src_rect,
1948                                            struct i965_surface *src_surface,
1949                                            VARectangle *dst_rect,
1950                                            struct i965_surface *dst_surface)
1951 {
1952     unsigned int fourcc;
1953     int width[3], height[3], pitch[3], bo_offset[3];
1954     dri_bo *bo;
1955     struct object_surface *obj_surface;
1956     struct object_image *obj_image;
1957     int bti;
1958
1959     if ((gpe_context == NULL) ||
1960         (src_rect == NULL) || (src_surface == NULL) ||
1961         (dst_rect == NULL) || (dst_surface == NULL))
1962         return;
1963
1964     if (src_surface->base == NULL || dst_surface->base == NULL)
1965         return;
1966
1967     fourcc = pp_get_surface_fourcc(ctx, src_surface);
1968
1969     if (src_surface->type == I965_SURFACE_TYPE_SURFACE) {
1970         obj_surface = (struct object_surface *)src_surface->base;
1971         bo = obj_surface->bo;
1972     } else {
1973         obj_image = (struct object_image *)src_surface->base;
1974         bo = obj_image->bo;
1975     }
1976
1977     bti = 0;
1978     if (gen8_pp_context_get_surface_conf(ctx, src_surface, src_rect,
1979                                          width, height, pitch,
1980                                          bo_offset)) {
1981         bti = BTI_SCALING_INPUT_Y;
1982         /* Input surface */
1983         gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
1984                                            bo_offset[0],
1985                                            width[0], height[0],
1986                                            pitch[0], 0,
1987                                            I965_SURFACEFORMAT_R8_UNORM,
1988                                            bti, 0);
1989         if (fourcc == VA_FOURCC_NV12) {
1990             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
1991                                                bo_offset[1],
1992                                                width[1], height[1],
1993                                                pitch[1], 0,
1994                                                I965_SURFACEFORMAT_R8G8_UNORM,
1995                                                bti + 1, 0);
1996         } else {
1997             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
1998                                                bo_offset[1],
1999                                                width[1], height[1],
2000                                                pitch[1], 0,
2001                                                I965_SURFACEFORMAT_R8_UNORM,
2002                                                bti + 1, 0);
2003
2004             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2005                                                bo_offset[2],
2006                                                width[2], height[2],
2007                                                pitch[2], 0,
2008                                                I965_SURFACEFORMAT_R8_UNORM,
2009                                                bti + 2, 0);
2010         }
2011     }
2012
2013     fourcc = pp_get_surface_fourcc(ctx, dst_surface);
2014
2015     if (dst_surface->type == I965_SURFACE_TYPE_SURFACE) {
2016         obj_surface = (struct object_surface *)dst_surface->base;
2017         bo = obj_surface->bo;
2018     } else {
2019         obj_image = (struct object_image *)dst_surface->base;
2020         bo = obj_image->bo;
2021     }
2022
2023     if (gen8_pp_context_get_surface_conf(ctx, dst_surface, dst_rect,
2024                                          width, height, pitch,
2025                                          bo_offset)) {
2026         bti = BTI_SCALING_OUTPUT_Y;
2027         /* Output surface */
2028         gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2029                                            bo_offset[0],
2030                                            width[0], height[0],
2031                                            pitch[0], 1,
2032                                            I965_SURFACEFORMAT_R8_UINT,
2033                                            bti, 0);
2034         if (fourcc == VA_FOURCC_NV12) {
2035             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2036                                                bo_offset[1],
2037                                                width[1] * 2, height[1],
2038                                                pitch[1], 1,
2039                                                I965_SURFACEFORMAT_R16_UINT,
2040                                                bti + 1, 0);
2041         } else {
2042             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2043                                                bo_offset[1],
2044                                                width[1], height[1],
2045                                                pitch[1], 1,
2046                                                I965_SURFACEFORMAT_R8_UINT,
2047                                                bti + 1, 0);
2048
2049             gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
2050                                                bo_offset[2],
2051                                                width[2], height[2],
2052                                                pitch[2], 1,
2053                                                I965_SURFACEFORMAT_R8_UINT,
2054                                                bti + 2, 0);
2055         }
2056     }
2057
2058     return;
2059 }
2060
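/* Entry point for the 8-bit YUV420 scaling/CSC path on the shared GPE context.
 * The media object walker is dispatched over the destination surface in 16x16
 * pixel blocks: for example, a hypothetical 1920x1080 destination would give
 * resolution_x = ALIGN(1920, 16) >> 4 = 120 and
 * resolution_y = ALIGN(1080, 16) >> 4 = 68 thread blocks, with no inter-block
 * dependency (no_dependency = 1).
 */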
2061 VAStatus
2062 gen8_yuv420p8_scaling_post_processing(
2063     VADriverContextP   ctx,
2064     struct i965_post_processing_context *pp_context,
2065     struct i965_surface *src_surface,
2066     VARectangle *src_rect,
2067     struct i965_surface *dst_surface,
2068     VARectangle *dst_rect)
2069 {
2070     struct i965_gpe_context *gpe_context;
2071     struct gpe_media_object_walker_parameter media_object_walker_param;
2072     struct intel_vpp_kernel_walker_parameter kernel_walker_param;
2073
2074     if (!pp_context || !src_surface || !src_rect || !dst_surface || !dst_rect)
2075         return VA_STATUS_ERROR_INVALID_PARAMETER;
2076
2077     if (!(pp_context->scaling_gpe_context_initialized & VPPGPE_8BIT_8BIT))
2078         return VA_STATUS_ERROR_UNIMPLEMENTED;
2079
2080     gpe_context = &pp_context->scaling_gpe_context;
2081
2082     gen8_gpe_context_init(ctx, gpe_context);
2083     gen8_vpp_scaling_sample_state(ctx, gpe_context, src_rect, dst_rect);
2084     gen8_gpe_reset_binding_table(ctx, gpe_context);
2085     gen8_gpe_context_yuv420p8_scaling_curbe(ctx, gpe_context,
2086                                             src_rect, src_surface,
2087                                             dst_rect, dst_surface);
2088
2089     gen8_gpe_context_yuv420p8_scaling_surfaces(ctx, gpe_context,
2090                                                src_rect, src_surface,
2091                                                dst_rect, dst_surface);
2092
2093     gen8_gpe_setup_interface_data(ctx, gpe_context);
2094
2095     memset(&kernel_walker_param, 0, sizeof(kernel_walker_param));
2096     kernel_walker_param.resolution_x = ALIGN(dst_rect->width, 16) >> 4;
2097     kernel_walker_param.resolution_y = ALIGN(dst_rect->height, 16) >> 4;
2098     kernel_walker_param.no_dependency = 1;
2099
2100     intel_vpp_init_media_object_walker_parameter(&kernel_walker_param, &media_object_walker_param);
2101     media_object_walker_param.interface_offset = 0;
2102     gen8_run_kernel_media_object_walker(ctx, pp_context->batch,
2103                                         gpe_context,
2104                                         &media_object_walker_param);
2105
2106     return VA_STATUS_SUCCESS;
2107 }