OSDN Git Service

35e46f95a027de39c50b05f8173d10bb6ec0e4e7
[android-x86/hardware-intel-common-vaapi.git] / src / gen8_post_processing.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the
6  * "Software"), to deal in the Software without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sub license, and/or sell copies of the Software, and to
9  * permit persons to whom the Software is furnished to do so, subject to
10  * the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the
13  * next paragraph) shall be included in all copies or substantial portions
14  * of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19  * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Xiang Haihao <haihao.xiang@intel.com>
26  *    Zhao Yakui <yakui.zhao@intel.com>
27  *
28  */
29
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <assert.h>
34
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "i965_drv_video.h"
40 #include "i965_post_processing.h"
41 #include "i965_render.h"
42 #include "i965_yuv_coefs.h"
43 #include "intel_media.h"
44
45 #include "gen75_picture_process.h"
46 #include "intel_common_vpp_internal.h"
47
#define SURFACE_STATE_PADDED_SIZE               SURFACE_STATE_PADDED_SIZE_GEN8

/* Byte offset of the index-th padded surface state inside the combined
 * surface-state + binding-table buffer. */
#define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * index)
/* The binding table is laid out immediately after the MAX_PP_SURFACES
 * surface-state entries. */
#define BINDING_TABLE_OFFSET                    SURFACE_STATE_OFFSET(MAX_PP_SURFACES)

/* Media-walker geometry: each dispatched block covers 16x8 destination
 * pixels, and the destination X offset must be 4-pixel aligned. */
#define GPU_ASM_BLOCK_WIDTH         16
#define GPU_ASM_BLOCK_HEIGHT        8
#define GPU_ASM_X_OFFSET_ALIGNMENT  4

/* NOTE(review): misleading name -- 0xFFFFFFFE is not a VA success code
 * (VA_STATUS_SUCCESS is 0), and this macro is unused in this chunk.
 * Verify it is referenced elsewhere before keeping; consider removing. */
#define VA_STATUS_SUCCESS_1                     0xFFFFFFFE
/* Forward declarations for the module-initializer callbacks referenced by
 * pp_modules_gen8[] below; the definitions appear later in this file. */

/* No-op initializer: installs 1x1 walker callbacks and copies flags only. */
VAStatus pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                            const struct i965_surface *src_surface,
                            const VARectangle *src_rect,
                            struct i965_surface *dst_surface,
                            const VARectangle *dst_rect,
                            void *filter_param);

/* Generic planar/packed AVS scaling initializer shared by most modules. */
VAStatus gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                    const struct i965_surface *src_surface,
                                    const VARectangle *src_rect,
                                    struct i965_surface *dst_surface,
                                    const VARectangle *dst_rect,
                                    void *filter_param);
72
/* Compiled gen8 media-kernel binaries, included as uint32_t[4] instruction
 * words.  Note that pl2_to_pl2.g8b is reused for plain load/save, scaling
 * and AVS -- the same kernel services all three NV12->NV12 paths. */

/* TODO: Modify the shader and then compile it again.
 * Currently it is derived from Haswell*/
static const uint32_t pp_null_gen8[][4] = {
};

static const uint32_t pp_nv12_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

static const uint32_t pp_nv12_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl3.g8b"
};

static const uint32_t pp_pl3_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl2.g8b"
};

static const uint32_t pp_pl3_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl3.g8b"
};

static const uint32_t pp_nv12_scaling_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

static const uint32_t pp_nv12_avs_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

/* DNDI/DN kernels have no gen8 binary yet; the tables stay empty and the
 * corresponding modules fall back to pp_null_initialize. */
static const uint32_t pp_nv12_dndi_gen8[][4] = {
// #include "shaders/post_processing/gen7/dndi.g75b"
};

static const uint32_t pp_nv12_dn_gen8[][4] = {
// #include "shaders/post_processing/gen7/nv12_dn_nv12.g75b"
};
static const uint32_t pp_nv12_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pa.g8b"
};
static const uint32_t pp_pl3_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pa.g8b"
};
static const uint32_t pp_pa_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl2.g8b"
};
static const uint32_t pp_pa_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl3.g8b"
};
static const uint32_t pp_pa_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pa.g8b"
};
static const uint32_t pp_rgbx_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/rgbx_to_nv12.g8b"
};
static const uint32_t pp_nv12_load_save_rgbx_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_rgbx.g8b"
};
130
/* Gen8 post-processing module table, indexed by PP_* operation id.
 * Each entry pairs a kernel descriptor (name, id, binary, binary size,
 * bo slot initialized to NULL) with its context-initialization callback.
 * Entries whose kernel table is empty (NULL module, DNDI, DN) are wired
 * to pp_null_initialize and perform no GPU work. */
static struct pp_module pp_modules_gen8[] = {
    {
        {
            "NULL module (for testing)",
            PP_NULL,
            pp_null_gen8,
            sizeof(pp_null_gen8),
            NULL,
        },

        pp_null_initialize,
    },

    {
        {
            "NV12_NV12",
            PP_NV12_LOAD_SAVE_N12,
            pp_nv12_load_save_nv12_gen8,
            sizeof(pp_nv12_load_save_nv12_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "NV12_PL3",
            PP_NV12_LOAD_SAVE_PL3,
            pp_nv12_load_save_pl3_gen8,
            sizeof(pp_nv12_load_save_pl3_gen8),
            NULL,
        },
        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PL3_NV12",
            PP_PL3_LOAD_SAVE_N12,
            pp_pl3_load_save_nv12_gen8,
            sizeof(pp_pl3_load_save_nv12_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PL3_PL3",
            PP_PL3_LOAD_SAVE_PL3,
            pp_pl3_load_save_pl3_gen8,
            sizeof(pp_pl3_load_save_pl3_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "NV12 Scaling module",
            PP_NV12_SCALING,
            pp_nv12_scaling_gen8,
            sizeof(pp_nv12_scaling_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "NV12 AVS module",
            PP_NV12_AVS,
            pp_nv12_avs_gen8,
            sizeof(pp_nv12_avs_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    /* DNDI/DN: no gen8 kernel binary yet -- see the empty tables above. */
    {
        {
            "NV12 DNDI module",
            PP_NV12_DNDI,
            pp_nv12_dndi_gen8,
            sizeof(pp_nv12_dndi_gen8),
            NULL,
        },

        pp_null_initialize,
    },

    {
        {
            "NV12 DN module",
            PP_NV12_DN,
            pp_nv12_dn_gen8,
            sizeof(pp_nv12_dn_gen8),
            NULL,
        },

        pp_null_initialize,
    },
    {
        {
            "NV12_PA module",
            PP_NV12_LOAD_SAVE_PA,
            pp_nv12_load_save_pa_gen8,
            sizeof(pp_nv12_load_save_pa_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PL3_PA module",
            PP_PL3_LOAD_SAVE_PA,
            pp_pl3_load_save_pa_gen8,
            sizeof(pp_pl3_load_save_pa_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PA_NV12 module",
            PP_PA_LOAD_SAVE_NV12,
            pp_pa_load_save_nv12_gen8,
            sizeof(pp_pa_load_save_nv12_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PA_PL3 module",
            PP_PA_LOAD_SAVE_PL3,
            pp_pa_load_save_pl3_gen8,
            sizeof(pp_pa_load_save_pl3_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PA_PA module",
            PP_PA_LOAD_SAVE_PA,
            pp_pa_load_save_pa_gen8,
            sizeof(pp_pa_load_save_pa_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "RGBX_NV12 module",
            PP_RGBX_LOAD_SAVE_NV12,
            pp_rgbx_load_save_nv12_gen8,
            sizeof(pp_rgbx_load_save_nv12_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "NV12_RGBX module",
            PP_NV12_LOAD_SAVE_RGBX,
            pp_nv12_load_save_rgbx_gen8,
            sizeof(pp_nv12_load_save_rgbx_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },
};
322
/* Maximum surfaces bound by the common scaling path -- presumably matches
 * its binding-table layout; used by code past this chunk (TODO confirm). */
#define MAX_SCALING_SURFACES    16

/* Default memory-object control state (MOCS) index. */
#define DEFAULT_MOCS    0

/* Gen8 kernel for 8-bit YUV420 planar scaling/format conversion. */
static const uint32_t pp_yuv420p8_scaling_gen8[][4] = {
#include "shaders/post_processing/gen8/conv_nv12.g8b"
};
330
331 static int
332 pp_get_surface_fourcc(VADriverContextP ctx, const struct i965_surface *surface)
333 {
334     int fourcc;
335
336     if (surface->type == I965_SURFACE_TYPE_IMAGE) {
337         struct object_image *obj_image = (struct object_image *)surface->base;
338         fourcc = obj_image->image.format.fourcc;
339     } else {
340         struct object_surface *obj_surface = (struct object_surface *)surface->base;
341         fourcc = obj_surface->fourcc;
342     }
343
344     return fourcc;
345 }
346
347 static void
348 gen8_pp_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
349 {
350     switch (tiling) {
351     case I915_TILING_NONE:
352         ss->ss0.tiled_surface = 0;
353         ss->ss0.tile_walk = 0;
354         break;
355     case I915_TILING_X:
356         ss->ss0.tiled_surface = 1;
357         ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
358         break;
359     case I915_TILING_Y:
360         ss->ss0.tiled_surface = 1;
361         ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
362         break;
363     }
364 }
365
366 static void
367 gen8_pp_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
368 {
369     switch (tiling) {
370     case I915_TILING_NONE:
371         ss->ss2.tiled_surface = 0;
372         ss->ss2.tile_walk = 0;
373         break;
374     case I915_TILING_X:
375         ss->ss2.tiled_surface = 1;
376         ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
377         break;
378     case I915_TILING_Y:
379         ss->ss2.tiled_surface = 1;
380         ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
381         break;
382     }
383 }
384
385
/*
 * Fill the gen8 SURFACE_STATE at binding-table slot @index for @surf_bo
 * (plane starting at @surf_bo_offset) and point the binding-table entry
 * at it.  @is_target selects a render-write relocation domain so the GPU
 * may write the surface.  The state buffer is mapped, written, relocated
 * and unmapped here; width/height/pitch are programmed as value-minus-one
 * per hardware convention.
 */
static void
gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                          dri_bo *surf_bo, unsigned long surf_bo_offset,
                          int width, int height, int pitch, int format,
                          int index, int is_target)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen8_surface_state *ss;
    dri_bo *ss_bo;
    unsigned int tiling;
    unsigned int swizzle;

    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss_bo = pp_context->surface_state_binding_table.bo;
    assert(ss_bo);

    dri_bo_map(ss_bo, True);
    assert(ss_bo->virtual);
    ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss, 0, sizeof(*ss));

    /* Gen9 gets an explicit MOCS; gen8 keeps the zeroed default. */
    if (IS_GEN9(i965->intel.device_info))
        ss->ss1.surface_mocs = GEN9_CACHE_PTE;

    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss8.base_addr = surf_bo->offset + surf_bo_offset;
    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;
    ss->ss3.pitch = pitch - 1;

    /* Always set 1(align 4 mode) per B-spec */
    ss->ss0.vertical_alignment = 1;
    ss->ss0.horizontal_alignment = 1;

    gen8_pp_set_surface_tiling(ss, tiling);
    gen8_render_set_surface_scs(ss);
    /* Relocation targets the base-address dword (ss8) so the kernel picks
     * up the final GPU address of surf_bo at exec time. */
    dri_bo_emit_reloc(ss_bo,
                      I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
                      surf_bo_offset,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state, ss8),
                      surf_bo);
    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}
431
432
/*
 * Fill the gen8 SURFACE_STATE2 (sampler 8x8 / AVS media surface state) at
 * binding-table slot @index and point the binding-table entry at it.
 * @xoffset/@yoffset locate the Cb plane, @interleave_chroma flags NV12-style
 * interleaved CbCr.  Surfaces described this way are read-only to the
 * sampler (write domain 0 in the relocation).
 */
static void
gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                           dri_bo *surf_bo, unsigned long surf_bo_offset,
                           int width, int height, int wpitch,
                           int xoffset, int yoffset,
                           int format, int interleave_chroma,
                           int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen8_surface_state2 *ss2;
    dri_bo *ss2_bo;
    unsigned int tiling;
    unsigned int swizzle;

    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss2_bo = pp_context->surface_state_binding_table.bo;
    assert(ss2_bo);

    dri_bo_map(ss2_bo, True);
    assert(ss2_bo->virtual);
    ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss2, 0, sizeof(*ss2));

    /* Gen9 gets an explicit MOCS; gen8 keeps the zeroed default. */
    if (IS_GEN9(i965->intel.device_info))
        ss2->ss5.surface_object_mocs = GEN9_CACHE_PTE;

    ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
    ss2->ss1.cbcr_pixel_offset_v_direction = 0;
    /* Hardware convention: dimensions are programmed as value minus one. */
    ss2->ss1.width = width - 1;
    ss2->ss1.height = height - 1;
    ss2->ss2.pitch = wpitch - 1;
    ss2->ss2.interleave_chroma = interleave_chroma;
    ss2->ss2.surface_format = format;
    ss2->ss3.x_offset_for_cb = xoffset;
    ss2->ss3.y_offset_for_cb = yoffset;
    gen8_pp_set_surface2_tiling(ss2, tiling);
    /* Relocate the base-address dword (ss6) to surf_bo's final address. */
    dri_bo_emit_reloc(ss2_bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      surf_bo_offset,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state2, ss6),
                      surf_bo);
    ((unsigned int *)((char *)ss2_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss2_bo);
}
477
/*
 * Bind all planes of @surface for the AVS/scaling kernels, starting at
 * binding-table slot @base_index.  A target consumes up to 3 SURFACE_STATE
 * slots (base_index..+2); a source consumes up to 6 (3 SURFACE_STATE2 for
 * sampling at base_index..+2, plus 3 SURFACE_STATE for media read messages
 * at base_index+3..+5).
 *
 * On return width/height/pitch/offset[0..2] hold the per-plane geometry of
 * @surface clipped against @rect (each must point to at least 3 ints).
 * Returns silently without binding anything if the FOURCC is unknown.
 */
static void
gen8_pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                     const struct i965_surface *surface,
                                     int base_index, int is_target,
                                     const VARectangle *rect,
                                     int *width, int *height, int *pitch, int *offset)
{
    struct object_surface *obj_surface;
    struct object_image *obj_image;
    dri_bo *bo;
    int fourcc = pp_get_surface_fourcc(ctx, surface);
    const i965_fourcc_info *fourcc_info = get_fourcc_info(fourcc);

    if (fourcc_info == NULL)
        return;

    if (surface->type == I965_SURFACE_TYPE_SURFACE) {
        /* Plane geometry from the driver's surface object. */
        obj_surface = (struct object_surface *)surface->base;
        bo = obj_surface->bo;
        width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
        height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
        pitch[0] = obj_surface->width;
        offset[0] = 0;

        if (fourcc_info->num_planes == 1 && is_target)
            width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */

        /* Chroma planes: scale the rect by the subsampling factors. */
        width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
        height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
        pitch[1] = obj_surface->cb_cr_pitch;
        offset[1] = obj_surface->y_cb_offset * obj_surface->width;

        width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
        height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
        pitch[2] = obj_surface->cb_cr_pitch;
        offset[2] = obj_surface->y_cr_offset * obj_surface->width;
    } else {
        int U = 0, V = 0;

        /* FIXME: add support for ARGB/ABGR image */
        /* Plane geometry from the VAImage's own pitches/offsets tables. */
        obj_image = (struct object_image *)surface->base;
        bo = obj_image->bo;
        width[0] = MIN(rect->x + rect->width, obj_image->image.width);
        height[0] = MIN(rect->y + rect->height, obj_image->image.height);
        pitch[0] = obj_image->image.pitches[0];
        offset[0] = obj_image->image.offsets[0];

        if (fourcc_info->num_planes == 1) {
            if (is_target)
                width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
        } else if (fourcc_info->num_planes == 2) {
            U = 1, V = 1;
        } else {
            assert(fourcc_info->num_components == 3);

            /* U/V plane order differs per FOURCC (e.g. I420 vs YV12). */
            U = fourcc_info->components[1].plane;
            V = fourcc_info->components[2].plane;
            assert((U == 1 && V == 2) ||
                   (U == 2 && V == 1));
        }

        /* Always set width/height although they aren't used for fourcc_info->num_planes == 1 */
        width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
        height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
        pitch[1] = obj_image->image.pitches[U];
        offset[1] = obj_image->image.offsets[U];

        width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
        height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
        pitch[2] = obj_image->image.pitches[V];
        offset[2] = obj_image->image.offsets[V];
    }

    if (is_target) {
        /* Target planes are written via media messages as raw bytes, so
         * each plane is bound as an integer surface with width scaled to
         * dword (R8_UINT / 4) or pixel-pair (R8G8) units. */
        gen8_pp_set_surface_state(ctx, pp_context,
                                  bo, 0,
                                  ALIGN(width[0], 4) / 4, height[0], pitch[0],
                                  I965_SURFACEFORMAT_R8_UINT,
                                  base_index, 1);

        if (fourcc_info->num_planes == 2) {
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[1],
                                      ALIGN(width[1], 2) / 2, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8G8_SINT,
                                      base_index + 1, 1);
        } else if (fourcc_info->num_planes == 3) {
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[1],
                                      ALIGN(width[1], 4) / 4, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8_SINT,
                                      base_index + 1, 1);
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[2],
                                      ALIGN(width[2], 4) / 4, height[2], pitch[2],
                                      I965_SURFACEFORMAT_R8_SINT,
                                      base_index + 2, 1);
        }

        if (fourcc_info->format == I965_COLOR_RGB) {
            struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
            /* the format is MSB: X-B-G-R */
            pp_static_parameter->grf2.save_avs_rgb_swap = 0;
            if ((fourcc == VA_FOURCC_BGRA) ||
                (fourcc == VA_FOURCC_BGRX)) {
                /* It is stored as MSB: X-R-G-B */
                pp_static_parameter->grf2.save_avs_rgb_swap = 1;
            }
        }
    } else {
        /* Source: pick the SURFACE_STATE2 sampling format for plane 0. */
        int format0 = SURFACE_FORMAT_Y8_UNORM;

        switch (fourcc) {
        case VA_FOURCC_YUY2:
            format0 = SURFACE_FORMAT_YCRCB_NORMAL;
            break;

        case VA_FOURCC_UYVY:
            format0 = SURFACE_FORMAT_YCRCB_SWAPY;
            break;

        default:
            break;
        }

        if (fourcc_info->format == I965_COLOR_RGB) {
            struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
            /* Only R8G8B8A8_UNORM is supported for BGRX or RGBX */
            format0 = SURFACE_FORMAT_R8G8B8A8_UNORM;
            pp_static_parameter->grf2.src_avs_rgb_swap = 0;
            if ((fourcc == VA_FOURCC_BGRA) ||
                (fourcc == VA_FOURCC_BGRX)) {
                pp_static_parameter->grf2.src_avs_rgb_swap = 1;
            }
        }

        gen8_pp_set_surface2_state(ctx, pp_context,
                                   bo, offset[0],
                                   width[0], height[0], pitch[0],
                                   0, 0,
                                   format0, 0,
                                   base_index);

        if (fourcc_info->num_planes == 2) {
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[1],
                                       width[1], height[1], pitch[1],
                                       0, 0,
                                       SURFACE_FORMAT_R8B8_UNORM, 0,
                                       base_index + 1);
        } else if (fourcc_info->num_planes == 3) {
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[1],
                                       width[1], height[1], pitch[1],
                                       0, 0,
                                       SURFACE_FORMAT_R8_UNORM, 0,
                                       base_index + 1);
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[2],
                                       width[2], height[2], pitch[2],
                                       0, 0,
                                       SURFACE_FORMAT_R8_UNORM, 0,
                                       base_index + 2);
        }

        /* Duplicate source planes as plain SURFACE_STATEs for kernels that
         * use media read messages instead of the sampler; bound with the
         * render-write domain like the target path (NOTE(review): verify
         * the write domain is intentional for sources). */
        gen8_pp_set_surface_state(ctx, pp_context,
                                  bo, 0,
                                  ALIGN(width[0], 4) / 4, height[0], pitch[0],
                                  I965_SURFACEFORMAT_R8_UINT,
                                  base_index + 3, 1);

        if (fourcc_info->num_planes == 2) {
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[1],
                                      ALIGN(width[1], 2) / 2, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8G8_SINT,
                                      base_index + 4, 1);
        } else if (fourcc_info->num_planes == 3) {
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[1],
                                      ALIGN(width[1], 4) / 4, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8_SINT,
                                      base_index + 4, 1);
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[2],
                                      ALIGN(width[2], 4) / 4, height[2], pitch[2],
                                      I965_SURFACEFORMAT_R8_SINT,
                                      base_index + 5, 1);
        }
    }
}
669
/* No-op walker: the null module always runs a single horizontal step. */
static int
pp_null_x_steps(void *private_context)
{
    (void)private_context;   /* unused */
    return 1;
}
675
/* No-op walker: the null module always runs a single vertical step. */
static int
pp_null_y_steps(void *private_context)
{
    (void)private_context;   /* unused */
    return 1;
}
681
/* No-op per-block callback for the null module: programs nothing and
 * reports success for every (x, y) block. */
static int
pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
{
    (void)pp_context;
    (void)x;
    (void)y;
    return 0;
}
687
688 VAStatus
689 pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
690                    const struct i965_surface *src_surface,
691                    const VARectangle *src_rect,
692                    struct i965_surface *dst_surface,
693                    const VARectangle *dst_rect,
694                    void *filter_param)
695 {
696     /* private function & data */
697     pp_context->pp_x_steps = pp_null_x_steps;
698     pp_context->pp_y_steps = pp_null_y_steps;
699     pp_context->private_context = NULL;
700     pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
701
702     dst_surface->flags = src_surface->flags;
703
704     return VA_STATUS_SUCCESS;
705 }
706
707 static void calculate_boundary_block_mask(struct i965_post_processing_context *pp_context, const VARectangle *dst_rect)
708 {
709     int i, dst_width_adjust;
710     /* x offset of dest surface must be dword aligned.
711      * so we have to extend dst surface on left edge, and mask out pixels not interested
712      */
713     if (dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT) {
714         pp_context->block_horizontal_mask_left = 0;
715         for (i=dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT; i<GPU_ASM_BLOCK_WIDTH; i++)
716         {
717             pp_context->block_horizontal_mask_left |= 1<<i;
718         }
719     }
720     else {
721         pp_context->block_horizontal_mask_left = 0xffff;
722     }
723
724     dst_width_adjust = dst_rect->width + dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT;
725     if (dst_width_adjust%GPU_ASM_BLOCK_WIDTH){
726         pp_context->block_horizontal_mask_right = (1 << (dst_width_adjust%GPU_ASM_BLOCK_WIDTH)) - 1;
727     }
728     else {
729         pp_context->block_horizontal_mask_right = 0xffff;
730     }
731
732     if (dst_rect->height%GPU_ASM_BLOCK_HEIGHT){
733         pp_context->block_vertical_mask_bottom = (1 << (dst_rect->height%GPU_ASM_BLOCK_HEIGHT)) - 1;
734     }
735     else {
736         pp_context->block_vertical_mask_bottom = 0xff;
737     }
738
739 }
740
741 static int
742 gen7_pp_avs_x_steps(void *private_context)
743 {
744     struct pp_avs_context *pp_avs_context = private_context;
745
746     return pp_avs_context->dest_w / 16;
747 }
748
749 static int
750 gen7_pp_avs_y_steps(void *private_context)
751 {
752     struct pp_avs_context *pp_avs_context = private_context;
753
754     return pp_avs_context->dest_h / 16;
755 }
756
/*
 * Per-block walker callback for the AVS kernel: writes the block's
 * destination origin and the horizontal sampling step into the inline
 * parameter buffer read by the shader.  Always returns 0 (success).
 */
static int
gen7_pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
{
    struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)pp_context->private_context;
    struct gen7_pp_inline_parameter *pp_inline_parameter = pp_context->pp_inline_parameter;

    /* Each step covers a 16x16 destination block offset by the dest origin. */
    pp_inline_parameter->grf9.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
    pp_inline_parameter->grf9.destination_block_vertical_origin = y * 16 + pp_avs_context->dest_y;
    pp_inline_parameter->grf9.constant_0 = 0xffffffff;
    /* X scaling step: horiz_range / src_w -- presumably source pixels
     * advanced per destination pixel; confirm against the kernel source. */
    pp_inline_parameter->grf9.sampler_load_main_video_x_scaling_step = pp_avs_context->horiz_range / pp_avs_context->src_w;

    return 0;
}
770
771 static void gen7_update_src_surface_uv_offset(VADriverContextP    ctx,
772                                               struct i965_post_processing_context *pp_context,
773                                               const struct i965_surface *surface)
774 {
775     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
776     int fourcc = pp_get_surface_fourcc(ctx, surface);
777
778     if (fourcc == VA_FOURCC_YUY2) {
779         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 0;
780         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 1;
781         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 3;
782     } else if (fourcc == VA_FOURCC_UYVY) {
783         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 1;
784         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 0;
785         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 2;
786     }
787 }
788
/* Gen8 AVS (adaptive video scaler) configuration: 6 fractional coefficient
 * bits, 16 filter phases, 8-tap luma and 4-tap chroma filters, plus the
 * hardware-legal lower/upper bound for each tap's coefficient. */
static const AVSConfig gen8_avs_config = {
    .coeff_frac_bits = 6,
    .coeff_epsilon = 1.0f / (1U << 6),   /* smallest representable step */
    .num_phases = 16,
    .num_luma_coeffs = 8,
    .num_chroma_coeffs = 4,

    .coeff_range = {
        .lower_bound = {
            .y_k_h = { -2, -2, -2, -2, -2, -2, -2, -2 },
            .y_k_v = { -2, -2, -2, -2, -2, -2, -2, -2 },
            .uv_k_h = { -1, -2, -2, -1 },
            .uv_k_v = { -1, -2, -2, -1 },
        },
        .upper_bound = {
            .y_k_h = { 2, 2, 2, 2, 2, 2, 2, 2 },
            .y_k_v = { 2, 2, 2, 2, 2, 2, 2, 2 },
            .uv_k_h = { 1, 2, 2, 1 },
            .uv_k_v = { 1, 2, 2, 1 },
        },
    },
};
811
812 static int
813 gen8_pp_get_8tap_filter_mode(VADriverContextP ctx,
814                              const struct i965_surface *surface)
815 {
816     int fourcc = pp_get_surface_fourcc(ctx, surface);
817
818     if (fourcc == VA_FOURCC_YUY2 ||
819         fourcc == VA_FOURCC_UYVY)
820         return 1;
821     else
822         return 3;
823 }
824
825 static int
826 gen8_pp_kernel_use_media_read_msg(VADriverContextP ctx,
827                                   const struct i965_surface *src_surface,
828                                   const VARectangle *src_rect,
829                                   const struct i965_surface *dst_surface,
830                                   const VARectangle *dst_rect)
831 {
832     int src_fourcc = pp_get_surface_fourcc(ctx, src_surface);
833     int dst_fourcc = pp_get_surface_fourcc(ctx, dst_surface);
834     const i965_fourcc_info *src_fourcc_info = get_fourcc_info(src_fourcc);
835     const i965_fourcc_info *dst_fourcc_info = get_fourcc_info(dst_fourcc);
836
837     if (!src_fourcc_info ||
838         src_fourcc_info->subsampling != SUBSAMPLE_YUV420 ||
839         !dst_fourcc_info ||
840         dst_fourcc_info->subsampling != SUBSAMPLE_YUV420)
841         return 0;
842
843     if (src_rect->x == dst_rect->x &&
844         src_rect->y == dst_rect->y &&
845         src_rect->width == dst_rect->width &&
846         src_rect->height == dst_rect->height)
847         return 1;
848
849     return 0;
850 }
851
852 VAStatus
853 gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
854                            const struct i965_surface *src_surface,
855                            const VARectangle *src_rect,
856                            struct i965_surface *dst_surface,
857                            const VARectangle *dst_rect,
858                            void *filter_param)
859 {
860 /* TODO: Add the sampler_8x8 state */
861     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->pp_avs_context;
862     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
863     struct gen8_sampler_8x8_avs *sampler_8x8;
864     int i;
865     int width[3], height[3], pitch[3], offset[3];
866     int src_width, src_height;
867     unsigned char *cc_ptr;
868     AVSState * const avs = &pp_avs_context->state;
869     float sx, sy;
870     const float * yuv_to_rgb_coefs;
871     size_t yuv_to_rgb_coefs_size;
872
873     memset(pp_static_parameter, 0, sizeof(struct gen7_pp_static_parameter));
874
875     /* source surface */
876     gen8_pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 0, 0,
877                                          src_rect,
878                                          width, height, pitch, offset);
879     src_height = height[0];
880     src_width  = width[0];
881
882     /* destination surface */
883     gen8_pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 24, 1,
884                                          dst_rect,
885                                          width, height, pitch, offset);
886
887     /* sampler 8x8 state */
888     dri_bo_map(pp_context->dynamic_state.bo, True);
889     assert(pp_context->dynamic_state.bo->virtual);
890
891     cc_ptr = (unsigned char *) pp_context->dynamic_state.bo->virtual +
892                         pp_context->sampler_offset;
893     /* Currently only one gen8 sampler_8x8 is initialized */
894     sampler_8x8 = (struct gen8_sampler_8x8_avs *) cc_ptr;
895     memset(sampler_8x8, 0, sizeof(*sampler_8x8));
896
897     sampler_8x8->dw0.gain_factor = 44;
898     sampler_8x8->dw0.weak_edge_threshold = 1;
899     sampler_8x8->dw0.strong_edge_threshold = 8;
900     /* Use the value like that on Ivy instead of default
901      * sampler_8x8->dw0.r3x_coefficient = 5;
902      */
903     sampler_8x8->dw0.r3x_coefficient = 27;
904     sampler_8x8->dw0.r3c_coefficient = 5;
905
906     sampler_8x8->dw2.global_noise_estimation = 255;
907     sampler_8x8->dw2.non_edge_weight = 1;
908     sampler_8x8->dw2.regular_weight = 2;
909     sampler_8x8->dw2.strong_edge_weight = 7;
910     /* Use the value like that on Ivy instead of default
911      * sampler_8x8->dw2.r5x_coefficient = 7;
912      * sampler_8x8->dw2.r5cx_coefficient = 7;
913      * sampler_8x8->dw2.r5c_coefficient = 7;
914      */
915     sampler_8x8->dw2.r5x_coefficient = 9;
916     sampler_8x8->dw2.r5cx_coefficient = 8;
917     sampler_8x8->dw2.r5c_coefficient = 3;
918
919     sampler_8x8->dw3.sin_alpha = 101; /* sin_alpha = 0 */
920     sampler_8x8->dw3.cos_alpha = 79; /* cos_alpha = 0 */
921     sampler_8x8->dw3.sat_max = 0x1f;
922     sampler_8x8->dw3.hue_max = 14;
923     /* The 8tap filter will determine whether the adaptive Filter is
924      * applied for all channels(dw153).
925      * If the 8tap filter is disabled, the adaptive filter should be disabled.
926      * Only when 8tap filter is enabled, it can be enabled or not.
927      */
928     sampler_8x8->dw3.enable_8tap_filter = gen8_pp_get_8tap_filter_mode(ctx, src_surface);
929     sampler_8x8->dw3.ief4_smooth_enable = 0;
930
931     sampler_8x8->dw4.s3u = 0;
932     sampler_8x8->dw4.diamond_margin = 4;
933     sampler_8x8->dw4.vy_std_enable = 0;
934     sampler_8x8->dw4.umid = 110;
935     sampler_8x8->dw4.vmid = 154;
936
937     sampler_8x8->dw5.diamond_dv = 0;
938     sampler_8x8->dw5.diamond_th = 35;
939     sampler_8x8->dw5.diamond_alpha = 100; /* diamond_alpha = 0 */
940     sampler_8x8->dw5.hs_margin = 3;
941     sampler_8x8->dw5.diamond_du = 2;
942
943     sampler_8x8->dw6.y_point1 = 46;
944     sampler_8x8->dw6.y_point2 = 47;
945     sampler_8x8->dw6.y_point3 = 254;
946     sampler_8x8->dw6.y_point4 = 255;
947
948     sampler_8x8->dw7.inv_margin_vyl = 3300; /* inv_margin_vyl = 0 */
949
950     sampler_8x8->dw8.inv_margin_vyu = 1600; /* inv_margin_vyu = 0 */
951     sampler_8x8->dw8.p0l = 46;
952     sampler_8x8->dw8.p1l = 216;
953
954     sampler_8x8->dw9.p2l = 236;
955     sampler_8x8->dw9.p3l = 236;
956     sampler_8x8->dw9.b0l = 133;
957     sampler_8x8->dw9.b1l = 130;
958
959     sampler_8x8->dw10.b2l = 130;
960     sampler_8x8->dw10.b3l = 130;
961     /* s0l = -5 / 256. s2.8 */
962     sampler_8x8->dw10.s0l = 1029;    /* s0l = 0 */
963     sampler_8x8->dw10.y_slope2 = 31; /* y_slop2 = 0 */
964
965     sampler_8x8->dw11.s1l = 0;
966     sampler_8x8->dw11.s2l = 0;
967
968     sampler_8x8->dw12.s3l = 0;
969     sampler_8x8->dw12.p0u = 46;
970     sampler_8x8->dw12.p1u = 66;
971     sampler_8x8->dw12.y_slope1 = 31; /* y_slope1 = 0 */
972
973     sampler_8x8->dw13.p2u = 130;
974     sampler_8x8->dw13.p3u = 236;
975     sampler_8x8->dw13.b0u = 143;
976     sampler_8x8->dw13.b1u = 163;
977
978     sampler_8x8->dw14.b2u = 200;
979     sampler_8x8->dw14.b3u = 140;
980     sampler_8x8->dw14.s0u = 256;  /* s0u = 0 */
981
982     sampler_8x8->dw15.s1u = 113; /* s1u = 0 */
983     sampler_8x8->dw15.s2u = 1203; /* s2u = 0 */
984
985     sx = (float)dst_rect->width / src_rect->width;
986     sy = (float)dst_rect->height / src_rect->height;
987     avs_update_coefficients(avs, sx, sy, pp_context->filter_flags);
988
989     assert(avs->config->num_phases >= 16);
990     for (i = 0; i <= 16; i++) {
991         struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
992             &sampler_8x8->coefficients[i];
993         const AVSCoeffs * const coeffs = &avs->coeffs[i];
994
995         sampler_8x8_state->dw0.table_0x_filter_c0 =
996             intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
997         sampler_8x8_state->dw0.table_0y_filter_c0 =
998             intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
999         sampler_8x8_state->dw0.table_0x_filter_c1 =
1000             intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
1001         sampler_8x8_state->dw0.table_0y_filter_c1 =
1002             intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
1003
1004         sampler_8x8_state->dw1.table_0x_filter_c2 =
1005             intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
1006         sampler_8x8_state->dw1.table_0y_filter_c2 =
1007             intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
1008         sampler_8x8_state->dw1.table_0x_filter_c3 =
1009             intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
1010         sampler_8x8_state->dw1.table_0y_filter_c3 =
1011             intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
1012
1013         sampler_8x8_state->dw2.table_0x_filter_c4 =
1014             intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
1015         sampler_8x8_state->dw2.table_0y_filter_c4 =
1016             intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
1017         sampler_8x8_state->dw2.table_0x_filter_c5 =
1018             intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
1019         sampler_8x8_state->dw2.table_0y_filter_c5 =
1020             intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
1021
1022         sampler_8x8_state->dw3.table_0x_filter_c6 =
1023             intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
1024         sampler_8x8_state->dw3.table_0y_filter_c6 =
1025             intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
1026         sampler_8x8_state->dw3.table_0x_filter_c7 =
1027             intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
1028         sampler_8x8_state->dw3.table_0y_filter_c7 =
1029             intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
1030
1031         sampler_8x8_state->dw4.pad0 = 0;
1032         sampler_8x8_state->dw5.pad0 = 0;
1033         sampler_8x8_state->dw4.table_1x_filter_c2 =
1034             intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
1035         sampler_8x8_state->dw4.table_1x_filter_c3 =
1036             intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
1037         sampler_8x8_state->dw5.table_1x_filter_c4 =
1038             intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
1039         sampler_8x8_state->dw5.table_1x_filter_c5 =
1040             intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
1041
1042         sampler_8x8_state->dw6.pad0 =
1043         sampler_8x8_state->dw7.pad0 =
1044         sampler_8x8_state->dw6.table_1y_filter_c2 =
1045             intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
1046         sampler_8x8_state->dw6.table_1y_filter_c3 =
1047             intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
1048         sampler_8x8_state->dw7.table_1y_filter_c4 =
1049             intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
1050         sampler_8x8_state->dw7.table_1y_filter_c5 =
1051             intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
1052     }
1053
1054     sampler_8x8->dw152.default_sharpness_level =
1055         -avs_is_needed(pp_context->filter_flags);
1056     sampler_8x8->dw153.adaptive_filter_for_all_channel = 1;
1057     sampler_8x8->dw153.bypass_y_adaptive_filtering = 1;
1058     sampler_8x8->dw153.bypass_x_adaptive_filtering = 1;
1059
1060     for ( ; i <= avs->config->num_phases; i++) {
1061         struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
1062             &sampler_8x8->coefficients1[i - 17];
1063         const AVSCoeffs * const coeffs = &avs->coeffs[i];
1064
1065         sampler_8x8_state->dw0.table_0x_filter_c0 =
1066             intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
1067         sampler_8x8_state->dw0.table_0y_filter_c0 =
1068             intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
1069         sampler_8x8_state->dw0.table_0x_filter_c1 =
1070             intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
1071         sampler_8x8_state->dw0.table_0y_filter_c1 =
1072             intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
1073
1074         sampler_8x8_state->dw1.table_0x_filter_c2 =
1075             intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
1076         sampler_8x8_state->dw1.table_0y_filter_c2 =
1077             intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
1078         sampler_8x8_state->dw1.table_0x_filter_c3 =
1079             intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
1080         sampler_8x8_state->dw1.table_0y_filter_c3 =
1081             intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
1082
1083         sampler_8x8_state->dw2.table_0x_filter_c4 =
1084             intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
1085         sampler_8x8_state->dw2.table_0y_filter_c4 =
1086             intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
1087         sampler_8x8_state->dw2.table_0x_filter_c5 =
1088             intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
1089         sampler_8x8_state->dw2.table_0y_filter_c5 =
1090             intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
1091
1092         sampler_8x8_state->dw3.table_0x_filter_c6 =
1093             intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
1094         sampler_8x8_state->dw3.table_0y_filter_c6 =
1095             intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
1096         sampler_8x8_state->dw3.table_0x_filter_c7 =
1097             intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
1098         sampler_8x8_state->dw3.table_0y_filter_c7 =
1099             intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
1100
1101         sampler_8x8_state->dw4.pad0 = 0;
1102         sampler_8x8_state->dw5.pad0 = 0;
1103         sampler_8x8_state->dw4.table_1x_filter_c2 =
1104             intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
1105         sampler_8x8_state->dw4.table_1x_filter_c3 =
1106             intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
1107         sampler_8x8_state->dw5.table_1x_filter_c4 =
1108             intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
1109         sampler_8x8_state->dw5.table_1x_filter_c5 =
1110             intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
1111
1112         sampler_8x8_state->dw6.pad0 =
1113         sampler_8x8_state->dw7.pad0 =
1114         sampler_8x8_state->dw6.table_1y_filter_c2 =
1115             intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
1116         sampler_8x8_state->dw6.table_1y_filter_c3 =
1117             intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
1118         sampler_8x8_state->dw7.table_1y_filter_c4 =
1119             intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
1120         sampler_8x8_state->dw7.table_1y_filter_c5 =
1121             intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
1122     }
1123
1124     dri_bo_unmap(pp_context->dynamic_state.bo);
1125
1126
1127     /* private function & data */
1128     pp_context->pp_x_steps = gen7_pp_avs_x_steps;
1129     pp_context->pp_y_steps = gen7_pp_avs_y_steps;
1130     pp_context->private_context = &pp_context->pp_avs_context;
1131     pp_context->pp_set_block_parameter = gen7_pp_avs_set_block_parameter;
1132
1133     int dst_left_edge_extend = dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT;
1134     pp_avs_context->dest_x = dst_rect->x - dst_left_edge_extend;
1135     pp_avs_context->dest_y = dst_rect->y;
1136     pp_avs_context->dest_w = ALIGN(dst_rect->width + dst_left_edge_extend, 16);
1137     pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
1138     pp_avs_context->src_w = src_rect->width;
1139     pp_avs_context->src_h = src_rect->height;
1140     pp_avs_context->horiz_range = (float)src_rect->width / src_width;
1141
1142     int dw = (pp_avs_context->src_w - 1) / 16 + 1;
1143     dw = MAX(dw, dst_rect->width + dst_left_edge_extend);
1144
1145     pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
1146     pp_static_parameter->grf2.avs_wa_enable = gen8_pp_kernel_use_media_read_msg(ctx,
1147                                                                                 src_surface, src_rect,
1148                                                                                 dst_surface, dst_rect); /* reuse this flag for media block reading on gen8+ */
1149     pp_static_parameter->grf2.alpha = 255;
1150
1151     pp_static_parameter->grf3.sampler_load_horizontal_scaling_step_ratio = (float) pp_avs_context->src_w / dw;
1152     pp_static_parameter->grf4.sampler_load_vertical_scaling_step = (float) src_rect->height / src_height / dst_rect->height;
1153     pp_static_parameter->grf5.sampler_load_vertical_frame_origin = (float) src_rect->y / src_height -
1154         (float) pp_avs_context->dest_y * pp_static_parameter->grf4.sampler_load_vertical_scaling_step;
1155     pp_static_parameter->grf6.sampler_load_horizontal_frame_origin = (float) src_rect->x / src_width -
1156         (float) pp_avs_context->dest_x * pp_avs_context->horiz_range / dw;
1157
1158     gen7_update_src_surface_uv_offset(ctx, pp_context, dst_surface);
1159
1160     yuv_to_rgb_coefs = i915_color_standard_to_coefs (i915_filter_to_color_standard (src_surface->flags &
1161                                                                                     VA_SRC_COLOR_MASK),
1162                                                      &yuv_to_rgb_coefs_size);
1163     memcpy(&pp_static_parameter->grf7, yuv_to_rgb_coefs, yuv_to_rgb_coefs_size);
1164
1165     dst_surface->flags = src_surface->flags;
1166
1167     return VA_STATUS_SUCCESS;
1168 }
1169
1170 VAStatus
1171 gen8_pp_initialize(
1172     VADriverContextP   ctx,
1173     struct i965_post_processing_context *pp_context,
1174     const struct i965_surface *src_surface,
1175     const VARectangle *src_rect,
1176     struct i965_surface *dst_surface,
1177     const VARectangle *dst_rect,
1178     int                pp_index,
1179     void * filter_param
1180 )
1181 {
1182     VAStatus va_status;
1183     struct i965_driver_data *i965 = i965_driver_data(ctx);
1184     dri_bo *bo;
1185     int bo_size;
1186     unsigned int end_offset;
1187     struct pp_module *pp_module;
1188     int static_param_size, inline_param_size;
1189
1190     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1191     bo = dri_bo_alloc(i965->intel.bufmgr,
1192                       "surface state & binding table",
1193                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_PP_SURFACES,
1194                       4096);
1195     assert(bo);
1196     pp_context->surface_state_binding_table.bo = bo;
1197
1198     pp_context->idrt.num_interface_descriptors = 0;
1199
1200     pp_context->sampler_size = 4 * 4096;
1201
1202     bo_size = 4096 + pp_context->curbe_size + pp_context->sampler_size
1203                 + pp_context->idrt_size;
1204
1205     dri_bo_unreference(pp_context->dynamic_state.bo);
1206     bo = dri_bo_alloc(i965->intel.bufmgr,
1207                       "dynamic_state",
1208                       bo_size,
1209                       4096);
1210
1211     assert(bo);
1212     pp_context->dynamic_state.bo = bo;
1213     pp_context->dynamic_state.bo_size = bo_size;
1214
1215     end_offset = 0;
1216     pp_context->dynamic_state.end_offset = 0;
1217
1218     /* Constant buffer offset */
1219     pp_context->curbe_offset = ALIGN(end_offset, 64);
1220     end_offset = pp_context->curbe_offset + pp_context->curbe_size;
1221
1222     /* Interface descriptor offset */
1223     pp_context->idrt_offset = ALIGN(end_offset, 64);
1224     end_offset = pp_context->idrt_offset + pp_context->idrt_size;
1225
1226     /* Sampler state offset */
1227     pp_context->sampler_offset = ALIGN(end_offset, 64);
1228     end_offset = pp_context->sampler_offset + pp_context->sampler_size;
1229
1230     /* update the end offset of dynamic_state */
1231     pp_context->dynamic_state.end_offset = ALIGN(end_offset, 64);
1232
1233     static_param_size = sizeof(struct gen7_pp_static_parameter);
1234     inline_param_size = sizeof(struct gen7_pp_inline_parameter);
1235
1236     memset(pp_context->pp_static_parameter, 0, static_param_size);
1237     memset(pp_context->pp_inline_parameter, 0, inline_param_size);
1238
1239     assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
1240     pp_context->current_pp = pp_index;
1241     pp_module = &pp_context->pp_modules[pp_index];
1242
1243     if (pp_module->initialize)
1244         va_status = pp_module->initialize(ctx, pp_context,
1245                                           src_surface,
1246                                           src_rect,
1247                                           dst_surface,
1248                                           dst_rect,
1249                                           filter_param);
1250     else
1251         va_status = VA_STATUS_ERROR_UNIMPLEMENTED;
1252
1253     calculate_boundary_block_mask(pp_context, dst_rect);
1254
1255     return va_status;
1256 }
1257
1258 static void
1259 gen8_pp_interface_descriptor_table(VADriverContextP   ctx,
1260                                    struct i965_post_processing_context *pp_context)
1261 {
1262     struct gen8_interface_descriptor_data *desc;
1263     dri_bo *bo;
1264     int pp_index = pp_context->current_pp;
1265     unsigned char *cc_ptr;
1266
1267     bo = pp_context->dynamic_state.bo;
1268
1269     dri_bo_map(bo, 1);
1270     assert(bo->virtual);
1271     cc_ptr = (unsigned char *)bo->virtual + pp_context->idrt_offset;
1272
1273     desc = (struct gen8_interface_descriptor_data *) cc_ptr +
1274                 pp_context->idrt.num_interface_descriptors;
1275
1276     memset(desc, 0, sizeof(*desc));
1277     desc->desc0.kernel_start_pointer =
1278                 pp_context->pp_modules[pp_index].kernel.kernel_offset >> 6; /* reloc */
1279     desc->desc2.single_program_flow = 1;
1280     desc->desc2.floating_point_mode = FLOATING_POINT_IEEE_754;
1281     desc->desc3.sampler_count = 0;      /* 1 - 4 samplers used */
1282     desc->desc3.sampler_state_pointer = pp_context->sampler_offset >> 5;
1283     desc->desc4.binding_table_entry_count = 0;
1284     desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
1285     desc->desc5.constant_urb_entry_read_offset = 0;
1286
1287     desc->desc5.constant_urb_entry_read_length = 8; /* grf 1-8 */
1288
1289     dri_bo_unmap(bo);
1290     pp_context->idrt.num_interface_descriptors++;
1291 }
1292
1293
1294 static void
1295 gen8_pp_upload_constants(VADriverContextP ctx,
1296                          struct i965_post_processing_context *pp_context)
1297 {
1298     unsigned char *constant_buffer;
1299     int param_size;
1300
1301     assert(sizeof(struct gen7_pp_static_parameter) == 256);
1302
1303     param_size = sizeof(struct gen7_pp_static_parameter);
1304
1305     dri_bo_map(pp_context->dynamic_state.bo, 1);
1306     assert(pp_context->dynamic_state.bo->virtual);
1307     constant_buffer = (unsigned char *) pp_context->dynamic_state.bo->virtual +
1308                         pp_context->curbe_offset;
1309
1310     memcpy(constant_buffer, pp_context->pp_static_parameter, param_size);
1311     dri_bo_unmap(pp_context->dynamic_state.bo);
1312     return;
1313 }
1314
1315 void
1316 gen8_pp_states_setup(VADriverContextP ctx,
1317                      struct i965_post_processing_context *pp_context)
1318 {
1319     gen8_pp_interface_descriptor_table(ctx, pp_context);
1320     gen8_pp_upload_constants(ctx, pp_context);
1321 }
1322
/* Switch the GPU to the media pipeline before any media commands are
 * emitted for this job.
 */
static void
gen6_pp_pipeline_select(VADriverContextP ctx,
                        struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}
1333
/* Emit STATE_BASE_ADDRESS (16 DWs on gen8): surface state points at
 * the binding table buffer, dynamic state at the CURBE/IDRT/sampler
 * buffer, and instructions at the kernel shader buffer.  The DW
 * layout below must match the gen8 command format exactly.
 */
static void
gen8_pp_state_base_address(VADriverContextP ctx,
                           struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 16);
    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));
        /* DW1 Generate state address */
    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);
        OUT_BATCH(batch, 0);

        /* DW4-5. Surface state address */
    OUT_RELOC64(batch, pp_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */

        /* DW6-7. Dynamic state address */
    OUT_RELOC64(batch, pp_context->dynamic_state.bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
                0, 0 | BASE_ADDRESS_MODIFY);

        /* DW8. Indirect object address */
    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);

        /* DW10-11. Instruction base address */
    OUT_RELOC64(batch, pp_context->instruction_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);

    /* DW12-15: buffer-size fields for the four base addresses above,
     * set to the maximum bound with the modify bit.  NOTE(review):
     * presumed to be the general/dynamic/indirect/instruction buffer
     * size fields per the gen8 command layout — confirm with the PRM. */
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    ADVANCE_BATCH(batch);
}
1367
/* Emit MEDIA_VFE_STATE: thread count, URB entry count/size and CURBE
 * allocation, taken from the vfe_gpu_state values chosen at context
 * init.  The 9-DW layout must match the gen8 command format exactly.
 */
void
gen8_pp_vfe_state(VADriverContextP ctx,
                  struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 9);
    OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    /* Maximum thread count is encoded as N-1. */
    OUT_BATCH(batch,
              (pp_context->vfe_gpu_state.max_num_threads - 1) << 16 |
              pp_context->vfe_gpu_state.num_urb_entries << 8);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch,
              (pp_context->vfe_gpu_state.urb_entry_size) << 16 |
                /* URB Entry Allocation Size, in 256 bits unit */
              (pp_context->vfe_gpu_state.curbe_allocation_size));
                /* CURBE Allocation Size, in 256 bits unit */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}
1392
/* Flush outstanding media state, then emit
 * MEDIA_INTERFACE_DESCRIPTOR_LOAD pointing at the descriptors written
 * by gen8_pp_interface_descriptor_table (offset is relative to the
 * dynamic state base address).
 */
void
gen8_interface_descriptor_load(VADriverContextP ctx,
                               struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 6);

    OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    /* Total descriptor data length in bytes. */
    OUT_BATCH(batch,
              pp_context->idrt.num_interface_descriptors * sizeof(struct gen8_interface_descriptor_data));
    OUT_BATCH(batch, pp_context->idrt_offset);
    ADVANCE_BATCH(batch);
}
1411
1412 void
1413 gen8_pp_curbe_load(VADriverContextP ctx,
1414                    struct i965_post_processing_context *pp_context)
1415 {
1416     struct intel_batchbuffer *batch = pp_context->batch;
1417     int param_size = 64;
1418
1419     param_size = sizeof(struct gen7_pp_static_parameter);
1420
1421     BEGIN_BATCH(batch, 4);
1422     OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
1423     OUT_BATCH(batch, 0);
1424     OUT_BATCH(batch,
1425               param_size);
1426     OUT_BATCH(batch, pp_context->curbe_offset);
1427     ADVANCE_BATCH(batch);
1428 }
1429
/* Build and execute a second-level batch buffer containing one
 * MEDIA_OBJECT (plus MEDIA_STATE_FLUSH) per walker block, then chain
 * into it from the main batch and flush.
 */
void
gen8_pp_object_walker(VADriverContextP ctx,
                      struct i965_post_processing_context *pp_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = pp_context->batch;
    int x, x_steps, y, y_steps;
    int param_size, command_length_in_dws, extra_cmd_in_dws;
    dri_bo *command_buffer;
    unsigned int *command_ptr;

    param_size = sizeof(struct gen7_pp_inline_parameter);

    /* The current PP module decides how many walker blocks the job
     * needs in each dimension. */
    x_steps = pp_context->pp_x_steps(pp_context->private_context);
    y_steps = pp_context->pp_y_steps(pp_context->private_context);
    /* 6 header DWs for MEDIA_OBJECT + the inline parameter payload,
     * plus 2 extra DWs per block for the trailing MEDIA_STATE_FLUSH. */
    command_length_in_dws = 6 + (param_size >> 2);
    extra_cmd_in_dws = 2;
    command_buffer = dri_bo_alloc(i965->intel.bufmgr,
                                  "command objects buffer",
                                  (command_length_in_dws + extra_cmd_in_dws) * 4 * x_steps * y_steps + 64,
                                  4096);

    dri_bo_map(command_buffer, 1);
    command_ptr = command_buffer->virtual;

    for (y = 0; y < y_steps; y++) {
        for (x = 0; x < x_steps; x++) {
            /* pp_set_block_parameter fills pp_inline_parameter for
             * block (x, y); non-zero return means skip this block. */
            if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {

                *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                memcpy(command_ptr, pp_context->pp_inline_parameter, param_size);
                command_ptr += (param_size >> 2);

                *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
                *command_ptr++ = 0;
            }
        }
    }

    /* NOTE(review): pad DW presumably keeps MI_BATCH_BUFFER_END
     * QWORD-aligned based on the worst-case (no skipped blocks)
     * command count — confirm the alignment intent. */
    if ((command_length_in_dws + extra_cmd_in_dws) * x_steps * y_steps % 2 == 0)
        *command_ptr++ = 0;

    *command_ptr++ = MI_BATCH_BUFFER_END;
    *command_ptr++ = 0;

    dri_bo_unmap(command_buffer);

    /* Chain from the main batch into the second-level buffer. */
    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
    OUT_RELOC64(batch, command_buffer,
              I915_GEM_DOMAIN_COMMAND, 0, 0);
    ADVANCE_BATCH(batch);

    dri_bo_unreference(command_buffer);

    /* Have to execute the batch buffer here becuase MI_BATCH_BUFFER_END
     * will cause control to pass back to ring buffer
     */
    intel_batchbuffer_end_atomic(batch);
    intel_batchbuffer_flush(batch);
    intel_batchbuffer_start_atomic(batch, 0x1000);
}
1497
/* Emit the complete media pipeline for one PP job: pipeline select,
 * base addresses, VFE, CURBE, interface descriptors, then the object
 * walker.  The command order is required by the hardware.
 */
static void
gen8_pp_pipeline_setup(VADriverContextP ctx,
                       struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    gen6_pp_pipeline_select(ctx, pp_context);
    gen8_pp_state_base_address(ctx, pp_context);
    gen8_pp_vfe_state(ctx, pp_context);
    gen8_pp_curbe_load(ctx, pp_context);
    gen8_interface_descriptor_load(ctx, pp_context);
    /* NOTE(review): VFE state is emitted a second time after the
     * descriptor load (which includes a MEDIA_STATE_FLUSH); looks
     * deliberate but worth confirming against the PRM. */
    gen8_pp_vfe_state(ctx, pp_context);
    gen8_pp_object_walker(ctx, pp_context);
    intel_batchbuffer_end_atomic(batch);
}
1515
1516 static VAStatus
1517 gen8_post_processing(
1518     VADriverContextP   ctx,
1519     struct i965_post_processing_context *pp_context,
1520     const struct i965_surface *src_surface,
1521     const VARectangle *src_rect,
1522     struct i965_surface *dst_surface,
1523     const VARectangle *dst_rect,
1524     int                pp_index,
1525     void * filter_param
1526 )
1527 {
1528     VAStatus va_status;
1529
1530     va_status = gen8_pp_initialize(ctx, pp_context,
1531                                    src_surface,
1532                                    src_rect,
1533                                    dst_surface,
1534                                    dst_rect,
1535                                    pp_index,
1536                                    filter_param);
1537
1538     if (va_status == VA_STATUS_SUCCESS) {
1539         gen8_pp_states_setup(ctx, pp_context);
1540         gen8_pp_pipeline_setup(ctx, pp_context);
1541     }
1542
1543     return va_status;
1544 }
1545
1546 static void
1547 gen8_post_processing_context_finalize(VADriverContextP ctx,
1548     struct i965_post_processing_context *pp_context)
1549 {
1550     if (pp_context->scaling_context_initialized) {
1551         gen8_gpe_context_destroy(&pp_context->scaling_10bit_context);
1552         pp_context->scaling_context_initialized = 0;
1553     }
1554
1555     if (pp_context->scaling_8bit_initialized & VPPGPE_8BIT_420) {
1556         gen8_gpe_context_destroy(&pp_context->scaling_yuv420p8_context);
1557         pp_context->scaling_8bit_initialized &= ~(VPPGPE_8BIT_420);
1558     }
1559
1560     if(pp_context->vebox_proc_ctx){
1561        gen75_vebox_context_destroy(ctx,pp_context->vebox_proc_ctx);
1562        pp_context->vebox_proc_ctx = NULL;
1563     }
1564
1565     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1566     pp_context->surface_state_binding_table.bo = NULL;
1567
1568     dri_bo_unreference(pp_context->pp_dn_context.stmm_bo);
1569     pp_context->pp_dn_context.stmm_bo = NULL;
1570
1571     if (pp_context->instruction_state.bo) {
1572         dri_bo_unreference(pp_context->instruction_state.bo);
1573         pp_context->instruction_state.bo = NULL;
1574     }
1575
1576     if (pp_context->indirect_state.bo) {
1577         dri_bo_unreference(pp_context->indirect_state.bo);
1578         pp_context->indirect_state.bo = NULL;
1579     }
1580
1581     if (pp_context->dynamic_state.bo) {
1582         dri_bo_unreference(pp_context->dynamic_state.bo);
1583         pp_context->dynamic_state.bo = NULL;
1584     }
1585
1586     free(pp_context->pp_static_parameter);
1587     free(pp_context->pp_inline_parameter);
1588     pp_context->pp_static_parameter = NULL;
1589     pp_context->pp_inline_parameter = NULL;
1590 }
1591
1592 #define VPP_CURBE_ALLOCATION_SIZE       32
1593
1594 void
1595 gen8_post_processing_context_common_init(VADriverContextP ctx,
1596                                          void *data,
1597                                          struct pp_module *pp_modules,
1598                                          int num_pp_modules,
1599                                          struct intel_batchbuffer *batch)
1600 {
1601     struct i965_driver_data *i965 = i965_driver_data(ctx);
1602     int i, kernel_size;
1603     unsigned int kernel_offset, end_offset;
1604     unsigned char *kernel_ptr;
1605     struct pp_module *pp_module;
1606     struct i965_post_processing_context *pp_context = data;
1607
1608     if (i965->intel.eu_total > 0)
1609         pp_context->vfe_gpu_state.max_num_threads = 6 * i965->intel.eu_total;
1610     else
1611         pp_context->vfe_gpu_state.max_num_threads = 60;
1612     pp_context->vfe_gpu_state.num_urb_entries = 59;
1613     pp_context->vfe_gpu_state.gpgpu_mode = 0;
1614     pp_context->vfe_gpu_state.urb_entry_size = 16 - 1;
1615     pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
1616
1617     pp_context->intel_post_processing = gen8_post_processing;
1618     pp_context->finalize = gen8_post_processing_context_finalize;
1619
1620     assert(ARRAY_ELEMS(pp_context->pp_modules) == num_pp_modules);
1621
1622     memcpy(pp_context->pp_modules, pp_modules, sizeof(pp_context->pp_modules));
1623
1624     kernel_size = 4096 ;
1625
1626     for (i = 0; i < NUM_PP_MODULES; i++) {
1627         pp_module = &pp_context->pp_modules[i];
1628
1629         if (pp_module->kernel.bin && pp_module->kernel.size) {
1630             kernel_size += pp_module->kernel.size;
1631         }
1632     }
1633
1634     pp_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
1635                                   "kernel shader",
1636                                   kernel_size,
1637                                   0x1000);
1638     if (pp_context->instruction_state.bo == NULL) {
1639         WARN_ONCE("failure to allocate the buffer space for kernel shader in VPP\n");
1640         return;
1641     }
1642
1643     assert(pp_context->instruction_state.bo);
1644
1645
1646     pp_context->instruction_state.bo_size = kernel_size;
1647     pp_context->instruction_state.end_offset = 0;
1648     end_offset = 0;
1649
1650     dri_bo_map(pp_context->instruction_state.bo, 1);
1651     kernel_ptr = (unsigned char *)(pp_context->instruction_state.bo->virtual);
1652
1653     for (i = 0; i < NUM_PP_MODULES; i++) {
1654         pp_module = &pp_context->pp_modules[i];
1655
1656         kernel_offset = ALIGN(end_offset, 64);
1657         pp_module->kernel.kernel_offset = kernel_offset;
1658
1659         if (pp_module->kernel.bin && pp_module->kernel.size) {
1660
1661             memcpy(kernel_ptr + kernel_offset, pp_module->kernel.bin, pp_module->kernel.size);
1662             end_offset = kernel_offset + pp_module->kernel.size;
1663         }
1664     }
1665
1666     pp_context->instruction_state.end_offset = ALIGN(end_offset, 64);
1667
1668     dri_bo_unmap(pp_context->instruction_state.bo);
1669
1670     /* static & inline parameters */
1671     pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
1672     pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
1673
1674     pp_context->batch = batch;
1675
1676     pp_context->idrt_size = 5 * sizeof(struct gen8_interface_descriptor_data);
1677     pp_context->curbe_size = 256;
1678
1679 }
1680
1681 void
1682 gen8_post_processing_context_init(VADriverContextP ctx,
1683                                   void *data,
1684                                   struct intel_batchbuffer *batch)
1685 {
1686     struct i965_driver_data *i965 = i965_driver_data(ctx);
1687     struct i965_post_processing_context *pp_context = data;
1688     struct i965_gpe_context *gpe_context;
1689     struct i965_kernel scaling_kernel;
1690
1691     gen8_post_processing_context_common_init(ctx, data, pp_modules_gen8, ARRAY_ELEMS(pp_modules_gen8), batch);
1692     avs_init_state(&pp_context->pp_avs_context.state, &gen8_avs_config);
1693
1694     /* initialize the YUV420 8-Bit scaling context. The below is supported.
1695      * NV12 ->NV12
1696      * NV12 ->I420
1697      * I420 ->I420
1698      * I420 ->NV12
1699      */
1700     gpe_context = &pp_context->scaling_yuv420p8_context;
1701     memset(&scaling_kernel, 0, sizeof(scaling_kernel));
1702     scaling_kernel.bin = pp_yuv420p8_scaling_gen8;
1703     scaling_kernel.size = sizeof(pp_yuv420p8_scaling_gen8);
1704     gen8_gpe_load_kernels(ctx, gpe_context, &scaling_kernel, 1);
1705     gpe_context->idrt.entry_size = ALIGN(sizeof(struct gen8_interface_descriptor_data), 64);
1706     gpe_context->idrt.max_entries = 1;
1707     gpe_context->sampler.entry_size = ALIGN(sizeof(struct gen8_sampler_state), 64);
1708     gpe_context->sampler.max_entries = 1;
1709     gpe_context->curbe.length = ALIGN(sizeof(struct scaling_input_parameter), 32);
1710
1711     gpe_context->surface_state_binding_table.max_entries = MAX_SCALING_SURFACES;
1712     gpe_context->surface_state_binding_table.binding_table_offset = 0;
1713     gpe_context->surface_state_binding_table.surface_state_offset = ALIGN(MAX_SCALING_SURFACES * 4, 64);
1714     gpe_context->surface_state_binding_table.length = ALIGN(MAX_SCALING_SURFACES * 4, 64) + ALIGN(MAX_SCALING_SURFACES * SURFACE_STATE_PADDED_SIZE_GEN8, 64);
1715
1716     if (i965->intel.eu_total > 0) {
1717         gpe_context->vfe_state.max_num_threads = i965->intel.eu_total * 6;
1718     } else {
1719         if (i965->intel.has_bsd2)
1720             gpe_context->vfe_state.max_num_threads = 300;
1721         else
1722             gpe_context->vfe_state.max_num_threads = 60;
1723     }
1724
1725     gpe_context->vfe_state.curbe_allocation_size = 37;
1726     gpe_context->vfe_state.urb_entry_size = 16;
1727     if (i965->intel.has_bsd2)
1728         gpe_context->vfe_state.num_urb_entries = 127;
1729     else
1730         gpe_context->vfe_state.num_urb_entries = 64;
1731
1732     gpe_context->vfe_state.gpgpu_mode = 0;
1733
1734     gen8_gpe_context_init(ctx, gpe_context);
1735     pp_context->scaling_8bit_initialized = VPPGPE_8BIT_420;
1736     return;
1737 }
1738
1739 static void
1740 gen8_run_kernel_media_object_walker(VADriverContextP ctx,
1741                                     struct intel_batchbuffer *batch,
1742                                     struct i965_gpe_context *gpe_context,
1743                                     struct gpe_media_object_walker_parameter *param)
1744 {
1745     if (!batch || !gpe_context || !param)
1746         return;
1747
1748     intel_batchbuffer_start_atomic(batch, 0x1000);
1749
1750     intel_batchbuffer_emit_mi_flush(batch);
1751
1752     gen8_gpe_pipeline_setup(ctx, gpe_context, batch);
1753     gen8_gpe_media_object_walker(ctx, gpe_context, batch, param);
1754     gen8_gpe_media_state_flush(ctx, gpe_context, batch);
1755
1756
1757     intel_batchbuffer_end_atomic(batch);
1758
1759     intel_batchbuffer_flush(batch);
1760     return;
1761 }
1762
1763 static void
1764 gen8_add_dri_buffer_2d_gpe_surface(VADriverContextP ctx,
1765                                    struct i965_gpe_context *gpe_context,
1766                                    dri_bo *bo,
1767                                    unsigned int bo_offset,
1768                                    unsigned int width,
1769                                    unsigned int height,
1770                                    unsigned int pitch,
1771                                    int is_media_block_rw,
1772                                    unsigned int format,
1773                                    int index,
1774                                    int is_10bit)
1775 {
1776     struct i965_gpe_resource gpe_resource;
1777     struct i965_gpe_surface gpe_surface;
1778
1779     i965_dri_object_to_2d_gpe_resource(&gpe_resource, bo, width, height, pitch);
1780     memset(&gpe_surface, 0, sizeof(gpe_surface));
1781     gpe_surface.gpe_resource = &gpe_resource;
1782     gpe_surface.is_2d_surface = 1;
1783     gpe_surface.is_media_block_rw = !!is_media_block_rw;
1784     gpe_surface.cacheability_control = DEFAULT_MOCS;
1785     gpe_surface.format = format;
1786     gpe_surface.is_override_offset = 1;
1787     gpe_surface.offset = bo_offset;
1788     gpe_surface.is_16bpp = is_10bit;
1789
1790     gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
1791
1792     i965_free_gpe_resource(&gpe_resource);
1793 }
1794
1795 static void
1796 gen8_vpp_scaling_sample_state(VADriverContextP ctx,
1797                                struct i965_gpe_context *gpe_context,
1798                                VARectangle *src_rect,
1799                                VARectangle *dst_rect)
1800 {
1801     struct gen8_sampler_state *sampler_state;
1802
1803     if (gpe_context == NULL || !src_rect || !dst_rect)
1804         return;
1805     dri_bo_map(gpe_context->sampler.bo, 1);
1806
1807     if (gpe_context->sampler.bo->virtual == NULL)
1808         return;
1809
1810     assert(gpe_context->sampler.bo->virtual);
1811
1812     sampler_state = (struct gen8_sampler_state *)
1813        (gpe_context->sampler.bo->virtual + gpe_context->sampler.offset);
1814
1815     memset(sampler_state, 0, sizeof(*sampler_state));
1816
1817     if ((src_rect->width == dst_rect->width) &&
1818         (src_rect->height == dst_rect->height)) {
1819         sampler_state->ss0.min_filter = I965_MAPFILTER_NEAREST;
1820         sampler_state->ss0.mag_filter = I965_MAPFILTER_NEAREST;
1821     } else {
1822         sampler_state->ss0.min_filter = I965_MAPFILTER_LINEAR;
1823         sampler_state->ss0.mag_filter = I965_MAPFILTER_LINEAR;
1824     }
1825
1826     sampler_state->ss3.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1827     sampler_state->ss3.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1828     sampler_state->ss3.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1829
1830     dri_bo_unmap(gpe_context->sampler.bo);
1831 }
1832
1833 static void
1834 gen8_gpe_context_yuv420p8_scaling_curbe(VADriverContextP ctx,
1835                                struct i965_gpe_context *gpe_context,
1836                                VARectangle *src_rect,
1837                                struct i965_surface *src_surface,
1838                                VARectangle *dst_rect,
1839                                struct i965_surface *dst_surface)
1840 {
1841     struct scaling_input_parameter *scaling_curbe;
1842     float src_width, src_height;
1843     float coeff;
1844     unsigned int fourcc;
1845
1846     if ((gpe_context == NULL) ||
1847         (src_rect == NULL) || (src_surface == NULL) ||
1848         (dst_rect == NULL) || (dst_surface == NULL))
1849         return;
1850
1851     scaling_curbe = i965_gpe_context_map_curbe(gpe_context);
1852
1853     if (!scaling_curbe)
1854         return;
1855
1856     memset(scaling_curbe, 0, sizeof(struct scaling_input_parameter));
1857
1858     scaling_curbe->bti_input = BTI_SCALING_INPUT_Y;
1859     scaling_curbe->bti_output = BTI_SCALING_OUTPUT_Y;
1860
1861     /* As the src_rect/dst_rect is already checked, it is skipped.*/
1862     scaling_curbe->x_dst     = dst_rect->x;
1863     scaling_curbe->y_dst     = dst_rect->y;
1864
1865     src_width = src_rect->x + src_rect->width;
1866     src_height = src_rect->y + src_rect->height;
1867
1868     scaling_curbe->inv_width = 1 / src_width;
1869     scaling_curbe->inv_height = 1 / src_height;
1870
1871     coeff = (float) (src_rect->width) / dst_rect->width;
1872     scaling_curbe->x_factor = coeff / src_width;
1873     scaling_curbe->x_orig = (float)(src_rect->x) / src_width;
1874
1875     coeff = (float) (src_rect->height) / dst_rect->height;
1876     scaling_curbe->y_factor = coeff / src_height;
1877     scaling_curbe->y_orig = (float)(src_rect->y) / src_height;
1878
1879     fourcc = pp_get_surface_fourcc(ctx, src_surface);
1880     if (fourcc == VA_FOURCC_NV12) {
1881         scaling_curbe->dw7.src_packed = 1;
1882     }
1883
1884     fourcc = pp_get_surface_fourcc(ctx, dst_surface);
1885
1886     if (fourcc == VA_FOURCC_NV12) {
1887         scaling_curbe->dw7.dst_packed = 1;
1888     }
1889
1890     i965_gpe_context_unmap_curbe(gpe_context);
1891 }
1892
1893 static bool
1894 gen8_pp_context_get_surface_conf(VADriverContextP ctx,
1895                                  struct i965_surface *surface,
1896                                  VARectangle *rect,
1897                                  int *width,
1898                                  int *height,
1899                                  int *pitch,
1900                                  int *bo_offset)
1901 {
1902     unsigned int fourcc;
1903     if (!rect || !surface || !width || !height || !pitch || !bo_offset)
1904         return false;
1905
1906     if (surface->base == NULL)
1907         return false;
1908
1909     fourcc = pp_get_surface_fourcc(ctx, surface);
1910     if (surface->type == I965_SURFACE_TYPE_SURFACE) {
1911         struct object_surface *obj_surface;
1912
1913         obj_surface = (struct object_surface *)surface->base;
1914         width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
1915         height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
1916         pitch[0] = obj_surface->width;
1917         bo_offset[0] = 0;
1918
1919         if (fourcc == VA_FOURCC_P010 || fourcc == VA_FOURCC_NV12) {
1920             width[1] = width[0] / 2;
1921             height[1] = height[0] / 2;
1922             pitch[1] = obj_surface->cb_cr_pitch;
1923             bo_offset[1] = obj_surface->width * obj_surface->y_cb_offset;
1924         } else {
1925             /* I010/I420 format */
1926             width[1] = width[0] / 2;
1927             height[1] = height[0] / 2;
1928             pitch[1] = obj_surface->cb_cr_pitch;
1929             bo_offset[1] = obj_surface->width * obj_surface->y_cb_offset;
1930             width[2] = width[0] / 2;
1931             height[2] = height[0] / 2;
1932             pitch[2] = obj_surface->cb_cr_pitch;
1933             bo_offset[2] = obj_surface->width * obj_surface->y_cr_offset;
1934         }
1935
1936     } else {
1937         struct object_image *obj_image;
1938
1939         obj_image = (struct object_image *)surface->base;
1940
1941         width[0] = MIN(rect->x + rect->width, obj_image->image.width);
1942         height[0] = MIN(rect->y + rect->height, obj_image->image.height);
1943         pitch[0] = obj_image->image.pitches[0];
1944         bo_offset[0] = obj_image->image.offsets[0];
1945
1946         if (fourcc == VA_FOURCC_P010 || fourcc == VA_FOURCC_NV12) {
1947             width[1] = width[0] / 2;
1948             height[1] = height[0] / 2;
1949             pitch[1] = obj_image->image.pitches[1];
1950             bo_offset[1] = obj_image->image.offsets[1];
1951         } else {
1952             /* I010/I420 format */
1953             /* YV12 is TBD */
1954             width[1] = width[0] / 2;
1955             height[1] = height[0] / 2;
1956             pitch[1] = obj_image->image.pitches[1];
1957             bo_offset[1] = obj_image->image.offsets[1];
1958             width[2] = width[0] / 2;
1959             height[2] = height[0] / 2;
1960             pitch[2] = obj_image->image.pitches[2];
1961             bo_offset[2] = obj_image->image.offsets[2];
1962         }
1963
1964     }
1965     return true;
1966 }
1967
/*
 * Bind the input and output planes of the YUV420 8-bit scaling kernel to
 * the GPE binding table.  Input planes start at BTI_SCALING_INPUT_Y and
 * are sampled (UNORM formats); output planes start at BTI_SCALING_OUTPUT_Y
 * and use media-block read/write (UINT formats).  NV12 binds two planes
 * (Y + interleaved UV); I420 binds three (Y, U, V).
 */
static void
gen8_gpe_context_yuv420p8_scaling_surfaces(VADriverContextP ctx,
                               struct i965_gpe_context *gpe_context,
                               VARectangle *src_rect,
                               struct i965_surface *src_surface,
                               VARectangle *dst_rect,
                               struct i965_surface *dst_surface)
{
    unsigned int fourcc;
    int width[3], height[3], pitch[3], bo_offset[3];
    dri_bo *bo;
    struct object_surface *obj_surface;
    struct object_image *obj_image;
    int bti;

    if ((gpe_context == NULL) ||
        (src_rect == NULL) || (src_surface == NULL) ||
        (dst_rect == NULL) || (dst_surface == NULL))
        return;

    if (src_surface->base == NULL || dst_surface->base == NULL)
        return;

    fourcc = pp_get_surface_fourcc(ctx, src_surface);

    /* The backing buffer object lives in either a VA surface or a VA image. */
    if (src_surface->type == I965_SURFACE_TYPE_SURFACE) {
        obj_surface = (struct object_surface *)src_surface->base;
        bo = obj_surface->bo;
    } else {
        obj_image = (struct object_image *)src_surface->base;
        bo = obj_image->bo;
    }

    bti = 0;
    if (gen8_pp_context_get_surface_conf(ctx, src_surface, src_rect,
                                         width, height, pitch,
                                         bo_offset)) {
        bti = BTI_SCALING_INPUT_Y;
        /* Input Y plane, sampled as R8 */
        gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
                                           bo_offset[0],
                                           width[0], height[0],
                                           pitch[0], 0,
                                           I965_SURFACEFORMAT_R8_UNORM,
                                           bti, 0);
        if (fourcc == VA_FOURCC_NV12) {
            /* NV12 input: single interleaved UV plane, sampled as R8G8 */
            gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
                                           bo_offset[1],
                                           width[1], height[1],
                                           pitch[1], 0,
                                           I965_SURFACEFORMAT_R8G8_UNORM,
                                           bti + 1, 0);
        } else {
            /* Planar I420 input: separate U and V planes, each R8 */
            gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
                                           bo_offset[1],
                                           width[1], height[1],
                                           pitch[1], 0,
                                           I965_SURFACEFORMAT_R8_UNORM,
                                           bti + 1, 0);

            gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
                                           bo_offset[2],
                                           width[2], height[2],
                                           pitch[2], 0,
                                           I965_SURFACEFORMAT_R8_UNORM,
                                           bti + 2, 0);
        }
    }

    fourcc = pp_get_surface_fourcc(ctx, dst_surface);

    if (dst_surface->type == I965_SURFACE_TYPE_SURFACE) {
        obj_surface = (struct object_surface *)dst_surface->base;
        bo = obj_surface->bo;
    } else {
        obj_image = (struct object_image *)dst_surface->base;
        bo = obj_image->bo;
    }

    if (gen8_pp_context_get_surface_conf(ctx, dst_surface, dst_rect,
                                         width, height, pitch,
                                         bo_offset)) {
        bti = BTI_SCALING_OUTPUT_Y;
        /* Output Y plane, media-block writes as R8_UINT
         * (the old comment said "Input surface" — copy/paste error) */
        gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
                                           bo_offset[0],
                                           width[0], height[0],
                                           pitch[0], 1,
                                           I965_SURFACEFORMAT_R8_UINT,
                                           bti, 0);
        if (fourcc == VA_FOURCC_NV12) {
            /* NV12 output: UV plane written as R16 texels; width is
             * passed as width[1] * 2 — presumably to match the kernel's
             * media-block write width, TODO confirm against the kernel */
            gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
                                           bo_offset[1],
                                           width[1] * 2, height[1],
                                           pitch[1], 1,
                                           I965_SURFACEFORMAT_R16_UINT,
                                           bti + 1, 0);
        } else {
            /* Planar I420 output: separate U and V planes, each R8_UINT */
            gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
                                           bo_offset[1],
                                           width[1], height[1],
                                           pitch[1], 1,
                                           I965_SURFACEFORMAT_R8_UINT,
                                           bti + 1, 0);

            gen8_add_dri_buffer_2d_gpe_surface(ctx, gpe_context, bo,
                                           bo_offset[2],
                                           width[2], height[2],
                                           pitch[2], 1,
                                           I965_SURFACEFORMAT_R8_UINT,
                                           bti + 2, 0);
        }
    }

    return;
}
2084
/*
 * Run the YUV420 8-bit scaling kernel for one src->dst rectangle pair.
 *
 * Returns VA_STATUS_ERROR_INVALID_PARAMETER on any NULL argument,
 * VA_STATUS_ERROR_UNIMPLEMENTED when the scaling GPE context was not
 * initialized, and VA_STATUS_SUCCESS after the batch has been submitted.
 *
 * NOTE: the per-run GPE state is rebuilt in a fixed order (context init,
 * sampler, binding table reset, CURBE, surfaces, interface descriptors)
 * before dispatch; the steps depend on each other's side effects.
 */
VAStatus
gen8_yuv420p8_scaling_post_processing(
    VADriverContextP   ctx,
    struct i965_post_processing_context *pp_context,
    struct i965_surface *src_surface,
    VARectangle *src_rect,
    struct i965_surface *dst_surface,
    VARectangle *dst_rect)
{
    struct i965_gpe_context *gpe_context;
    struct gpe_media_object_walker_parameter media_object_walker_param;
    struct intel_vpp_kernel_walker_parameter kernel_walker_param;

    if (!pp_context || !src_surface || !src_rect || !dst_surface || !dst_rect)
        return VA_STATUS_ERROR_INVALID_PARAMETER;

    if (!(pp_context->scaling_8bit_initialized & VPPGPE_8BIT_420))
        return VA_STATUS_ERROR_UNIMPLEMENTED;

    gpe_context = &pp_context->scaling_yuv420p8_context;

    gen8_gpe_context_init(ctx, gpe_context);
    gen8_vpp_scaling_sample_state(ctx, gpe_context, src_rect, dst_rect);
    gen8_gpe_reset_binding_table(ctx, gpe_context);
    gen8_gpe_context_yuv420p8_scaling_curbe(ctx, gpe_context,
                                        src_rect, src_surface,
                                        dst_rect, dst_surface);

    gen8_gpe_context_yuv420p8_scaling_surfaces(ctx, gpe_context,
                                        src_rect, src_surface,
                                        dst_rect, dst_surface);

    gen8_gpe_setup_interface_data(ctx, gpe_context);

    /* One walker step per 16x16 destination block, no inter-block
     * dependency. */
    memset(&kernel_walker_param, 0, sizeof(kernel_walker_param));
    kernel_walker_param.resolution_x = ALIGN(dst_rect->width, 16) >> 4;
    kernel_walker_param.resolution_y = ALIGN(dst_rect->height, 16) >> 4;
    kernel_walker_param.no_dependency = 1;

    intel_vpp_init_media_object_walker_parameter(&kernel_walker_param, &media_object_walker_param);

    gen8_run_kernel_media_object_walker(ctx, pp_context->batch,
                                        gpe_context,
                                        &media_object_walker_param);

    return VA_STATUS_SUCCESS;
}