src/gen8_post_processing.c (android-x86/hardware-intel-common-vaapi.git)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the
6  * "Software"), to deal in the Software without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sub license, and/or sell copies of the Software, and to
9  * permit persons to whom the Software is furnished to do so, subject to
10  * the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the
13  * next paragraph) shall be included in all copies or substantial portions
14  * of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19  * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Xiang Haihao <haihao.xiang@intel.com>
26  *    Zhao Yakui <yakui.zhao@intel.com>
27  *
28  */
29
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <assert.h>
34
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "i965_drv_video.h"
40 #include "i965_post_processing.h"
41 #include "i965_render.h"
42 #include "i965_yuv_coefs.h"
43 #include "intel_media.h"
44
45 #include "gen75_picture_process.h"
46
47 #define SURFACE_STATE_PADDED_SIZE               SURFACE_STATE_PADDED_SIZE_GEN8
48
49 #define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * index)
50 #define BINDING_TABLE_OFFSET                    SURFACE_STATE_OFFSET(MAX_PP_SURFACES)
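/*
 * The surface_state_binding_table BO packs MAX_PP_SURFACES padded
 * SURFACE_STATE entries first; the binding table itself (one dword per
 * surface) follows them, starting at BINDING_TABLE_OFFSET.
 */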
51
52 #define GPU_ASM_BLOCK_WIDTH         16
53 #define GPU_ASM_BLOCK_HEIGHT        8
54 #define GPU_ASM_X_OFFSET_ALIGNMENT  4
55
56 #define VA_STATUS_SUCCESS_1                     0xFFFFFFFE
57
58 VAStatus pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
59                             const struct i965_surface *src_surface,
60                             const VARectangle *src_rect,
61                             struct i965_surface *dst_surface,
62                             const VARectangle *dst_rect,
63                             void *filter_param);
64
65 VAStatus gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
66                                     const struct i965_surface *src_surface,
67                                     const VARectangle *src_rect,
68                                     struct i965_surface *dst_surface,
69                                     const VARectangle *dst_rect,
70                                     void *filter_param);
71
72 /* TODO: Modify the shader and then compile it again.
73  * Currently it is derived from Haswell. */
74 static const uint32_t pp_null_gen8[][4] = {
75 };
76
77 static const uint32_t pp_nv12_load_save_nv12_gen8[][4] = {
78 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
79 };
80
81 static const uint32_t pp_nv12_load_save_pl3_gen8[][4] = {
82 #include "shaders/post_processing/gen8/pl2_to_pl3.g8b"
83 };
84
85 static const uint32_t pp_pl3_load_save_nv12_gen8[][4] = {
86 #include "shaders/post_processing/gen8/pl3_to_pl2.g8b"
87 };
88
89 static const uint32_t pp_pl3_load_save_pl3_gen8[][4] = {
90 #include "shaders/post_processing/gen8/pl3_to_pl3.g8b"
91 };
92
93 static const uint32_t pp_nv12_scaling_gen8[][4] = {
94 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
95 };
96
97 static const uint32_t pp_nv12_avs_gen8[][4] = {
98 #include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
99 };
100
101 static const uint32_t pp_nv12_dndi_gen8[][4] = {
102 // #include "shaders/post_processing/gen7/dndi.g75b"
103 };
104
105 static const uint32_t pp_nv12_dn_gen8[][4] = {
106 // #include "shaders/post_processing/gen7/nv12_dn_nv12.g75b"
107 };
108 static const uint32_t pp_nv12_load_save_pa_gen8[][4] = {
109 #include "shaders/post_processing/gen8/pl2_to_pa.g8b"
110 };
111 static const uint32_t pp_pl3_load_save_pa_gen8[][4] = {
112 #include "shaders/post_processing/gen8/pl3_to_pa.g8b"
113 };
114 static const uint32_t pp_pa_load_save_nv12_gen8[][4] = {
115 #include "shaders/post_processing/gen8/pa_to_pl2.g8b"
116 };
117 static const uint32_t pp_pa_load_save_pl3_gen8[][4] = {
118 #include "shaders/post_processing/gen8/pa_to_pl3.g8b"
119 };
120 static const uint32_t pp_pa_load_save_pa_gen8[][4] = {
121 #include "shaders/post_processing/gen8/pa_to_pa.g8b"
122 };
123 static const uint32_t pp_rgbx_load_save_nv12_gen8[][4] = {
124 #include "shaders/post_processing/gen8/rgbx_to_nv12.g8b"
125 };
126 static const uint32_t pp_nv12_load_save_rgbx_gen8[][4] = {
127 #include "shaders/post_processing/gen8/pl2_to_rgbx.g8b"
128 };
129
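/*
 * Table of Gen8 post-processing modules: each entry pairs a kernel binary
 * with the initializer that sets up its surfaces and parameters.  The DNDI
 * and DN entries are stubs for now: their kernel arrays are empty (the Gen7
 * binaries above are commented out) and they fall back to pp_null_initialize.
 */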
130 static struct pp_module pp_modules_gen8[] = {
131     {
132         {
133             "NULL module (for testing)",
134             PP_NULL,
135             pp_null_gen8,
136             sizeof(pp_null_gen8),
137             NULL,
138         },
139
140         pp_null_initialize,
141     },
142
143     {
144         {
145             "NV12_NV12",
146             PP_NV12_LOAD_SAVE_N12,
147             pp_nv12_load_save_nv12_gen8,
148             sizeof(pp_nv12_load_save_nv12_gen8),
149             NULL,
150         },
151
152         gen8_pp_plx_avs_initialize,
153     },
154
155     {
156         {
157             "NV12_PL3",
158             PP_NV12_LOAD_SAVE_PL3,
159             pp_nv12_load_save_pl3_gen8,
160             sizeof(pp_nv12_load_save_pl3_gen8),
161             NULL,
162         },
163         gen8_pp_plx_avs_initialize,
164     },
165
166     {
167         {
168             "PL3_NV12",
169             PP_PL3_LOAD_SAVE_N12,
170             pp_pl3_load_save_nv12_gen8,
171             sizeof(pp_pl3_load_save_nv12_gen8),
172             NULL,
173         },
174
175         gen8_pp_plx_avs_initialize,
176     },
177
178     {
179         {
180             "PL3_PL3",
181             PP_PL3_LOAD_SAVE_PL3,
182             pp_pl3_load_save_pl3_gen8,
183             sizeof(pp_pl3_load_save_pl3_gen8),
184             NULL,
185         },
186
187         gen8_pp_plx_avs_initialize,
188     },
189
190     {
191         {
192             "NV12 Scaling module",
193             PP_NV12_SCALING,
194             pp_nv12_scaling_gen8,
195             sizeof(pp_nv12_scaling_gen8),
196             NULL,
197         },
198
199         gen8_pp_plx_avs_initialize,
200     },
201
202     {
203         {
204             "NV12 AVS module",
205             PP_NV12_AVS,
206             pp_nv12_avs_gen8,
207             sizeof(pp_nv12_avs_gen8),
208             NULL,
209         },
210
211         gen8_pp_plx_avs_initialize,
212     },
213
214     {
215         {
216             "NV12 DNDI module",
217             PP_NV12_DNDI,
218             pp_nv12_dndi_gen8,
219             sizeof(pp_nv12_dndi_gen8),
220             NULL,
221         },
222
223         pp_null_initialize,
224     },
225
226     {
227         {
228             "NV12 DN module",
229             PP_NV12_DN,
230             pp_nv12_dn_gen8,
231             sizeof(pp_nv12_dn_gen8),
232             NULL,
233         },
234
235         pp_null_initialize,
236     },
237     {
238         {
239             "NV12_PA module",
240             PP_NV12_LOAD_SAVE_PA,
241             pp_nv12_load_save_pa_gen8,
242             sizeof(pp_nv12_load_save_pa_gen8),
243             NULL,
244         },
245
246         gen8_pp_plx_avs_initialize,
247     },
248
249     {
250         {
251             "PL3_PA module",
252             PP_PL3_LOAD_SAVE_PA,
253             pp_pl3_load_save_pa_gen8,
254             sizeof(pp_pl3_load_save_pa_gen8),
255             NULL,
256         },
257
258         gen8_pp_plx_avs_initialize,
259     },
260
261     {
262         {
263             "PA_NV12 module",
264             PP_PA_LOAD_SAVE_NV12,
265             pp_pa_load_save_nv12_gen8,
266             sizeof(pp_pa_load_save_nv12_gen8),
267             NULL,
268         },
269
270         gen8_pp_plx_avs_initialize,
271     },
272
273     {
274         {
275             "PA_PL3 module",
276             PP_PA_LOAD_SAVE_PL3,
277             pp_pa_load_save_pl3_gen8,
278             sizeof(pp_pa_load_save_pl3_gen8),
279             NULL,
280         },
281
282         gen8_pp_plx_avs_initialize,
283     },
284
285     {
286         {
287             "PA_PA module",
288             PP_PA_LOAD_SAVE_PA,
289             pp_pa_load_save_pa_gen8,
290             sizeof(pp_pa_load_save_pa_gen8),
291             NULL,
292         },
293
294         gen8_pp_plx_avs_initialize,
295     },
296
297     {
298         {
299             "RGBX_NV12 module",
300             PP_RGBX_LOAD_SAVE_NV12,
301             pp_rgbx_load_save_nv12_gen8,
302             sizeof(pp_rgbx_load_save_nv12_gen8),
303             NULL,
304         },
305
306         gen8_pp_plx_avs_initialize,
307     },
308
309     {
310         {
311             "NV12_RGBX module",
312             PP_NV12_LOAD_SAVE_RGBX,
313             pp_nv12_load_save_rgbx_gen8,
314             sizeof(pp_nv12_load_save_rgbx_gen8),
315             NULL,
316         },
317
318         gen8_pp_plx_avs_initialize,
319     },
320 };
321
322 static int
323 pp_get_surface_fourcc(VADriverContextP ctx, const struct i965_surface *surface)
324 {
325     int fourcc;
326
327     if (surface->type == I965_SURFACE_TYPE_IMAGE) {
328         struct object_image *obj_image = (struct object_image *)surface->base;
329         fourcc = obj_image->image.format.fourcc;
330     } else {
331         struct object_surface *obj_surface = (struct object_surface *)surface->base;
332         fourcc = obj_surface->fourcc;
333     }
334
335     return fourcc;
336 }
337
338 static void
339 gen8_pp_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
340 {
341     switch (tiling) {
342     case I915_TILING_NONE:
343         ss->ss0.tiled_surface = 0;
344         ss->ss0.tile_walk = 0;
345         break;
346     case I915_TILING_X:
347         ss->ss0.tiled_surface = 1;
348         ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
349         break;
350     case I915_TILING_Y:
351         ss->ss0.tiled_surface = 1;
352         ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
353         break;
354     }
355 }
356
357 static void
358 gen8_pp_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
359 {
360     switch (tiling) {
361     case I915_TILING_NONE:
362         ss->ss2.tiled_surface = 0;
363         ss->ss2.tile_walk = 0;
364         break;
365     case I915_TILING_X:
366         ss->ss2.tiled_surface = 1;
367         ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
368         break;
369     case I915_TILING_Y:
370         ss->ss2.tiled_surface = 1;
371         ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
372         break;
373     }
374 }
375
376
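/*
 * Write a 2D SURFACE_STATE for the given BO into the surface-state area of
 * the binding-table BO, emit a relocation for its 64-bit base address (ss8),
 * and point the matching binding-table entry at the new state.
 */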
377 static void
378 gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
379                           dri_bo *surf_bo, unsigned long surf_bo_offset,
380                           int width, int height, int pitch, int format,
381                           int index, int is_target)
382 {
383     struct gen8_surface_state *ss;
384     dri_bo *ss_bo;
385     unsigned int tiling;
386     unsigned int swizzle;
387
388     dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
389     ss_bo = pp_context->surface_state_binding_table.bo;
390     assert(ss_bo);
391
392     dri_bo_map(ss_bo, True);
393     assert(ss_bo->virtual);
394     ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
395     memset(ss, 0, sizeof(*ss));
396     ss->ss0.surface_type = I965_SURFACE_2D;
397     ss->ss0.surface_format = format;
398     ss->ss8.base_addr = surf_bo->offset + surf_bo_offset;
399     ss->ss2.width = width - 1;
400     ss->ss2.height = height - 1;
401     ss->ss3.pitch = pitch - 1;
402
403     /* Always set 1 (align-4 mode) per B-spec */
404     ss->ss0.vertical_alignment = 1;
405     ss->ss0.horizontal_alignment = 1;
406
407     gen8_pp_set_surface_tiling(ss, tiling);
408     gen8_render_set_surface_scs(ss);
409     dri_bo_emit_reloc(ss_bo,
410                       I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
411                       surf_bo_offset,
412                       SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state, ss8),
413                       surf_bo);
414     ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
415     dri_bo_unmap(ss_bo);
416 }
417
418
419 static void
420 gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
421                            dri_bo *surf_bo, unsigned long surf_bo_offset,
422                            int width, int height, int wpitch,
423                            int xoffset, int yoffset,
424                            int format, int interleave_chroma,
425                            int index)
426 {
427     struct gen8_surface_state2 *ss2;
428     dri_bo *ss2_bo;
429     unsigned int tiling;
430     unsigned int swizzle;
431
432     dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
433     ss2_bo = pp_context->surface_state_binding_table.bo;
434     assert(ss2_bo);
435
436     dri_bo_map(ss2_bo, True);
437     assert(ss2_bo->virtual);
438     ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
439     memset(ss2, 0, sizeof(*ss2));
440     ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
441     ss2->ss1.cbcr_pixel_offset_v_direction = 0;
442     ss2->ss1.width = width - 1;
443     ss2->ss1.height = height - 1;
444     ss2->ss2.pitch = wpitch - 1;
445     ss2->ss2.interleave_chroma = interleave_chroma;
446     ss2->ss2.surface_format = format;
447     ss2->ss3.x_offset_for_cb = xoffset;
448     ss2->ss3.y_offset_for_cb = yoffset;
449     gen8_pp_set_surface2_tiling(ss2, tiling);
450     dri_bo_emit_reloc(ss2_bo,
451                       I915_GEM_DOMAIN_RENDER, 0,
452                       surf_bo_offset,
453                       SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state2, ss6),
454                       surf_bo);
455     ((unsigned int *)((char *)ss2_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
456     dri_bo_unmap(ss2_bo);
457 }
458
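/*
 * Derive per-plane width/height/pitch/offset from either an object_surface
 * or an object_image.  The render target planes are programmed through
 * gen8_pp_set_surface_state() as R8/R8G8-typed 2D surfaces, while source
 * planes go through gen8_pp_set_surface2_state() for the AVS sampler; RGB
 * surfaces additionally set the grf2 channel-swap flags.
 */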
459 static void
460 gen8_pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
461                                      const struct i965_surface *surface,
462                                      int base_index, int is_target,
463                                      const VARectangle *rect,
464                                      int *width, int *height, int *pitch, int *offset)
465 {
466     struct object_surface *obj_surface;
467     struct object_image *obj_image;
468     dri_bo *bo;
469     int fourcc = pp_get_surface_fourcc(ctx, surface);
470     const i965_fourcc_info *fourcc_info = get_fourcc_info(fourcc);
471
472     if (fourcc_info == NULL)
473         return;
474
475     if (surface->type == I965_SURFACE_TYPE_SURFACE) {
476         obj_surface = (struct object_surface *)surface->base;
477         bo = obj_surface->bo;
478         width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
479         height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
480         pitch[0] = obj_surface->width;
481         offset[0] = 0;
482
483         if (fourcc_info->num_planes == 1 && is_target)
484             width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
485
486         width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
487         height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
488         pitch[1] = obj_surface->cb_cr_pitch;
489         offset[1] = obj_surface->y_cb_offset * obj_surface->width;
490
491         width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
492         height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
493         pitch[2] = obj_surface->cb_cr_pitch;
494         offset[2] = obj_surface->y_cr_offset * obj_surface->width;
495     } else {
496         int U = 0, V = 0;
497
498         /* FIXME: add support for ARGB/ABGR image */
499         obj_image = (struct object_image *)surface->base;
500         bo = obj_image->bo;
501         width[0] = MIN(rect->x + rect->width, obj_image->image.width);
502         height[0] = MIN(rect->y + rect->height, obj_image->image.height);
503         pitch[0] = obj_image->image.pitches[0];
504         offset[0] = obj_image->image.offsets[0];
505
506         if (fourcc_info->num_planes == 1) {
507             if (is_target)
508                 width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
509         } else if (fourcc_info->num_planes == 2) {
510             U = 1, V = 1;
511         } else {
512             assert(fourcc_info->num_components == 3);
513
514             U = fourcc_info->components[1].plane;
515             V = fourcc_info->components[2].plane;
516             assert((U == 1 && V == 2) ||
517                    (U == 2 && V == 1));
518         }
519
520         /* Always set width/height although they aren't used for fourcc_info->num_planes == 1 */
521         width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
522         height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
523         pitch[1] = obj_image->image.pitches[U];
524         offset[1] = obj_image->image.offsets[U];
525
526         width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
527         height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
528         pitch[2] = obj_image->image.pitches[V];
529         offset[2] = obj_image->image.offsets[V];
530     }
531
532     if (is_target) {
533         gen8_pp_set_surface_state(ctx, pp_context,
534                                   bo, 0,
535                                   ALIGN(width[0], 4) / 4, height[0], pitch[0],
536                                   I965_SURFACEFORMAT_R8_UINT,
537                                   base_index, 1);
538
539         if (fourcc_info->num_planes == 2) {
540             gen8_pp_set_surface_state(ctx, pp_context,
541                                       bo, offset[1],
542                                       ALIGN(width[1], 2) / 2, height[1], pitch[1],
543                                       I965_SURFACEFORMAT_R8G8_SINT,
544                                       base_index + 1, 1);
545         } else if (fourcc_info->num_planes == 3) {
546             gen8_pp_set_surface_state(ctx, pp_context,
547                                       bo, offset[1],
548                                       ALIGN(width[1], 4) / 4, height[1], pitch[1],
549                                       I965_SURFACEFORMAT_R8_SINT,
550                                       base_index + 1, 1);
551             gen8_pp_set_surface_state(ctx, pp_context,
552                                       bo, offset[2],
553                                       ALIGN(width[2], 4) / 4, height[2], pitch[2],
554                                       I965_SURFACEFORMAT_R8_SINT,
555                                       base_index + 2, 1);
556         }
557
558         if (fourcc_info->format == I965_COLOR_RGB) {
559             struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
560             /* the format is MSB: X-B-G-R */
561             pp_static_parameter->grf2.save_avs_rgb_swap = 0;
562             if ((fourcc == VA_FOURCC_BGRA) ||
563                 (fourcc == VA_FOURCC_BGRX)) {
564                 /* It is stored as MSB: X-R-G-B */
565                 pp_static_parameter->grf2.save_avs_rgb_swap = 1;
566             }
567         }
568     } else {
569         int format0 = SURFACE_FORMAT_Y8_UNORM;
570
571         switch (fourcc) {
572         case VA_FOURCC_YUY2:
573             format0 = SURFACE_FORMAT_YCRCB_NORMAL;
574             break;
575
576         case VA_FOURCC_UYVY:
577             format0 = SURFACE_FORMAT_YCRCB_SWAPY;
578             break;
579
580         default:
581             break;
582         }
583
584         if (fourcc_info->format == I965_COLOR_RGB) {
585             struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
586             /* Only R8G8B8A8_UNORM is supported for BGRX or RGBX */
587             format0 = SURFACE_FORMAT_R8G8B8A8_UNORM;
588             pp_static_parameter->grf2.src_avs_rgb_swap = 0;
589             if ((fourcc == VA_FOURCC_BGRA) ||
590                 (fourcc == VA_FOURCC_BGRX)) {
591                 pp_static_parameter->grf2.src_avs_rgb_swap = 1;
592             }
593         }
594
595         gen8_pp_set_surface2_state(ctx, pp_context,
596                                    bo, offset[0],
597                                    width[0], height[0], pitch[0],
598                                    0, 0,
599                                    format0, 0,
600                                    base_index);
601
602         if (fourcc_info->num_planes == 2) {
603             gen8_pp_set_surface2_state(ctx, pp_context,
604                                        bo, offset[1],
605                                        width[1], height[1], pitch[1],
606                                        0, 0,
607                                        SURFACE_FORMAT_R8B8_UNORM, 0,
608                                        base_index + 1);
609         } else if (fourcc_info->num_planes == 3) {
610             gen8_pp_set_surface2_state(ctx, pp_context,
611                                        bo, offset[1],
612                                        width[1], height[1], pitch[1],
613                                        0, 0,
614                                        SURFACE_FORMAT_R8_UNORM, 0,
615                                        base_index + 1);
616             gen8_pp_set_surface2_state(ctx, pp_context,
617                                        bo, offset[2],
618                                        width[2], height[2], pitch[2],
619                                        0, 0,
620                                        SURFACE_FORMAT_R8_UNORM, 0,
621                                        base_index + 2);
622         }
623     }
624 }
625
626 static int
627 pp_null_x_steps(void *private_context)
628 {
629     return 1;
630 }
631
632 static int
633 pp_null_y_steps(void *private_context)
634 {
635     return 1;
636 }
637
638 static int
639 pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
640 {
641     return 0;
642 }
643
644 VAStatus
645 pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
646                    const struct i965_surface *src_surface,
647                    const VARectangle *src_rect,
648                    struct i965_surface *dst_surface,
649                    const VARectangle *dst_rect,
650                    void *filter_param)
651 {
652     /* private function & data */
653     pp_context->pp_x_steps = pp_null_x_steps;
654     pp_context->pp_y_steps = pp_null_y_steps;
655     pp_context->private_context = NULL;
656     pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
657
658     dst_surface->flags = src_surface->flags;
659
660     return VA_STATUS_SUCCESS;
661 }
662
663 static void calculate_boundary_block_mask(struct i965_post_processing_context *pp_context, const VARectangle *dst_rect)
664 {
665     int i, dst_width_adjust;
666     /* The x offset of the destination surface must be dword aligned, so we
667      * extend it on the left edge and mask out the pixels we are not interested in
668      * (see the worked example after this function). */
669     if (dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT) {
670         pp_context->block_horizontal_mask_left = 0;
671         for (i = dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT; i < GPU_ASM_BLOCK_WIDTH; i++)
672         {
673             pp_context->block_horizontal_mask_left |= 1 << i;
674         }
675     }
676     else {
677         pp_context->block_horizontal_mask_left = 0xffff;
678     }
679
680     dst_width_adjust = dst_rect->width + dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT;
681     if (dst_width_adjust % GPU_ASM_BLOCK_WIDTH) {
682         pp_context->block_horizontal_mask_right = (1 << (dst_width_adjust % GPU_ASM_BLOCK_WIDTH)) - 1;
683     }
684     else {
685         pp_context->block_horizontal_mask_right = 0xffff;
686     }
687
688     if (dst_rect->height % GPU_ASM_BLOCK_HEIGHT) {
689         pp_context->block_vertical_mask_bottom = (1 << (dst_rect->height % GPU_ASM_BLOCK_HEIGHT)) - 1;
690     }
691     else {
692         pp_context->block_vertical_mask_bottom = 0xff;
693     }
694
695 }
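/*
 * Worked example: with dst_rect->x == 2 and GPU_ASM_X_OFFSET_ALIGNMENT == 4,
 * the left mask keeps bits 2..15 of the 16-pixel block, i.e.
 * block_horizontal_mask_left == 0xfffc, so the two padding pixels added on
 * the left edge are masked out of the write.
 */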
696
697 static int
698 gen7_pp_avs_x_steps(void *private_context)
699 {
700     struct pp_avs_context *pp_avs_context = private_context;
701
702     return pp_avs_context->dest_w / 16;
703 }
704
705 static int
706 gen7_pp_avs_y_steps(void *private_context)
707 {
708     struct pp_avs_context *pp_avs_context = private_context;
709
710     return pp_avs_context->dest_h / 16;
711 }
712
713 static int
714 gen7_pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
715 {
716     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)pp_context->private_context;
717     struct gen7_pp_inline_parameter *pp_inline_parameter = pp_context->pp_inline_parameter;
718
719     pp_inline_parameter->grf9.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
720     pp_inline_parameter->grf9.destination_block_vertical_origin = y * 16 + pp_avs_context->dest_y;
721     pp_inline_parameter->grf9.constant_0 = 0xffffffff;
722     pp_inline_parameter->grf9.sampler_load_main_video_x_scaling_step = pp_avs_context->horiz_range / pp_avs_context->src_w;
723
724     return 0;
725 }
726
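/*
 * Record where each component sits within a packed 4:2:2 pixel pair:
 * YUY2 is laid out as Y0 U Y1 V (Y at byte 0, U at 1, V at 3) and
 * UYVY as U Y0 V Y1 (Y at byte 1, U at 0, V at 2).
 */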
727 static void gen7_update_src_surface_uv_offset(VADriverContextP    ctx,
728                                               struct i965_post_processing_context *pp_context,
729                                               const struct i965_surface *surface)
730 {
731     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
732     int fourcc = pp_get_surface_fourcc(ctx, surface);
733
734     if (fourcc == VA_FOURCC_YUY2) {
735         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 0;
736         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 1;
737         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 3;
738     } else if (fourcc == VA_FOURCC_UYVY) {
739         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 1;
740         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 0;
741         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 2;
742     }
743 }
744
745 static const AVSConfig gen8_avs_config = {
746     .coeff_frac_bits = 6,
747     .coeff_epsilon = 1.0f / (1U << 6),
748     .num_phases = 16,
749     .num_luma_coeffs = 8,
750     .num_chroma_coeffs = 4,
751
752     .coeff_range = {
753         .lower_bound = {
754             .y_k_h = { -2, -2, -2, -2, -2, -2, -2, -2 },
755             .y_k_v = { -2, -2, -2, -2, -2, -2, -2, -2 },
756             .uv_k_h = { -1, -2, -2, -1 },
757             .uv_k_v = { -1, -2, -2, -1 },
758         },
759         .upper_bound = {
760             .y_k_h = { 2, 2, 2, 2, 2, 2, 2, 2 },
761             .y_k_v = { 2, 2, 2, 2, 2, 2, 2, 2 },
762             .uv_k_h = { 1, 2, 2, 1 },
763             .uv_k_v = { 1, 2, 2, 1 },
764         },
765     },
766 };
767
768 static int
769 gen8_pp_get_8tap_filter_mode(VADriverContextP ctx,
770                              const struct i965_surface *surface)
771 {
772     int fourcc = pp_get_surface_fourcc(ctx, surface);
773
774     if (fourcc == VA_FOURCC_YUY2 ||
775         fourcc == VA_FOURCC_UYVY)
776         return 1;
777     else
778         return 3;
779 }
780
781 VAStatus
782 gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
783                            const struct i965_surface *src_surface,
784                            const VARectangle *src_rect,
785                            struct i965_surface *dst_surface,
786                            const VARectangle *dst_rect,
787                            void *filter_param)
788 {
789 /* TODO: Add the sampler_8x8 state */
790     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->pp_avs_context;
791     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
792     struct gen8_sampler_8x8_avs *sampler_8x8;
793     int i;
794     int width[3], height[3], pitch[3], offset[3];
795     int src_width, src_height;
796     unsigned char *cc_ptr;
797     AVSState * const avs = &pp_avs_context->state;
798     float sx, sy;
799     const float * yuv_to_rgb_coefs;
800     size_t yuv_to_rgb_coefs_size;
801
802     memset(pp_static_parameter, 0, sizeof(struct gen7_pp_static_parameter));
803
804     /* source surface */
805     gen8_pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 0, 0,
806                                          src_rect,
807                                          width, height, pitch, offset);
808     src_height = height[0];
809     src_width  = width[0];
810
811     /* destination surface */
812     gen8_pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 24, 1,
813                                          dst_rect,
814                                          width, height, pitch, offset);
815
816     /* sampler 8x8 state */
817     dri_bo_map(pp_context->dynamic_state.bo, True);
818     assert(pp_context->dynamic_state.bo->virtual);
819
820     cc_ptr = (unsigned char *) pp_context->dynamic_state.bo->virtual +
821                         pp_context->sampler_offset;
822     /* Currently only one gen8 sampler_8x8 is initialized */
823     sampler_8x8 = (struct gen8_sampler_8x8_avs *) cc_ptr;
824     memset(sampler_8x8, 0, sizeof(*sampler_8x8));
825
826     sampler_8x8->dw0.gain_factor = 44;
827     sampler_8x8->dw0.weak_edge_threshold = 1;
828     sampler_8x8->dw0.strong_edge_threshold = 8;
829     /* Use the same value as on Ivybridge instead of the default:
830      * sampler_8x8->dw0.r3x_coefficient = 5;
831      */
832     sampler_8x8->dw0.r3x_coefficient = 27;
833     sampler_8x8->dw0.r3c_coefficient = 5;
834
835     sampler_8x8->dw2.global_noise_estimation = 255;
836     sampler_8x8->dw2.non_edge_weight = 1;
837     sampler_8x8->dw2.regular_weight = 2;
838     sampler_8x8->dw2.strong_edge_weight = 7;
839     /* Use the same values as on Ivybridge instead of the defaults:
840      * sampler_8x8->dw2.r5x_coefficient = 7;
841      * sampler_8x8->dw2.r5cx_coefficient = 7;
842      * sampler_8x8->dw2.r5c_coefficient = 7;
843      */
844     sampler_8x8->dw2.r5x_coefficient = 9;
845     sampler_8x8->dw2.r5cx_coefficient = 8;
846     sampler_8x8->dw2.r5c_coefficient = 3;
847
848     sampler_8x8->dw3.sin_alpha = 101; /* sin_alpha = 0 */
849     sampler_8x8->dw3.cos_alpha = 79; /* cos_alpha = 0 */
850     sampler_8x8->dw3.sat_max = 0x1f;
851     sampler_8x8->dw3.hue_max = 14;
852     /* The 8-tap filter mode determines whether the adaptive filter is
853      * applied to all channels (dw153).
854      * If the 8-tap filter is disabled, the adaptive filter must be disabled too;
855      * only when the 8-tap filter is enabled can it be turned on or off.
856      */
857     sampler_8x8->dw3.enable_8tap_filter = gen8_pp_get_8tap_filter_mode(ctx, src_surface);
858     sampler_8x8->dw3.ief4_smooth_enable = 0;
859
860     sampler_8x8->dw4.s3u = 0;
861     sampler_8x8->dw4.diamond_margin = 4;
862     sampler_8x8->dw4.vy_std_enable = 0;
863     sampler_8x8->dw4.umid = 110;
864     sampler_8x8->dw4.vmid = 154;
865
866     sampler_8x8->dw5.diamond_dv = 0;
867     sampler_8x8->dw5.diamond_th = 35;
868     sampler_8x8->dw5.diamond_alpha = 100; /* diamond_alpha = 0 */
869     sampler_8x8->dw5.hs_margin = 3;
870     sampler_8x8->dw5.diamond_du = 2;
871
872     sampler_8x8->dw6.y_point1 = 46;
873     sampler_8x8->dw6.y_point2 = 47;
874     sampler_8x8->dw6.y_point3 = 254;
875     sampler_8x8->dw6.y_point4 = 255;
876
877     sampler_8x8->dw7.inv_margin_vyl = 3300; /* inv_margin_vyl = 0 */
878
879     sampler_8x8->dw8.inv_margin_vyu = 1600; /* inv_margin_vyu = 0 */
880     sampler_8x8->dw8.p0l = 46;
881     sampler_8x8->dw8.p1l = 216;
882
883     sampler_8x8->dw9.p2l = 236;
884     sampler_8x8->dw9.p3l = 236;
885     sampler_8x8->dw9.b0l = 133;
886     sampler_8x8->dw9.b1l = 130;
887
888     sampler_8x8->dw10.b2l = 130;
889     sampler_8x8->dw10.b3l = 130;
890     /* s0l = -5 / 256. s2.8 */
891     sampler_8x8->dw10.s0l = 1029;    /* s0l = 0 */
892     sampler_8x8->dw10.y_slope2 = 31; /* y_slope2 = 0 */
893
894     sampler_8x8->dw11.s1l = 0;
895     sampler_8x8->dw11.s2l = 0;
896
897     sampler_8x8->dw12.s3l = 0;
898     sampler_8x8->dw12.p0u = 46;
899     sampler_8x8->dw12.p1u = 66;
900     sampler_8x8->dw12.y_slope1 = 31; /* y_slope1 = 0 */
901
902     sampler_8x8->dw13.p2u = 130;
903     sampler_8x8->dw13.p3u = 236;
904     sampler_8x8->dw13.b0u = 143;
905     sampler_8x8->dw13.b1u = 163;
906
907     sampler_8x8->dw14.b2u = 200;
908     sampler_8x8->dw14.b3u = 140;
909     sampler_8x8->dw14.s0u = 256;  /* s0u = 0 */
910
911     sampler_8x8->dw15.s1u = 113; /* s1u = 0 */
912     sampler_8x8->dw15.s2u = 1203; /* s2u = 0 */
913
914     sx = (float)dst_rect->width / src_rect->width;
915     sy = (float)dst_rect->height / src_rect->height;
916     avs_update_coefficients(avs, sx, sy, pp_context->filter_flags);
917
918     assert(avs->config->num_phases >= 16);
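    /*
     * Pack the first 17 phases (indices 0..16) of the scaler coefficients
     * into the sampler state.  intel_format_convert() encodes each float as
     * the signed fixed-point value expected by the AVS tables, with 6
     * fractional bits to match coeff_frac_bits in gen8_avs_config.
     */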
919     for (i = 0; i <= 16; i++) {
920         struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
921             &sampler_8x8->coefficients[i];
922         const AVSCoeffs * const coeffs = &avs->coeffs[i];
923
924         sampler_8x8_state->dw0.table_0x_filter_c0 =
925             intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
926         sampler_8x8_state->dw0.table_0y_filter_c0 =
927             intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
928         sampler_8x8_state->dw0.table_0x_filter_c1 =
929             intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
930         sampler_8x8_state->dw0.table_0y_filter_c1 =
931             intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
932
933         sampler_8x8_state->dw1.table_0x_filter_c2 =
934             intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
935         sampler_8x8_state->dw1.table_0y_filter_c2 =
936             intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
937         sampler_8x8_state->dw1.table_0x_filter_c3 =
938             intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
939         sampler_8x8_state->dw1.table_0y_filter_c3 =
940             intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
941
942         sampler_8x8_state->dw2.table_0x_filter_c4 =
943             intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
944         sampler_8x8_state->dw2.table_0y_filter_c4 =
945             intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
946         sampler_8x8_state->dw2.table_0x_filter_c5 =
947             intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
948         sampler_8x8_state->dw2.table_0y_filter_c5 =
949             intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
950
951         sampler_8x8_state->dw3.table_0x_filter_c6 =
952             intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
953         sampler_8x8_state->dw3.table_0y_filter_c6 =
954             intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
955         sampler_8x8_state->dw3.table_0x_filter_c7 =
956             intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
957         sampler_8x8_state->dw3.table_0y_filter_c7 =
958             intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
959
960         sampler_8x8_state->dw4.pad0 = 0;
961         sampler_8x8_state->dw5.pad0 = 0;
962         sampler_8x8_state->dw4.table_1x_filter_c2 =
963             intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
964         sampler_8x8_state->dw4.table_1x_filter_c3 =
965             intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
966         sampler_8x8_state->dw5.table_1x_filter_c4 =
967             intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
968         sampler_8x8_state->dw5.table_1x_filter_c5 =
969             intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
970
971         sampler_8x8_state->dw6.pad0 = 0;
972         sampler_8x8_state->dw7.pad0 = 0;
973         sampler_8x8_state->dw6.table_1y_filter_c2 =
974             intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
975         sampler_8x8_state->dw6.table_1y_filter_c3 =
976             intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
977         sampler_8x8_state->dw7.table_1y_filter_c4 =
978             intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
979         sampler_8x8_state->dw7.table_1y_filter_c5 =
980             intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
981     }
982
983     sampler_8x8->dw152.default_sharpness_level =
984         -avs_is_needed(pp_context->filter_flags);
985     sampler_8x8->dw153.adaptive_filter_for_all_channel = 1;
986     sampler_8x8->dw153.bypass_y_adaptive_filtering = 1;
987     sampler_8x8->dw153.bypass_x_adaptive_filtering = 1;
988
989     for ( ; i <= avs->config->num_phases; i++) {
990         struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
991             &sampler_8x8->coefficients1[i - 17];
992         const AVSCoeffs * const coeffs = &avs->coeffs[i];
993
994         sampler_8x8_state->dw0.table_0x_filter_c0 =
995             intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
996         sampler_8x8_state->dw0.table_0y_filter_c0 =
997             intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
998         sampler_8x8_state->dw0.table_0x_filter_c1 =
999             intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
1000         sampler_8x8_state->dw0.table_0y_filter_c1 =
1001             intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);
1002
1003         sampler_8x8_state->dw1.table_0x_filter_c2 =
1004             intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
1005         sampler_8x8_state->dw1.table_0y_filter_c2 =
1006             intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
1007         sampler_8x8_state->dw1.table_0x_filter_c3 =
1008             intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
1009         sampler_8x8_state->dw1.table_0y_filter_c3 =
1010             intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);
1011
1012         sampler_8x8_state->dw2.table_0x_filter_c4 =
1013             intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
1014         sampler_8x8_state->dw2.table_0y_filter_c4 =
1015             intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
1016         sampler_8x8_state->dw2.table_0x_filter_c5 =
1017             intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
1018         sampler_8x8_state->dw2.table_0y_filter_c5 =
1019             intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);
1020
1021         sampler_8x8_state->dw3.table_0x_filter_c6 =
1022             intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
1023         sampler_8x8_state->dw3.table_0y_filter_c6 =
1024             intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
1025         sampler_8x8_state->dw3.table_0x_filter_c7 =
1026             intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
1027         sampler_8x8_state->dw3.table_0y_filter_c7 =
1028             intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);
1029
1030         sampler_8x8_state->dw4.pad0 = 0;
1031         sampler_8x8_state->dw5.pad0 = 0;
1032         sampler_8x8_state->dw4.table_1x_filter_c2 =
1033             intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
1034         sampler_8x8_state->dw4.table_1x_filter_c3 =
1035             intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
1036         sampler_8x8_state->dw5.table_1x_filter_c4 =
1037             intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
1038         sampler_8x8_state->dw5.table_1x_filter_c5 =
1039             intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);
1040
1041         sampler_8x8_state->dw6.pad0 = 0;
1042         sampler_8x8_state->dw7.pad0 = 0;
1043         sampler_8x8_state->dw6.table_1y_filter_c2 =
1044             intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
1045         sampler_8x8_state->dw6.table_1y_filter_c3 =
1046             intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
1047         sampler_8x8_state->dw7.table_1y_filter_c4 =
1048             intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
1049         sampler_8x8_state->dw7.table_1y_filter_c5 =
1050             intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
1051     }
1052
1053     dri_bo_unmap(pp_context->dynamic_state.bo);
1054
1055
1056     /* private function & data */
1057     pp_context->pp_x_steps = gen7_pp_avs_x_steps;
1058     pp_context->pp_y_steps = gen7_pp_avs_y_steps;
1059     pp_context->private_context = &pp_context->pp_avs_context;
1060     pp_context->pp_set_block_parameter = gen7_pp_avs_set_block_parameter;
1061
1062     int dst_left_edge_extend = dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT;
1063     pp_avs_context->dest_x = dst_rect->x - dst_left_edge_extend;
1064     pp_avs_context->dest_y = dst_rect->y;
1065     pp_avs_context->dest_w = ALIGN(dst_rect->width + dst_left_edge_extend, 16);
1066     pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
1067     pp_avs_context->src_w = src_rect->width;
1068     pp_avs_context->src_h = src_rect->height;
1069     pp_avs_context->horiz_range = (float)src_rect->width / src_width;
1070
1071     int dw = (pp_avs_context->src_w - 1) / 16 + 1;
1072     dw = MAX(dw, dst_rect->width + dst_left_edge_extend);
1073
1074     pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
1075     pp_static_parameter->grf2.avs_wa_enable = 0; /* It is not required on GEN8+ */
1076     pp_static_parameter->grf2.alpha = 255;
1077
1078     pp_static_parameter->grf3.sampler_load_horizontal_scaling_step_ratio = (float) pp_avs_context->src_w / dw;
1079     pp_static_parameter->grf4.sampler_load_vertical_scaling_step = (float) src_rect->height / src_height / dst_rect->height;
1080     pp_static_parameter->grf5.sampler_load_vertical_frame_origin = (float) src_rect->y / src_height -
1081         (float) pp_avs_context->dest_y * pp_static_parameter->grf4.sampler_load_vertical_scaling_step;
1082     pp_static_parameter->grf6.sampler_load_horizontal_frame_origin = (float) src_rect->x / src_width -
1083         (float) pp_avs_context->dest_x * pp_avs_context->horiz_range / dw;
1084
1085     gen7_update_src_surface_uv_offset(ctx, pp_context, dst_surface);
1086
1087     yuv_to_rgb_coefs = i915_color_standard_to_coefs (i915_filter_to_color_standard (src_surface->flags &
1088                                                                                     VA_SRC_COLOR_MASK),
1089                                                      &yuv_to_rgb_coefs_size);
1090     memcpy(&pp_static_parameter->grf7, yuv_to_rgb_coefs, yuv_to_rgb_coefs_size);
1091
1092     dst_surface->flags = src_surface->flags;
1093
1094     return VA_STATUS_SUCCESS;
1095 }
1096
1097 VAStatus
1098 gen8_pp_initialize(
1099     VADriverContextP   ctx,
1100     struct i965_post_processing_context *pp_context,
1101     const struct i965_surface *src_surface,
1102     const VARectangle *src_rect,
1103     struct i965_surface *dst_surface,
1104     const VARectangle *dst_rect,
1105     int                pp_index,
1106     void * filter_param
1107 )
1108 {
1109     VAStatus va_status;
1110     struct i965_driver_data *i965 = i965_driver_data(ctx);
1111     dri_bo *bo;
1112     int bo_size;
1113     unsigned int end_offset;
1114     struct pp_module *pp_module;
1115     int static_param_size, inline_param_size;
1116
1117     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1118     bo = dri_bo_alloc(i965->intel.bufmgr,
1119                       "surface state & binding table",
1120                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_PP_SURFACES,
1121                       4096);
1122     assert(bo);
1123     pp_context->surface_state_binding_table.bo = bo;
1124
1125     pp_context->idrt.num_interface_descriptors = 0;
1126
1127     pp_context->sampler_size = 4 * 4096;
1128
1129     bo_size = 4096 + pp_context->curbe_size + pp_context->sampler_size
1130                 + pp_context->idrt_size;
1131
1132     dri_bo_unreference(pp_context->dynamic_state.bo);
1133     bo = dri_bo_alloc(i965->intel.bufmgr,
1134                       "dynamic_state",
1135                       bo_size,
1136                       4096);
1137
1138     assert(bo);
1139     pp_context->dynamic_state.bo = bo;
1140     pp_context->dynamic_state.bo_size = bo_size;
1141
1142     end_offset = 0;
1143     pp_context->dynamic_state.end_offset = 0;
1144
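    /*
     * Carve the dynamic_state BO into three 64-byte aligned regions, in
     * order: CURBE constants, the interface descriptor table (IDRT) and the
     * sampler (8x8 AVS) state.
     */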
1145     /* Constant buffer offset */
1146     pp_context->curbe_offset = ALIGN(end_offset, 64);
1147     end_offset = pp_context->curbe_offset + pp_context->curbe_size;
1148
1149     /* Interface descriptor offset */
1150     pp_context->idrt_offset = ALIGN(end_offset, 64);
1151     end_offset = pp_context->idrt_offset + pp_context->idrt_size;
1152
1153     /* Sampler state offset */
1154     pp_context->sampler_offset = ALIGN(end_offset, 64);
1155     end_offset = pp_context->sampler_offset + pp_context->sampler_size;
1156
1157     /* update the end offset of dynamic_state */
1158     pp_context->dynamic_state.end_offset = ALIGN(end_offset, 64);
1159
1160     static_param_size = sizeof(struct gen7_pp_static_parameter);
1161     inline_param_size = sizeof(struct gen7_pp_inline_parameter);
1162
1163     memset(pp_context->pp_static_parameter, 0, static_param_size);
1164     memset(pp_context->pp_inline_parameter, 0, inline_param_size);
1165
1166     assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
1167     pp_context->current_pp = pp_index;
1168     pp_module = &pp_context->pp_modules[pp_index];
1169
1170     if (pp_module->initialize)
1171         va_status = pp_module->initialize(ctx, pp_context,
1172                                           src_surface,
1173                                           src_rect,
1174                                           dst_surface,
1175                                           dst_rect,
1176                                           filter_param);
1177     else
1178         va_status = VA_STATUS_ERROR_UNIMPLEMENTED;
1179
1180     calculate_boundary_block_mask(pp_context, dst_rect);
1181
1182     return va_status;
1183 }
1184
1185 static void
1186 gen8_pp_interface_descriptor_table(VADriverContextP   ctx,
1187                                    struct i965_post_processing_context *pp_context)
1188 {
1189     struct gen8_interface_descriptor_data *desc;
1190     dri_bo *bo;
1191     int pp_index = pp_context->current_pp;
1192     unsigned char *cc_ptr;
1193
1194     bo = pp_context->dynamic_state.bo;
1195
1196     dri_bo_map(bo, 1);
1197     assert(bo->virtual);
1198     cc_ptr = (unsigned char *)bo->virtual + pp_context->idrt_offset;
1199
1200     desc = (struct gen8_interface_descriptor_data *) cc_ptr +
1201                 pp_context->idrt.num_interface_descriptors;
1202
1203     memset(desc, 0, sizeof(*desc));
1204     desc->desc0.kernel_start_pointer =
1205                 pp_context->pp_modules[pp_index].kernel.kernel_offset >> 6; /* reloc */
1206     desc->desc2.single_program_flow = 1;
1207     desc->desc2.floating_point_mode = FLOATING_POINT_IEEE_754;
1208     desc->desc3.sampler_count = 0;      /* 1 - 4 samplers used */
1209     desc->desc3.sampler_state_pointer = pp_context->sampler_offset >> 5;
1210     desc->desc4.binding_table_entry_count = 0;
1211     desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
1212     desc->desc5.constant_urb_entry_read_offset = 0;
1213
1214     desc->desc5.constant_urb_entry_read_length = 8; /* grf 1-8 */
1215
1216     dri_bo_unmap(bo);
1217     pp_context->idrt.num_interface_descriptors++;
1218 }
1219
1220
1221 static void
1222 gen8_pp_upload_constants(VADriverContextP ctx,
1223                          struct i965_post_processing_context *pp_context)
1224 {
1225     unsigned char *constant_buffer;
1226     int param_size;
1227
1228     assert(sizeof(struct gen7_pp_static_parameter) == 256);
1229
1230     param_size = sizeof(struct gen7_pp_static_parameter);
1231
1232     dri_bo_map(pp_context->dynamic_state.bo, 1);
1233     assert(pp_context->dynamic_state.bo->virtual);
1234     constant_buffer = (unsigned char *) pp_context->dynamic_state.bo->virtual +
1235                         pp_context->curbe_offset;
1236
1237     memcpy(constant_buffer, pp_context->pp_static_parameter, param_size);
1238     dri_bo_unmap(pp_context->dynamic_state.bo);
1239     return;
1240 }
1241
1242 void
1243 gen8_pp_states_setup(VADriverContextP ctx,
1244                      struct i965_post_processing_context *pp_context)
1245 {
1246     gen8_pp_interface_descriptor_table(ctx, pp_context);
1247     gen8_pp_upload_constants(ctx, pp_context);
1248 }
1249
1250 static void
1251 gen6_pp_pipeline_select(VADriverContextP ctx,
1252                         struct i965_post_processing_context *pp_context)
1253 {
1254     struct intel_batchbuffer *batch = pp_context->batch;
1255
1256     BEGIN_BATCH(batch, 1);
1257     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
1258     ADVANCE_BATCH(batch);
1259 }
1260
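/*
 * Program STATE_BASE_ADDRESS (16 dwords on Gen8): the surface-state base
 * points at the binding-table BO, the dynamic-state base at the
 * curbe/idrt/sampler BO and the instruction base at the kernel BO; the
 * trailing 0xFFFF0000 | BASE_ADDRESS_MODIFY dwords program the buffer-size
 * fields.
 */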
1261 static void
1262 gen8_pp_state_base_address(VADriverContextP ctx,
1263                            struct i965_post_processing_context *pp_context)
1264 {
1265     struct intel_batchbuffer *batch = pp_context->batch;
1266
1267     BEGIN_BATCH(batch, 16);
1268     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));
1269     /* DW1. General state address */
1270     OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1271     OUT_BATCH(batch, 0);
1272     OUT_BATCH(batch, 0);
1273
1274     /* DW4-5. Surface state address */
1275     OUT_RELOC64(batch, pp_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1276
1277     /* DW6-7. Dynamic state address */
1278     OUT_RELOC64(batch, pp_context->dynamic_state.bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
1279                 0, 0 | BASE_ADDRESS_MODIFY);
1280
1281     /* DW8. Indirect object address */
1282     OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1283     OUT_BATCH(batch, 0);
1284
1285     /* DW10-11. Instruction base address */
1286     OUT_RELOC64(batch, pp_context->instruction_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
1287
1288     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1289     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1290     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1291     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1292     ADVANCE_BATCH(batch);
1293 }
1294
1295 void
1296 gen8_pp_vfe_state(VADriverContextP ctx,
1297                   struct i965_post_processing_context *pp_context)
1298 {
1299     struct intel_batchbuffer *batch = pp_context->batch;
1300
1301     BEGIN_BATCH(batch, 9);
1302     OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
1303     OUT_BATCH(batch, 0);
1304     OUT_BATCH(batch, 0);
1305     OUT_BATCH(batch,
1306               (pp_context->vfe_gpu_state.max_num_threads - 1) << 16 |
1307               pp_context->vfe_gpu_state.num_urb_entries << 8);
1308     OUT_BATCH(batch, 0);
1309     OUT_BATCH(batch,
1310               (pp_context->vfe_gpu_state.urb_entry_size) << 16 |
1311                 /* URB Entry Allocation Size, in 256-bit units */
1312               (pp_context->vfe_gpu_state.curbe_allocation_size));
1313                 /* CURBE Allocation Size, in 256-bit units */
1314     OUT_BATCH(batch, 0);
1315     OUT_BATCH(batch, 0);
1316     OUT_BATCH(batch, 0);
1317     ADVANCE_BATCH(batch);
1318 }
1319
1320 void
1321 gen8_interface_descriptor_load(VADriverContextP ctx,
1322                                struct i965_post_processing_context *pp_context)
1323 {
1324     struct intel_batchbuffer *batch = pp_context->batch;
1325
1326     BEGIN_BATCH(batch, 6);
1327
1328     OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
1329     OUT_BATCH(batch, 0);
1330
1331     OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
1332     OUT_BATCH(batch, 0);
1333     OUT_BATCH(batch,
1334               pp_context->idrt.num_interface_descriptors * sizeof(struct gen8_interface_descriptor_data));
1335     OUT_BATCH(batch, pp_context->idrt_offset);
1336     ADVANCE_BATCH(batch);
1337 }
1338
1339 void
1340 gen8_pp_curbe_load(VADriverContextP ctx,
1341                    struct i965_post_processing_context *pp_context)
1342 {
1343     struct intel_batchbuffer *batch = pp_context->batch;
1344     int param_size;
1345
1346     param_size = sizeof(struct gen7_pp_static_parameter);
1347
1348     BEGIN_BATCH(batch, 4);
1349     OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
1350     OUT_BATCH(batch, 0);
1351     OUT_BATCH(batch,
1352               param_size);
1353     OUT_BATCH(batch, pp_context->curbe_offset);
1354     ADVANCE_BATCH(batch);
1355 }
1356
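/*
 * Build a second-level batch buffer containing one MEDIA_OBJECT (with its
 * inline parameter) plus a MEDIA_STATE_FLUSH per block, pad the stream,
 * terminate it with MI_BATCH_BUFFER_END, and jump into it from the main
 * batch with MI_BATCH_BUFFER_START.
 */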
1357 void
1358 gen8_pp_object_walker(VADriverContextP ctx,
1359                       struct i965_post_processing_context *pp_context)
1360 {
1361     struct i965_driver_data *i965 = i965_driver_data(ctx);
1362     struct intel_batchbuffer *batch = pp_context->batch;
1363     int x, x_steps, y, y_steps;
1364     int param_size, command_length_in_dws, extra_cmd_in_dws;
1365     dri_bo *command_buffer;
1366     unsigned int *command_ptr;
1367
1368     param_size = sizeof(struct gen7_pp_inline_parameter);
1369
1370     x_steps = pp_context->pp_x_steps(pp_context->private_context);
1371     y_steps = pp_context->pp_y_steps(pp_context->private_context);
1372     command_length_in_dws = 6 + (param_size >> 2);
1373     extra_cmd_in_dws = 2;
1374     command_buffer = dri_bo_alloc(i965->intel.bufmgr,
1375                                   "command objects buffer",
1376                                   (command_length_in_dws + extra_cmd_in_dws) * 4 * x_steps * y_steps + 64,
1377                                   4096);
1378
1379     dri_bo_map(command_buffer, 1);
1380     command_ptr = command_buffer->virtual;
1381
1382     for (y = 0; y < y_steps; y++) {
1383         for (x = 0; x < x_steps; x++) {
1384             if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {
1385
1386                 *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
1387                 *command_ptr++ = 0;
1388                 *command_ptr++ = 0;
1389                 *command_ptr++ = 0;
1390                 *command_ptr++ = 0;
1391                 *command_ptr++ = 0;
1392                 memcpy(command_ptr, pp_context->pp_inline_parameter, param_size);
1393                 command_ptr += (param_size >> 2);
1394
1395                 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
1396                 *command_ptr++ = 0;
1397             }
1398         }
1399     }
1400
1401     if ((command_length_in_dws + extra_cmd_in_dws) * x_steps * y_steps % 2 == 0)
1402         *command_ptr++ = 0;
1403
1404     *command_ptr++ = MI_BATCH_BUFFER_END;
1405     *command_ptr++ = 0;
1406
1407     dri_bo_unmap(command_buffer);
1408
1409     BEGIN_BATCH(batch, 3);
1410     OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
1411     OUT_RELOC(batch, command_buffer,
1412               I915_GEM_DOMAIN_COMMAND, 0, 0);
1413     OUT_BATCH(batch, 0);
1414     ADVANCE_BATCH(batch);
1415
1416     dri_bo_unreference(command_buffer);
1417
1418     /* Have to execute the batch buffer here because MI_BATCH_BUFFER_END
1419      * will cause control to pass back to the ring buffer.
1420      */
1421     intel_batchbuffer_end_atomic(batch);
1422     intel_batchbuffer_flush(batch);
1423     intel_batchbuffer_start_atomic(batch, 0x1000);
1424 }
1425
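/*
 * gen8_pp_pipeline_setup:
 * Emit the media pipeline programming for one post-processing pass inside
 * an atomic batch section: pipeline select, state base addresses, VFE
 * state, CURBE load, interface descriptor load, and finally the object
 * walker that dispatches the kernel.
 */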
1426 static void
1427 gen8_pp_pipeline_setup(VADriverContextP ctx,
1428                        struct i965_post_processing_context *pp_context)
1429 {
1430     struct intel_batchbuffer *batch = pp_context->batch;
1431
1432     intel_batchbuffer_start_atomic(batch, 0x1000);
1433     intel_batchbuffer_emit_mi_flush(batch);
1434     gen6_pp_pipeline_select(ctx, pp_context);
1435     gen8_pp_state_base_address(ctx, pp_context);
1436     gen8_pp_vfe_state(ctx, pp_context);
1437     gen8_pp_curbe_load(ctx, pp_context);
1438     gen8_interface_descriptor_load(ctx, pp_context);
1439     gen8_pp_vfe_state(ctx, pp_context);
1440     gen8_pp_object_walker(ctx, pp_context);
1441     intel_batchbuffer_end_atomic(batch);
1442 }
1443
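/*
 * gen8_post_processing:
 * Top-level entry point installed as pp_context->intel_post_processing.
 * It prepares the per-kernel state for the requested pp_index via
 * gen8_pp_initialize() and, on success, completes the state setup and
 * emits the media pipeline.
 */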
1444 static VAStatus
1445 gen8_post_processing(
1446     VADriverContextP   ctx,
1447     struct i965_post_processing_context *pp_context,
1448     const struct i965_surface *src_surface,
1449     const VARectangle *src_rect,
1450     struct i965_surface *dst_surface,
1451     const VARectangle *dst_rect,
1452     int                pp_index,
1453     void              *filter_param
1454 )
1455 {
1456     VAStatus va_status;
1457
1458     va_status = gen8_pp_initialize(ctx, pp_context,
1459                                    src_surface,
1460                                    src_rect,
1461                                    dst_surface,
1462                                    dst_rect,
1463                                    pp_index,
1464                                    filter_param);
1465
1466     if (va_status == VA_STATUS_SUCCESS) {
1467         gen8_pp_states_setup(ctx, pp_context);
1468         gen8_pp_pipeline_setup(ctx, pp_context);
1469     }
1470
1471     return va_status;
1472 }
1473
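/*
 * gen8_post_processing_context_finalize:
 * Release the resources owned by the context: the optional VEBOX
 * processing context, the surface-state/binding-table buffer, the STMM
 * buffer kept by the denoise context, the instruction/indirect/dynamic
 * state buffers, and the heap-allocated static and inline parameter
 * blocks.
 */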
1474 static void
1475 gen8_post_processing_context_finalize(VADriverContextP ctx,
1476                                       struct i965_post_processing_context *pp_context)
1477 {
1478     if (pp_context->vebox_proc_ctx) {
1479         gen75_vebox_context_destroy(ctx, pp_context->vebox_proc_ctx);
1480         pp_context->vebox_proc_ctx = NULL;
1481     }
1482
1483     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1484     pp_context->surface_state_binding_table.bo = NULL;
1485
1486     dri_bo_unreference(pp_context->pp_dn_context.stmm_bo);
1487     pp_context->pp_dn_context.stmm_bo = NULL;
1488
1489     if (pp_context->instruction_state.bo) {
1490         dri_bo_unreference(pp_context->instruction_state.bo);
1491         pp_context->instruction_state.bo = NULL;
1492     }
1493
1494     if (pp_context->indirect_state.bo) {
1495         dri_bo_unreference(pp_context->indirect_state.bo);
1496         pp_context->indirect_state.bo = NULL;
1497     }
1498
1499     if (pp_context->dynamic_state.bo) {
1500         dri_bo_unreference(pp_context->dynamic_state.bo);
1501         pp_context->dynamic_state.bo = NULL;
1502     }
1503
1504     free(pp_context->pp_static_parameter);
1505     free(pp_context->pp_inline_parameter);
1506     pp_context->pp_static_parameter = NULL;
1507     pp_context->pp_inline_parameter = NULL;
1508 }
1509
1510 #define VPP_CURBE_ALLOCATION_SIZE       32
1511
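/*
 * gen8_post_processing_context_common_init:
 * Context setup shared by the Gen8-style post-processing paths: program
 * the VFE defaults, install the run/finalize callbacks, copy every pp
 * module's kernel binary into a single instruction-state buffer (each
 * kernel aligned to 64 bytes, with its offset recorded in
 * pp_module->kernel.kernel_offset), allocate the static/inline parameter
 * blocks, and record the interface-descriptor and CURBE sizes used when
 * the dynamic state is built later.
 */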
1512 void
1513 gen8_post_processing_context_common_init(VADriverContextP ctx,
1514                                          void *data,
1515                                          struct pp_module *pp_modules,
1516                                          int num_pp_modules,
1517                                          struct intel_batchbuffer *batch)
1518 {
1519     struct i965_driver_data *i965 = i965_driver_data(ctx);
1520     int i, kernel_size;
1521     unsigned int kernel_offset, end_offset;
1522     unsigned char *kernel_ptr;
1523     struct pp_module *pp_module;
1524     struct i965_post_processing_context *pp_context = data;
1525
1526     pp_context->vfe_gpu_state.max_num_threads = 60;
1527     pp_context->vfe_gpu_state.num_urb_entries = 59;
1528     pp_context->vfe_gpu_state.gpgpu_mode = 0;
1529     pp_context->vfe_gpu_state.urb_entry_size = 16 - 1;
1530     pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
1531
1532     pp_context->intel_post_processing = gen8_post_processing;
1533     pp_context->finalize = gen8_post_processing_context_finalize;
1534
1535     assert(ARRAY_ELEMS(pp_context->pp_modules) == num_pp_modules);
1536
1537     memcpy(pp_context->pp_modules, pp_modules, sizeof(pp_context->pp_modules));
1538
1539     kernel_size = 4096;
1540
1541     for (i = 0; i < NUM_PP_MODULES; i++) {
1542         pp_module = &pp_context->pp_modules[i];
1543
1544         if (pp_module->kernel.bin && pp_module->kernel.size) {
1545             kernel_size += pp_module->kernel.size;
1546         }
1547     }
1548
1549     pp_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
1550                                   "kernel shader",
1551                                   kernel_size,
1552                                   0x1000);
1553     if (pp_context->instruction_state.bo == NULL) {
1554         WARN_ONCE("failed to allocate buffer space for the VPP kernel shaders\n");
1555         return;
1556     }
1557
1558     assert(pp_context->instruction_state.bo);
1559
1561     pp_context->instruction_state.bo_size = kernel_size;
1562     pp_context->instruction_state.end_offset = 0;
1563     end_offset = 0;
1564
1565     dri_bo_map(pp_context->instruction_state.bo, 1);
1566     kernel_ptr = (unsigned char *)(pp_context->instruction_state.bo->virtual);
1567
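    /* Copy each module's kernel binary into the instruction heap at a
     * 64-byte aligned offset and record that offset for the interface
     * descriptors. */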
1568     for (i = 0; i < NUM_PP_MODULES; i++) {
1569         pp_module = &pp_context->pp_modules[i];
1570
1571         kernel_offset = ALIGN(end_offset, 64);
1572         pp_module->kernel.kernel_offset = kernel_offset;
1573
1574         if (pp_module->kernel.bin && pp_module->kernel.size) {
1575
1576             memcpy(kernel_ptr + kernel_offset, pp_module->kernel.bin, pp_module->kernel.size);
1577             end_offset = kernel_offset + pp_module->kernel.size;
1578         }
1579     }
1580
1581     pp_context->instruction_state.end_offset = ALIGN(end_offset, 64);
1582
1583     dri_bo_unmap(pp_context->instruction_state.bo);
1584
1585     /* static & inline parameters */
1586     pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
1587     pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
1588
1589     pp_context->batch = batch;
1590
1591     pp_context->idrt_size = 5 * sizeof(struct gen8_interface_descriptor_data);
1592     pp_context->curbe_size = 256;
1593
1594 }
1595
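/*
 * gen8_post_processing_context_init:
 * Gen8 (Broadwell) entry point: run the common initialization with the
 * gen8 kernel table and seed the AVS (adaptive video scaler) state from
 * gen8_avs_config.
 */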
1596 void
1597 gen8_post_processing_context_init(VADriverContextP ctx,
1598                                   void *data,
1599                                   struct intel_batchbuffer *batch)
1600 {
1601     struct i965_post_processing_context *pp_context = data;
1602
1603     gen8_post_processing_context_common_init(ctx, data, pp_modules_gen8, ARRAY_ELEMS(pp_modules_gen8), batch);
1604     avs_init_state(&pp_context->pp_avs_context.state, &gen8_avs_config);
1605 }