/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Xiang Haihao <haihao.xiang@intel.com>
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"

#include "i965_drv_video.h"
#include "i965_gpe_utils.h"

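/*
 * Helpers for programming the GEN "GPE" (general purpose / media pipeline):
 * pipeline selection, STATE_BASE_ADDRESS, VFE state, CURBE and interface
 * descriptor loading, surface/binding-table setup, and raw MI command
 * emission. BEGIN_BATCH()/OUT_BATCH()/ADVANCE_BATCH() append dwords to the
 * batch buffer; OUT_RELOC()/OUT_RELOC64() additionally record a GEM
 * relocation for the referenced bo.
 */
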
static void
i965_gpe_select(VADriverContextP ctx,
                struct i965_gpe_context *gpe_context,
                struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}

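/*
 * STATE_BASE_ADDRESS tells the GPU where subsequent indirect state (surface
 * states, dynamic state, kernels) lives. On Gen6/7 only the surface-state
 * base is relocated here; the remaining bases and upper bounds are left at
 * zero with their modify bits set.
 */
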
static void
gen6_gpe_state_base_address(VADriverContextP ctx,
                            struct i965_gpe_context *gpe_context,
                            struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 10);

    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* General State Base Address */
    OUT_RELOC(batch,
              gpe_context->surface_state_binding_table.bo,
              I915_GEM_DOMAIN_INSTRUCTION,
              0,
              BASE_ADDRESS_MODIFY);                     /* Surface State Base Address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Dynamic State Base Address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Indirect Object Base Address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Instruction Base Address */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* General State Access Upper Bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Dynamic State Access Upper Bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Indirect Object Access Upper Bound */
    OUT_BATCH(batch, BASE_ADDRESS_MODIFY);              /* Instruction Access Upper Bound */

    ADVANCE_BATCH(batch);
}

static void
gen6_gpe_vfe_state(VADriverContextP ctx,
                   struct i965_gpe_context *gpe_context,
                   struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 8);

    OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (8 - 2));
    OUT_BATCH(batch, 0);                                        /* Scratch Space Base Pointer and Space */
    OUT_BATCH(batch,
              gpe_context->vfe_state.max_num_threads << 16 |    /* Maximum Number of Threads */
              gpe_context->vfe_state.num_urb_entries << 8 |     /* Number of URB Entries */
              gpe_context->vfe_state.gpgpu_mode << 2);          /* MEDIA Mode */
    OUT_BATCH(batch, 0);                                        /* Debug: Object ID */
    OUT_BATCH(batch,
              gpe_context->vfe_state.urb_entry_size << 16 |     /* URB Entry Allocation Size */
              gpe_context->vfe_state.curbe_allocation_size);    /* CURBE Allocation Size */
    /* vfe_desc5/6/7 decide whether the scoreboard is used. */
    OUT_BATCH(batch, gpe_context->vfe_desc5.dword);
    OUT_BATCH(batch, gpe_context->vfe_desc6.dword);
    OUT_BATCH(batch, gpe_context->vfe_desc7.dword);

    ADVANCE_BATCH(batch);
}

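/*
 * CURBE (Constant URB Entry) holds the kernel's constant data; the load
 * command points the fixed function at the buffer that i965_gpe_context_init()
 * allocated for this context.
 */
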
static void
gen6_gpe_curbe_load(VADriverContextP ctx,
                    struct i965_gpe_context *gpe_context,
                    struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 4);

    OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, gpe_context->curbe.length);
    OUT_RELOC(batch, gpe_context->curbe.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);

    ADVANCE_BATCH(batch);
}

static void
gen6_gpe_idrt(VADriverContextP ctx,
              struct i965_gpe_context *gpe_context,
              struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 4);

    OUT_BATCH(batch, CMD_MEDIA_INTERFACE_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, gpe_context->idrt.max_entries * gpe_context->idrt.entry_size);
    OUT_RELOC(batch, gpe_context->idrt.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);

    ADVANCE_BATCH(batch);
}

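/*
 * Copy the caller's kernel descriptors into the context and upload each
 * kernel binary into its own bo; gen8+ instead packs all kernels into a
 * single instruction-state bo (see gen8_gpe_load_kernels() below).
 */
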
void
i965_gpe_load_kernels(VADriverContextP ctx,
                      struct i965_gpe_context *gpe_context,
                      struct i965_kernel *kernel_list,
                      unsigned int num_kernels)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int i;

    assert(num_kernels <= MAX_GPE_KERNELS);
    memcpy(gpe_context->kernels, kernel_list, sizeof(*kernel_list) * num_kernels);
    gpe_context->num_kernels = num_kernels;

    for (i = 0; i < num_kernels; i++) {
        struct i965_kernel *kernel = &gpe_context->kernels[i];

        kernel->bo = dri_bo_alloc(i965->intel.bufmgr,
                                  kernel->name,
                                  kernel->size,
                                  0x1000);
        assert(kernel->bo);
        dri_bo_subdata(kernel->bo, 0, kernel->size, kernel->bin);
    }
}

void
i965_gpe_context_destroy(struct i965_gpe_context *gpe_context)
{
    int i;

    dri_bo_unreference(gpe_context->surface_state_binding_table.bo);
    gpe_context->surface_state_binding_table.bo = NULL;

    dri_bo_unreference(gpe_context->idrt.bo);
    gpe_context->idrt.bo = NULL;

    dri_bo_unreference(gpe_context->curbe.bo);
    gpe_context->curbe.bo = NULL;

    for (i = 0; i < gpe_context->num_kernels; i++) {
        struct i965_kernel *kernel = &gpe_context->kernels[i];

        dri_bo_unreference(kernel->bo);
        kernel->bo = NULL;
    }
}

void
i965_gpe_context_init(VADriverContextP ctx,
                      struct i965_gpe_context *gpe_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    dri_bo *bo;

    dri_bo_unreference(gpe_context->surface_state_binding_table.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "surface state & binding table",
                      gpe_context->surface_state_binding_table.length,
                      4096);
    assert(bo);
    gpe_context->surface_state_binding_table.bo = bo;

    dri_bo_unreference(gpe_context->idrt.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "interface descriptor table",
                      gpe_context->idrt.entry_size * gpe_context->idrt.max_entries,
                      4096);
    assert(bo);
    gpe_context->idrt.bo = bo;

    dri_bo_unreference(gpe_context->curbe.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "curbe buffer",
                      gpe_context->curbe.length,
                      4096);
    assert(bo);
    gpe_context->curbe.bo = bo;
}

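/*
 * Typical per-frame ordering on Gen6/7 (a sketch, not enforced by these
 * helpers): select the media pipe, program the base addresses, VFE state,
 * CURBE and interface descriptors, then emit MEDIA_OBJECT commands.
 */
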
void
gen6_gpe_pipeline_setup(VADriverContextP ctx,
                        struct i965_gpe_context *gpe_context,
                        struct intel_batchbuffer *batch)
{
    intel_batchbuffer_emit_mi_flush(batch);

    i965_gpe_select(ctx, gpe_context, batch);
    gen6_gpe_state_base_address(ctx, gpe_context, batch);
    gen6_gpe_vfe_state(ctx, gpe_context, batch);
    gen6_gpe_curbe_load(ctx, gpe_context, batch);
    gen6_gpe_idrt(ctx, gpe_context, batch);
}

void
gen8_gpe_pipeline_end(VADriverContextP ctx,
                      struct i965_gpe_context *gpe_context,
                      struct intel_batchbuffer *batch)
{
    /* Nothing to do */
}

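/*
 * The *_set_surface_tiling() helpers below translate an I915_TILING_* fence
 * mode into the tiled_surface/tile_walk bits of the per-generation
 * SURFACE_STATE layouts.
 */
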
static void
i965_gpe_set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss3.tiled_surface = 0;
        ss->ss3.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss3.tiled_surface = 1;
        ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
i965_gpe_set_surface2_tiling(struct i965_surface_state2 *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss2.tiled_surface = 0;
        ss->ss2.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
gen7_gpe_set_surface_tiling(struct gen7_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
gen7_gpe_set_surface2_tiling(struct gen7_surface_state2 *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss2.tiled_surface = 0;
        ss->ss2.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
gen8_gpe_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
gen8_gpe_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss2.tiled_surface = 0;
        ss->ss2.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

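/*
 * "Surface state 2" is the media/VME sampler surface layout used for
 * interleaved-chroma (NV12-style) planar surfaces, as opposed to the render
 * SURFACE_STATE programmed by the media read/write and buffer helpers
 * further below.
 */
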
static void
i965_gpe_set_surface2_state(VADriverContextP ctx,
                            struct object_surface *obj_surface,
                            struct i965_surface_state2 *ss)
{
    int w, h, w_pitch;
    unsigned int tiling, swizzle;

    assert(obj_surface->bo);
    assert(obj_surface->fourcc == VA_FOURCC_NV12);

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.surface_base_address = obj_surface->bo->offset;
    /* ss1 */
    ss->ss1.cbcr_pixel_offset_v_direction = 2;
    ss->ss1.width = w - 1;
    ss->ss1.height = h - 1;
    /* ss2 */
    ss->ss2.surface_format = MFX_SURFACE_PLANAR_420_8;
    ss->ss2.interleave_chroma = 1;
    ss->ss2.pitch = w_pitch - 1;
    ss->ss2.half_pitch_for_chroma = 0;
    i965_gpe_set_surface2_tiling(ss, tiling);
    /* ss3: UV offset for interleave mode */
    ss->ss3.x_offset_for_cb = obj_surface->x_cb_offset;
    ss->ss3.y_offset_for_cb = obj_surface->y_cb_offset;
}

void
i965_gpe_surface2_setup(VADriverContextP ctx,
                        struct i965_gpe_context *gpe_context,
                        struct object_surface *obj_surface,
                        unsigned long binding_table_offset,
                        unsigned long surface_state_offset)
{
    struct i965_surface_state2 *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);

    ss = (struct i965_surface_state2 *)((char *)bo->virtual + surface_state_offset);
    i965_gpe_set_surface2_state(ctx, obj_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      0,
                      surface_state_offset + offsetof(struct i965_surface_state2, ss0),
                      obj_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}

static void
i965_gpe_set_media_rw_surface_state(VADriverContextP ctx,
                                    struct object_surface *obj_surface,
                                    struct i965_surface_state *ss)
{
    int w, h, w_pitch;
    unsigned int tiling, swizzle;

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = I965_SURFACEFORMAT_R8_UNORM;
    /* ss1 */
    ss->ss1.base_addr = obj_surface->bo->offset;
    /* ss2 */
    ss->ss2.width = w / 4 - 1;  /* in DWORDs for media read & write message */
    ss->ss2.height = h - 1;
    /* ss3 */
    ss->ss3.pitch = w_pitch - 1;
    i965_gpe_set_surface_tiling(ss, tiling);
}

void
i965_gpe_media_rw_surface_setup(VADriverContextP ctx,
                                struct i965_gpe_context *gpe_context,
                                struct object_surface *obj_surface,
                                unsigned long binding_table_offset,
                                unsigned long surface_state_offset,
                                int write_enabled)
{
    struct i965_surface_state *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, True);
    assert(bo->virtual);

    ss = (struct i965_surface_state *)((char *)bo->virtual + surface_state_offset);
    i965_gpe_set_media_rw_surface_state(ctx, obj_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, write_enabled ? I915_GEM_DOMAIN_RENDER : 0,
                      0,
                      surface_state_offset + offsetof(struct i965_surface_state, ss1),
                      obj_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}

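/*
 * For buffer surfaces, SURFACE_STATE encodes (num_entries - 1) split across
 * the width/height/depth fields; the field widths differ per generation,
 * which is why each gen has its own packing helper below.
 */
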
static void
i965_gpe_set_buffer_surface_state(VADriverContextP ctx,
                                  struct i965_buffer_surface *buffer_surface,
                                  struct i965_surface_state *ss)
{
    int num_entries;

    assert(buffer_surface->bo);
    num_entries = buffer_surface->num_blocks * buffer_surface->size_block / buffer_surface->pitch;

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.render_cache_read_mode = 1;
    ss->ss0.surface_type = I965_SURFACE_BUFFER;
    /* ss1 */
    ss->ss1.base_addr = buffer_surface->bo->offset;
    /* ss2 */
    ss->ss2.width = ((num_entries - 1) & 0x7f);
    ss->ss2.height = (((num_entries - 1) >> 7) & 0x1fff);
    /* ss3 */
    ss->ss3.depth = (((num_entries - 1) >> 20) & 0x7f);
    ss->ss3.pitch = buffer_surface->pitch - 1;
}

void
i965_gpe_buffer_suface_setup(VADriverContextP ctx,
                             struct i965_gpe_context *gpe_context,
                             struct i965_buffer_surface *buffer_surface,
                             unsigned long binding_table_offset,
                             unsigned long surface_state_offset)
{
    struct i965_surface_state *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);

    ss = (struct i965_surface_state *)((char *)bo->virtual + surface_state_offset);
    i965_gpe_set_buffer_surface_state(ctx, buffer_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                      0,
                      surface_state_offset + offsetof(struct i965_surface_state, ss1),
                      buffer_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}

static void
gen7_gpe_set_surface2_state(VADriverContextP ctx,
                            struct object_surface *obj_surface,
                            struct gen7_surface_state2 *ss)
{
    int w, h, w_pitch;
    unsigned int tiling, swizzle;

    assert(obj_surface->bo);
    assert(obj_surface->fourcc == VA_FOURCC_NV12);

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.surface_base_address = obj_surface->bo->offset;
    /* ss1 */
    ss->ss1.cbcr_pixel_offset_v_direction = 2;
    ss->ss1.width = w - 1;
    ss->ss1.height = h - 1;
    /* ss2 */
    ss->ss2.surface_format = MFX_SURFACE_PLANAR_420_8;
    ss->ss2.interleave_chroma = 1;
    ss->ss2.pitch = w_pitch - 1;
    ss->ss2.half_pitch_for_chroma = 0;
    gen7_gpe_set_surface2_tiling(ss, tiling);
    /* ss3: UV offset for interleave mode */
    ss->ss3.x_offset_for_cb = obj_surface->x_cb_offset;
    ss->ss3.y_offset_for_cb = obj_surface->y_cb_offset;
}

void
gen7_gpe_surface2_setup(VADriverContextP ctx,
                        struct i965_gpe_context *gpe_context,
                        struct object_surface *obj_surface,
                        unsigned long binding_table_offset,
                        unsigned long surface_state_offset)
{
    struct gen7_surface_state2 *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);

    ss = (struct gen7_surface_state2 *)((char *)bo->virtual + surface_state_offset);
    gen7_gpe_set_surface2_state(ctx, obj_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      0,
                      surface_state_offset + offsetof(struct gen7_surface_state2, ss0),
                      obj_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}

static void
gen7_gpe_set_media_rw_surface_state(VADriverContextP ctx,
                                    struct object_surface *obj_surface,
                                    struct gen7_surface_state *ss)
{
    int w, h, w_pitch;
    unsigned int tiling, swizzle;

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = I965_SURFACEFORMAT_R8_UNORM;
    /* ss1 */
    ss->ss1.base_addr = obj_surface->bo->offset;
    /* ss2 */
    ss->ss2.width = w / 4 - 1;  /* in DWORDs for media read & write message */
    ss->ss2.height = h - 1;
    /* ss3 */
    ss->ss3.pitch = w_pitch - 1;
    gen7_gpe_set_surface_tiling(ss, tiling);
}

static void
gen75_gpe_set_media_chroma_surface_state(VADriverContextP ctx,
                                         struct object_surface *obj_surface,
                                         struct gen7_surface_state *ss)
{
    int w, w_pitch;
    unsigned int tiling, swizzle;
    int cbcr_offset;

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    w_pitch = obj_surface->width;

    cbcr_offset = obj_surface->height * obj_surface->width;
    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = I965_SURFACEFORMAT_R8_UNORM;
    /* ss1: base address at the start of the chroma plane */
    ss->ss1.base_addr = obj_surface->bo->offset + cbcr_offset;
    /* ss2 */
    ss->ss2.width = w / 4 - 1;  /* in DWORDs for media read & write message */
    ss->ss2.height = (obj_surface->height / 2) - 1;
    /* ss3 */
    ss->ss3.pitch = w_pitch - 1;
    gen7_gpe_set_surface_tiling(ss, tiling);
}

void
gen7_gpe_media_rw_surface_setup(VADriverContextP ctx,
                                struct i965_gpe_context *gpe_context,
                                struct object_surface *obj_surface,
                                unsigned long binding_table_offset,
                                unsigned long surface_state_offset,
                                int write_enabled)
{
    struct gen7_surface_state *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, True);
    assert(bo->virtual);

    ss = (struct gen7_surface_state *)((char *)bo->virtual + surface_state_offset);
    gen7_gpe_set_media_rw_surface_state(ctx, obj_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, write_enabled ? I915_GEM_DOMAIN_RENDER : 0,
                      0,
                      surface_state_offset + offsetof(struct gen7_surface_state, ss1),
                      obj_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}

void
gen75_gpe_media_chroma_surface_setup(VADriverContextP ctx,
                                     struct i965_gpe_context *gpe_context,
                                     struct object_surface *obj_surface,
                                     unsigned long binding_table_offset,
                                     unsigned long surface_state_offset,
                                     int write_enabled)
{
    struct gen7_surface_state *ss;
    dri_bo *bo;
    int cbcr_offset;

    assert(obj_surface->fourcc == VA_FOURCC_NV12);
    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, True);
    assert(bo->virtual);

    cbcr_offset = obj_surface->height * obj_surface->width;
    ss = (struct gen7_surface_state *)((char *)bo->virtual + surface_state_offset);
    gen75_gpe_set_media_chroma_surface_state(ctx, obj_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, write_enabled ? I915_GEM_DOMAIN_RENDER : 0,
                      cbcr_offset,
                      surface_state_offset + offsetof(struct gen7_surface_state, ss1),
                      obj_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}

static void
gen7_gpe_set_buffer_surface_state(VADriverContextP ctx,
                                  struct i965_buffer_surface *buffer_surface,
                                  struct gen7_surface_state *ss)
{
    int num_entries;

    assert(buffer_surface->bo);
    num_entries = buffer_surface->num_blocks * buffer_surface->size_block / buffer_surface->pitch;

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.surface_type = I965_SURFACE_BUFFER;
    /* ss1 */
    ss->ss1.base_addr = buffer_surface->bo->offset;
    /* ss2 */
    ss->ss2.width = ((num_entries - 1) & 0x7f);
    ss->ss2.height = (((num_entries - 1) >> 7) & 0x3fff);
    /* ss3 */
    ss->ss3.depth = (((num_entries - 1) >> 21) & 0x3f);
    ss->ss3.pitch = buffer_surface->pitch - 1;
}

void
gen7_gpe_buffer_suface_setup(VADriverContextP ctx,
                             struct i965_gpe_context *gpe_context,
                             struct i965_buffer_surface *buffer_surface,
                             unsigned long binding_table_offset,
                             unsigned long surface_state_offset)
{
    struct gen7_surface_state *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);

    ss = (struct gen7_surface_state *)((char *)bo->virtual + surface_state_offset);
    gen7_gpe_set_buffer_surface_state(ctx, buffer_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                      0,
                      surface_state_offset + offsetof(struct gen7_surface_state, ss1),
                      buffer_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}

static void
gen8_gpe_set_surface2_state(VADriverContextP ctx,
                            struct object_surface *obj_surface,
                            struct gen8_surface_state2 *ss)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int w, h, w_pitch;
    unsigned int tiling, swizzle;

    assert(obj_surface->bo);
    assert(obj_surface->fourcc == VA_FOURCC_NV12
           || obj_surface->fourcc == VA_FOURCC_P010);

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;

    memset(ss, 0, sizeof(*ss));

    if (IS_GEN9(i965->intel.device_info))
        ss->ss5.surface_object_mocs = GEN9_CACHE_PTE;

    /* ss6/ss7: 48-bit base address */
    ss->ss6.base_addr = (uint32_t)obj_surface->bo->offset64;
    ss->ss7.base_addr_high = (uint32_t)(obj_surface->bo->offset64 >> 32);
    /* ss1 */
    ss->ss1.cbcr_pixel_offset_v_direction = 2;
    ss->ss1.width = w - 1;
    ss->ss1.height = h - 1;
    /* ss2 */
    ss->ss2.surface_format = MFX_SURFACE_PLANAR_420_8;
    ss->ss2.interleave_chroma = 1;
    ss->ss2.pitch = w_pitch - 1;
    ss->ss2.half_pitch_for_chroma = 0;
    gen8_gpe_set_surface2_tiling(ss, tiling);
    /* ss3: UV offset for interleave mode */
    ss->ss3.x_offset_for_cb = obj_surface->x_cb_offset;
    ss->ss3.y_offset_for_cb = obj_surface->y_cb_offset;
}

void
gen8_gpe_surface2_setup(VADriverContextP ctx,
                        struct i965_gpe_context *gpe_context,
                        struct object_surface *obj_surface,
                        unsigned long binding_table_offset,
                        unsigned long surface_state_offset)
{
    struct gen8_surface_state2 *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);

    ss = (struct gen8_surface_state2 *)((char *)bo->virtual + surface_state_offset);
    gen8_gpe_set_surface2_state(ctx, obj_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      0,
                      surface_state_offset + offsetof(struct gen8_surface_state2, ss6),
                      obj_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}

static void
gen8_gpe_set_media_rw_surface_state(VADriverContextP ctx,
                                    struct object_surface *obj_surface,
                                    struct gen8_surface_state *ss)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int w, h, w_pitch;
    unsigned int tiling, swizzle;

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    h = obj_surface->orig_height;
    w_pitch = obj_surface->width;

    memset(ss, 0, sizeof(*ss));

    if (IS_GEN9(i965->intel.device_info))
        ss->ss1.surface_mocs = GEN9_CACHE_PTE;

    /* ss0 */
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = I965_SURFACEFORMAT_R8_UNORM;
    /* ss8/ss9: 48-bit base address */
    ss->ss8.base_addr = (uint32_t)obj_surface->bo->offset64;
    ss->ss9.base_addr_high = (uint32_t)(obj_surface->bo->offset64 >> 32);
    /* ss2 */
    ss->ss2.width = w / 4 - 1;  /* in DWORDs for media read & write message */
    ss->ss2.height = h - 1;
    /* ss3 */
    ss->ss3.pitch = w_pitch - 1;
    gen8_gpe_set_surface_tiling(ss, tiling);
}

static void
gen8_gpe_set_media_chroma_surface_state(VADriverContextP ctx,
                                        struct object_surface *obj_surface,
                                        struct gen8_surface_state *ss)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int w, w_pitch;
    unsigned int tiling, swizzle;
    int cbcr_offset;
    uint64_t base_offset;

    dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);
    w = obj_surface->orig_width;
    w_pitch = obj_surface->width;

    cbcr_offset = obj_surface->height * obj_surface->width;
    memset(ss, 0, sizeof(*ss));

    if (IS_GEN9(i965->intel.device_info))
        ss->ss1.surface_mocs = GEN9_CACHE_PTE;

    /* ss0 */
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = I965_SURFACEFORMAT_R8_UNORM;
    /* ss8/ss9: 48-bit base address at the start of the chroma plane */
    base_offset = obj_surface->bo->offset64 + cbcr_offset;
    ss->ss8.base_addr = (uint32_t)base_offset;
    ss->ss9.base_addr_high = (uint32_t)(base_offset >> 32);
    /* ss2 */
    ss->ss2.width = w / 4 - 1;  /* in DWORDs for media read & write message */
    ss->ss2.height = (obj_surface->height / 2) - 1;
    /* ss3 */
    ss->ss3.pitch = w_pitch - 1;
    gen8_gpe_set_surface_tiling(ss, tiling);
}

void
gen8_gpe_media_rw_surface_setup(VADriverContextP ctx,
                                struct i965_gpe_context *gpe_context,
                                struct object_surface *obj_surface,
                                unsigned long binding_table_offset,
                                unsigned long surface_state_offset,
                                int write_enabled)
{
    struct gen8_surface_state *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, True);
    assert(bo->virtual);

    ss = (struct gen8_surface_state *)((char *)bo->virtual + surface_state_offset);
    gen8_gpe_set_media_rw_surface_state(ctx, obj_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, write_enabled ? I915_GEM_DOMAIN_RENDER : 0,
                      0,
                      surface_state_offset + offsetof(struct gen8_surface_state, ss8),
                      obj_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}

void
gen8_gpe_media_chroma_surface_setup(VADriverContextP ctx,
                                    struct i965_gpe_context *gpe_context,
                                    struct object_surface *obj_surface,
                                    unsigned long binding_table_offset,
                                    unsigned long surface_state_offset,
                                    int write_enabled)
{
    struct gen8_surface_state *ss;
    dri_bo *bo;
    int cbcr_offset;

    assert(obj_surface->fourcc == VA_FOURCC_NV12
           || obj_surface->fourcc == VA_FOURCC_P010);
    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, True);
    assert(bo->virtual);

    cbcr_offset = obj_surface->height * obj_surface->width;
    ss = (struct gen8_surface_state *)((char *)bo->virtual + surface_state_offset);
    gen8_gpe_set_media_chroma_surface_state(ctx, obj_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, write_enabled ? I915_GEM_DOMAIN_RENDER : 0,
                      cbcr_offset,
                      surface_state_offset + offsetof(struct gen8_surface_state, ss8),
                      obj_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}

static void
gen8_gpe_set_buffer_surface_state(VADriverContextP ctx,
                                  struct i965_buffer_surface *buffer_surface,
                                  struct gen8_surface_state *ss)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int num_entries;

    assert(buffer_surface->bo);
    num_entries = buffer_surface->num_blocks * buffer_surface->size_block / buffer_surface->pitch;

    memset(ss, 0, sizeof(*ss));
    /* ss0 */
    ss->ss0.surface_type = I965_SURFACE_BUFFER;
    if (IS_GEN9(i965->intel.device_info))
        ss->ss1.surface_mocs = GEN9_CACHE_PTE;

    /* ss8/ss9: 48-bit base address */
    ss->ss8.base_addr = (uint32_t)buffer_surface->bo->offset64;
    ss->ss9.base_addr_high = (uint32_t)(buffer_surface->bo->offset64 >> 32);
    /* ss2 */
    ss->ss2.width = ((num_entries - 1) & 0x7f);
    ss->ss2.height = (((num_entries - 1) >> 7) & 0x3fff);
    /* ss3 */
    ss->ss3.depth = (((num_entries - 1) >> 21) & 0x3f);
    ss->ss3.pitch = buffer_surface->pitch - 1;
}

void
gen8_gpe_buffer_suface_setup(VADriverContextP ctx,
                             struct i965_gpe_context *gpe_context,
                             struct i965_buffer_surface *buffer_surface,
                             unsigned long binding_table_offset,
                             unsigned long surface_state_offset)
{
    struct gen8_surface_state *ss;
    dri_bo *bo;

    bo = gpe_context->surface_state_binding_table.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);

    ss = (struct gen8_surface_state *)((char *)bo->virtual + surface_state_offset);
    gen8_gpe_set_buffer_surface_state(ctx, buffer_surface, ss);
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                      0,
                      surface_state_offset + offsetof(struct gen8_surface_state, ss8),
                      buffer_surface->bo);

    *((unsigned int *)((char *)bo->virtual + binding_table_offset)) = surface_state_offset;
    dri_bo_unmap(bo);
}

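/*
 * Gen8+ uses 48-bit state base addresses: each base occupies two dwords
 * (emitted via OUT_RELOC64), so STATE_BASE_ADDRESS grows to 16 dwords
 * (19 on Gen9, which appends the bindless surface state base).
 */
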
static void
gen8_gpe_state_base_address(VADriverContextP ctx,
                            struct i965_gpe_context *gpe_context,
                            struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 16);

    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));

    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);          //General State Base Address
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);

    /*DW4. Surface state base address */
    OUT_RELOC64(batch, gpe_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */

    /*DW6. Dynamic state base address */
    if (gpe_context->dynamic_state.bo)
        OUT_RELOC64(batch, gpe_context->dynamic_state.bo,
                    I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
                    0, BASE_ADDRESS_MODIFY);
    else {
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);
    }

    /*DW8. Indirect object base address */
    if (gpe_context->indirect_state.bo)
        OUT_RELOC64(batch, gpe_context->indirect_state.bo,
                    I915_GEM_DOMAIN_SAMPLER,
                    0, BASE_ADDRESS_MODIFY);
    else {
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);
    }

    /*DW10. Instruction base address */
    if (gpe_context->instruction_state.bo)
        OUT_RELOC64(batch, gpe_context->instruction_state.bo,
                    I915_GEM_DOMAIN_INSTRUCTION,
                    0, BASE_ADDRESS_MODIFY);
    else {
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);
    }

    /* DW12. Size limitations */
    OUT_BATCH(batch, 0xFFFFF000 | BASE_ADDRESS_MODIFY); //General State Access Upper Bound
    OUT_BATCH(batch, 0xFFFFF000 | BASE_ADDRESS_MODIFY); //Dynamic State Access Upper Bound
    OUT_BATCH(batch, 0xFFFFF000 | BASE_ADDRESS_MODIFY); //Indirect Object Access Upper Bound
    OUT_BATCH(batch, 0xFFFFF000 | BASE_ADDRESS_MODIFY); //Instruction Access Upper Bound

    /* The LLC coherent dwords are not part of the 16-dword gen8 layout and
     * are intentionally not emitted:
     * OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);           //LLC Coherent Base Address
     * OUT_BATCH(batch, 0xFFFFF000 | BASE_ADDRESS_MODIFY);  //LLC Coherent Upper Bound
     */

    ADVANCE_BATCH(batch);
}

static void
gen8_gpe_vfe_state(VADriverContextP ctx,
                   struct i965_gpe_context *gpe_context,
                   struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 9);

    OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
    /* Scratch Space Base Pointer and Space */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch,
              gpe_context->vfe_state.max_num_threads << 16 |    /* Maximum Number of Threads */
              gpe_context->vfe_state.num_urb_entries << 8 |     /* Number of URB Entries */
              gpe_context->vfe_state.gpgpu_mode << 2);          /* MEDIA Mode */
    OUT_BATCH(batch, 0);                                        /* Debug: Object ID */
    OUT_BATCH(batch,
              gpe_context->vfe_state.urb_entry_size << 16 |     /* URB Entry Allocation Size */
              gpe_context->vfe_state.curbe_allocation_size);    /* CURBE Allocation Size */

    /* vfe_desc5/6/7 decide whether the scoreboard is used. */
    OUT_BATCH(batch, gpe_context->vfe_desc5.dword);
    OUT_BATCH(batch, gpe_context->vfe_desc6.dword);
    OUT_BATCH(batch, gpe_context->vfe_desc7.dword);

    ADVANCE_BATCH(batch);
}

static void
gen8_gpe_curbe_load(VADriverContextP ctx,
                    struct i965_gpe_context *gpe_context,
                    struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 4);

    OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, ALIGN(gpe_context->curbe.length, 64));
    OUT_BATCH(batch, gpe_context->curbe.offset);

    ADVANCE_BATCH(batch);
}

static void
gen8_gpe_idrt(VADriverContextP ctx,
              struct i965_gpe_context *gpe_context,
              struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 6);

    OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, CMD_MEDIA_INTERFACE_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, gpe_context->idrt.max_entries * gpe_context->idrt.entry_size);
    OUT_BATCH(batch, gpe_context->idrt.offset);

    ADVANCE_BATCH(batch);
}

void
gen8_gpe_pipeline_setup(VADriverContextP ctx,
                        struct i965_gpe_context *gpe_context,
                        struct intel_batchbuffer *batch)
{
    intel_batchbuffer_emit_mi_flush(batch);

    i965_gpe_select(ctx, gpe_context, batch);
    gen8_gpe_state_base_address(ctx, gpe_context, batch);
    gen8_gpe_vfe_state(ctx, gpe_context, batch);
    gen8_gpe_curbe_load(ctx, gpe_context, batch);
    gen8_gpe_idrt(ctx, gpe_context, batch);
}

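/*
 * Gen8+ packs CURBE, interface descriptors and sampler states into one
 * "dynamic state" bo, each sub-allocation aligned to 64 bytes:
 *
 *   | curbe | idrt (max_entries * entry_size) | sampler states |
 *
 * curbe.bo/idrt.bo/sampler.bo below are extra references to that same bo,
 * with per-region offsets recorded alongside.
 */
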
void
gen8_gpe_context_init(VADriverContextP ctx,
                      struct i965_gpe_context *gpe_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    dri_bo *bo;
    int bo_size;
    unsigned int start_offset, end_offset;

    dri_bo_unreference(gpe_context->surface_state_binding_table.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "surface state & binding table",
                      gpe_context->surface_state_binding_table.length,
                      4096);
    assert(bo);
    gpe_context->surface_state_binding_table.bo = bo;

    bo_size = gpe_context->idrt.max_entries * ALIGN(gpe_context->idrt.entry_size, 64) +
              ALIGN(gpe_context->curbe.length, 64) +
              gpe_context->sampler.max_entries * ALIGN(gpe_context->sampler.entry_size, 64);
    dri_bo_unreference(gpe_context->dynamic_state.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "dynamic state",
                      bo_size,
                      4096);
    assert(bo);
    gpe_context->dynamic_state.bo = bo;
    gpe_context->dynamic_state.bo_size = bo_size;

    end_offset = 0;
    gpe_context->dynamic_state.end_offset = 0;

    /* Constant buffer offset */
    start_offset = ALIGN(end_offset, 64);
    dri_bo_unreference(gpe_context->curbe.bo);
    gpe_context->curbe.bo = bo;
    dri_bo_reference(gpe_context->curbe.bo);
    gpe_context->curbe.offset = start_offset;
    end_offset = start_offset + gpe_context->curbe.length;

    /* Interface descriptor offset */
    start_offset = ALIGN(end_offset, 64);
    dri_bo_unreference(gpe_context->idrt.bo);
    gpe_context->idrt.bo = bo;
    dri_bo_reference(gpe_context->idrt.bo);
    gpe_context->idrt.offset = start_offset;
    end_offset = start_offset + ALIGN(gpe_context->idrt.entry_size, 64) * gpe_context->idrt.max_entries;

    /* Sampler state offset */
    start_offset = ALIGN(end_offset, 64);
    dri_bo_unreference(gpe_context->sampler.bo);
    gpe_context->sampler.bo = bo;
    dri_bo_reference(gpe_context->sampler.bo);
    gpe_context->sampler.offset = start_offset;
    end_offset = start_offset + ALIGN(gpe_context->sampler.entry_size, 64) * gpe_context->sampler.max_entries;

    /* Update the end offset of dynamic_state */
    gpe_context->dynamic_state.end_offset = end_offset;
}

void
gen8_gpe_context_destroy(struct i965_gpe_context *gpe_context)
{
    dri_bo_unreference(gpe_context->surface_state_binding_table.bo);
    gpe_context->surface_state_binding_table.bo = NULL;

    dri_bo_unreference(gpe_context->instruction_state.bo);
    gpe_context->instruction_state.bo = NULL;

    dri_bo_unreference(gpe_context->dynamic_state.bo);
    gpe_context->dynamic_state.bo = NULL;

    dri_bo_unreference(gpe_context->indirect_state.bo);
    gpe_context->indirect_state.bo = NULL;

    dri_bo_unreference(gpe_context->curbe.bo);
    gpe_context->curbe.bo = NULL;

    dri_bo_unreference(gpe_context->idrt.bo);
    gpe_context->idrt.bo = NULL;

    dri_bo_unreference(gpe_context->sampler.bo);
    gpe_context->sampler.bo = NULL;
}

void
gen8_gpe_load_kernels(VADriverContextP ctx,
                      struct i965_gpe_context *gpe_context,
                      struct i965_kernel *kernel_list,
                      unsigned int num_kernels)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int i, kernel_size = 0;
    unsigned int kernel_offset, end_offset;
    unsigned char *kernel_ptr;
    struct i965_kernel *kernel;

    assert(num_kernels <= MAX_GPE_KERNELS);
    memcpy(gpe_context->kernels, kernel_list, sizeof(*kernel_list) * num_kernels);
    gpe_context->num_kernels = num_kernels;

    /* All kernels are packed into one bo, each aligned to 64 bytes */
    for (i = 0; i < num_kernels; i++) {
        kernel = &gpe_context->kernels[i];

        kernel_size += ALIGN(kernel->size, 64);
    }

    gpe_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
                                                     "kernel shader",
                                                     kernel_size,
                                                     0x1000);
    if (gpe_context->instruction_state.bo == NULL) {
        WARN_ONCE("failure to allocate the buffer space for kernel shader\n");
        return;
    }

    assert(gpe_context->instruction_state.bo);

    gpe_context->instruction_state.bo_size = kernel_size;
    gpe_context->instruction_state.end_offset = 0;
    end_offset = 0;

    dri_bo_map(gpe_context->instruction_state.bo, 1);
    kernel_ptr = (unsigned char *)(gpe_context->instruction_state.bo->virtual);
    for (i = 0; i < num_kernels; i++) {
        kernel_offset = ALIGN(end_offset, 64);
        kernel = &gpe_context->kernels[i];
        kernel->kernel_offset = kernel_offset;

        if (kernel->size) {
            memcpy(kernel_ptr + kernel_offset, kernel->bin, kernel->size);

            end_offset = kernel_offset + kernel->size;
        }
    }

    gpe_context->instruction_state.end_offset = end_offset;

    dri_bo_unmap(gpe_context->instruction_state.bo);
}

static void
gen9_gpe_state_base_address(VADriverContextP ctx,
                            struct i965_gpe_context *gpe_context,
                            struct intel_batchbuffer *batch)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    BEGIN_BATCH(batch, 19);

    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (19 - 2));

    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);          //General State Base Address
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);

    /*DW4. Surface state base address */
    OUT_RELOC64(batch, gpe_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY | (i965->intel.mocs_state << 4)); /* Surface state base address */

    /*DW6. Dynamic state base address */
    if (gpe_context->dynamic_state.bo)
        OUT_RELOC64(batch, gpe_context->dynamic_state.bo,
                    I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
                    I915_GEM_DOMAIN_RENDER,
                    BASE_ADDRESS_MODIFY | (i965->intel.mocs_state << 4));
    else {
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);
    }

    /*DW8. Indirect object base address */
    if (gpe_context->indirect_state.bo)
        OUT_RELOC64(batch, gpe_context->indirect_state.bo,
                    I915_GEM_DOMAIN_SAMPLER,
                    0, BASE_ADDRESS_MODIFY | (i965->intel.mocs_state << 4));
    else {
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);
    }

    /*DW10. Instruction base address */
    if (gpe_context->instruction_state.bo)
        OUT_RELOC64(batch, gpe_context->instruction_state.bo,
                    I915_GEM_DOMAIN_INSTRUCTION,
                    0, BASE_ADDRESS_MODIFY | (i965->intel.mocs_state << 4));
    else {
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);
    }

    /* DW12. Size limitations */
    OUT_BATCH(batch, 0xFFFFF000 | BASE_ADDRESS_MODIFY); //General State Access Upper Bound
    OUT_BATCH(batch, 0xFFFFF000 | BASE_ADDRESS_MODIFY); //Dynamic State Access Upper Bound
    OUT_BATCH(batch, 0xFFFFF000 | BASE_ADDRESS_MODIFY); //Indirect Object Access Upper Bound
    OUT_BATCH(batch, 0xFFFFF000 | BASE_ADDRESS_MODIFY); //Instruction Access Upper Bound

    /* DW16-18. Bindless surface state base address */
    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0xFFFFF000);

    ADVANCE_BATCH(batch);
}

static void
gen9_gpe_select(VADriverContextP ctx,
                struct i965_gpe_context *gpe_context,
                struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA |
              GEN9_PIPELINE_SELECTION_MASK |
              GEN9_MEDIA_DOP_GATE_OFF |
              GEN9_MEDIA_DOP_GATE_MASK |
              GEN9_FORCE_MEDIA_AWAKE_ON |
              GEN9_FORCE_MEDIA_AWAKE_MASK);
    ADVANCE_BATCH(batch);
}

void
gen9_gpe_pipeline_setup(VADriverContextP ctx,
                        struct i965_gpe_context *gpe_context,
                        struct intel_batchbuffer *batch)
{
    intel_batchbuffer_emit_mi_flush(batch);

    gen9_gpe_select(ctx, gpe_context, batch);
    gen9_gpe_state_base_address(ctx, gpe_context, batch);
    gen8_gpe_vfe_state(ctx, gpe_context, batch);
    gen8_gpe_curbe_load(ctx, gpe_context, batch);
    gen8_gpe_idrt(ctx, gpe_context, batch);
}

void
gen9_gpe_pipeline_end(VADriverContextP ctx,
                      struct i965_gpe_context *gpe_context,
                      struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA |
              GEN9_PIPELINE_SELECTION_MASK |
              GEN9_MEDIA_DOP_GATE_ON |
              GEN9_MEDIA_DOP_GATE_MASK |
              GEN9_FORCE_MEDIA_AWAKE_OFF |
              GEN9_FORCE_MEDIA_AWAKE_MASK);
    ADVANCE_BATCH(batch);
}

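/*
 * struct i965_gpe_resource wraps a dri_bo together with its 2D/buffer
 * geometry (width/height/pitch, NV12 chroma offsets) so surface states can
 * be built without touching object_surface directly.
 */
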
bool
i965_allocate_gpe_resource(dri_bufmgr *bufmgr,
                           struct i965_gpe_resource *res,
                           int size,
                           const char *name)
{
    if (!res || !size)
        return false;

    res->size = size;
    res->bo = dri_bo_alloc(bufmgr, name, res->size, 4096);
    res->map = NULL;

    return (res->bo != NULL);
}

void
i965_object_surface_to_2d_gpe_resource_with_align(struct i965_gpe_resource *res,
                                                  struct object_surface *obj_surface,
                                                  unsigned int alignment)
{
    unsigned int swizzle;

    res->type = I965_GPE_RESOURCE_2D;
    res->width = ALIGN(obj_surface->orig_width, (1 << alignment));
    res->height = ALIGN(obj_surface->orig_height, (1 << alignment));
    res->pitch = obj_surface->width;
    res->size = obj_surface->size;
    res->cb_cr_pitch = obj_surface->cb_cr_pitch;
    res->x_cb_offset = obj_surface->x_cb_offset;
    res->y_cb_offset = obj_surface->y_cb_offset;
    res->bo = obj_surface->bo;
    res->map = NULL;

    dri_bo_reference(res->bo);
    dri_bo_get_tiling(obj_surface->bo, &res->tiling, &swizzle);
}

void
i965_object_surface_to_2d_gpe_resource(struct i965_gpe_resource *res,
                                       struct object_surface *obj_surface)
{
    i965_object_surface_to_2d_gpe_resource_with_align(res, obj_surface, 0);
}

void
i965_dri_object_to_buffer_gpe_resource(struct i965_gpe_resource *res,
                                       dri_bo *bo)
{
    unsigned int swizzle;

    res->type = I965_GPE_RESOURCE_BUFFER;
    res->width = bo->size;
    res->height = 1;
    res->pitch = res->width;
    res->size = res->pitch * res->height;   /* rows * pitch; height is 1 for a linear buffer */
    res->bo = bo;
    res->map = NULL;

    dri_bo_reference(res->bo);
    dri_bo_get_tiling(res->bo, &res->tiling, &swizzle);
}

void
i965_dri_object_to_2d_gpe_resource(struct i965_gpe_resource *res,
                                   dri_bo *bo,
                                   unsigned int width,
                                   unsigned int height,
                                   unsigned int pitch)
{
    unsigned int swizzle;

    res->type = I965_GPE_RESOURCE_2D;
    res->width = width;
    res->height = height;
    res->pitch = pitch;
    res->size = res->pitch * res->height;   /* rows * pitch, in bytes */
    res->bo = bo;
    res->map = NULL;

    dri_bo_reference(res->bo);
    dri_bo_get_tiling(res->bo, &res->tiling, &swizzle);
}

void
i965_zero_gpe_resource(struct i965_gpe_resource *res)
{
    if (res->bo) {
        dri_bo_map(res->bo, 1);
        memset(res->bo->virtual, 0, res->size);
        dri_bo_unmap(res->bo);
    }
}

void
i965_free_gpe_resource(struct i965_gpe_resource *res)
{
    dri_bo_unreference(res->bo);
    res->bo = NULL;
    res->map = NULL;
}

void *
i965_map_gpe_resource(struct i965_gpe_resource *res)
{
    int ret;

    if (!res->bo)
        return NULL;

    ret = dri_bo_map(res->bo, 1);
    if (ret != 0)
        return NULL;

    res->map = res->bo->virtual;
    return res->map;
}

void
i965_unmap_gpe_resource(struct i965_gpe_resource *res)
{
    if (res->bo && res->map)
        dri_bo_unmap(res->bo);

    res->map = NULL;
}

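/*
 * The gpe_mi_* helpers below emit raw MI commands with __OUT_BATCH()/
 * __OUT_RELOC64(), i.e. without BEGIN_BATCH()/ADVANCE_BATCH() bookkeeping;
 * the caller is responsible for reserving batch space.
 */
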
void
gen8_gpe_mi_flush_dw(VADriverContextP ctx,
                     struct intel_batchbuffer *batch,
                     struct gpe_mi_flush_dw_parameter *params)
{
    int video_pipeline_cache_invalidate = 0;
    int post_sync_operation = MI_FLUSH_DW_NOWRITE;

    if (params->video_pipeline_cache_invalidate)
        video_pipeline_cache_invalidate = MI_FLUSH_DW_VIDEO_PIPELINE_CACHE_INVALIDATE;

    if (params->bo)
        post_sync_operation = MI_FLUSH_DW_WRITE_QWORD;

    __OUT_BATCH(batch, (MI_FLUSH_DW2 |
                        video_pipeline_cache_invalidate |
                        post_sync_operation |
                        (5 - 2))); /* Always use PPGTT */

    if (params->bo) {
        __OUT_RELOC64(batch,
                      params->bo,
                      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                      params->offset);
    } else {
        __OUT_BATCH(batch, 0);
        __OUT_BATCH(batch, 0);
    }

    __OUT_BATCH(batch, params->dw0);
    __OUT_BATCH(batch, params->dw1);
}

void
gen8_gpe_mi_store_data_imm(VADriverContextP ctx,
                           struct intel_batchbuffer *batch,
                           struct gpe_mi_store_data_imm_parameter *params)
{
    if (params->is_qword) {
        __OUT_BATCH(batch, MI_STORE_DATA_IMM |
                    (1 << 21) |     /* store qword */
                    (5 - 2));       /* Always use PPGTT */
    } else {
        __OUT_BATCH(batch, MI_STORE_DATA_IMM | (4 - 2)); /* Always use PPGTT */
    }

    __OUT_RELOC64(batch,
                  params->bo,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                  params->offset);
    __OUT_BATCH(batch, params->dw0);

    if (params->is_qword)
        __OUT_BATCH(batch, params->dw1);
}

void
gen8_gpe_mi_store_register_mem(VADriverContextP ctx,
                               struct intel_batchbuffer *batch,
                               struct gpe_mi_store_register_mem_parameter *params)
{
    __OUT_BATCH(batch, (MI_STORE_REGISTER_MEM | (4 - 2))); /* Always use PPGTT */
    __OUT_BATCH(batch, params->mmio_offset);
    __OUT_RELOC64(batch,
                  params->bo,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                  params->offset);
}

void
gen8_gpe_mi_load_register_mem(VADriverContextP ctx,
                              struct intel_batchbuffer *batch,
                              struct gpe_mi_load_register_mem_parameter *params)
{
    __OUT_BATCH(batch, (MI_LOAD_REGISTER_MEM | (4 - 2))); /* Always use PPGTT */
    __OUT_BATCH(batch, params->mmio_offset);
    __OUT_RELOC64(batch,
                  params->bo,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                  params->offset);
}

void
gen8_gpe_mi_load_register_imm(VADriverContextP ctx,
                              struct intel_batchbuffer *batch,
                              struct gpe_mi_load_register_imm_parameter *params)
{
    __OUT_BATCH(batch, (MI_LOAD_REGISTER_IMM | (3 - 2)));
    __OUT_BATCH(batch, params->mmio_offset);
    __OUT_BATCH(batch, params->data);
}

void
gen8_gpe_mi_load_register_reg(VADriverContextP ctx,
                              struct intel_batchbuffer *batch,
                              struct gpe_mi_load_register_reg_parameter *params)
{
    __OUT_BATCH(batch, (MI_LOAD_REGISTER_REG | (3 - 2)));
    __OUT_BATCH(batch, params->src_mmio_offset);
    __OUT_BATCH(batch, params->dst_mmio_offset);
}

void
gen9_gpe_mi_math(VADriverContextP ctx,
                 struct intel_batchbuffer *batch,
                 struct gpe_mi_math_parameter *params)
{
    __OUT_BATCH(batch, (MI_MATH | (params->num_instructions - 1)));
    intel_batchbuffer_data(batch, params->instruction_list, params->num_instructions * 4);
}

void
gen9_gpe_mi_conditional_batch_buffer_end(VADriverContextP ctx,
                                         struct intel_batchbuffer *batch,
                                         struct gpe_mi_conditional_batch_buffer_end_parameter *params)
{
    int compare_mask_mode_enabled = MI_COMPARE_MASK_MODE_ENANBLED;

    if (params->compare_mask_mode_disabled)
        compare_mask_mode_enabled = 0;

    __OUT_BATCH(batch, (MI_CONDITIONAL_BATCH_BUFFER_END |
                        (1 << 21) |     /* compare semaphore */
                        compare_mask_mode_enabled |
                        (4 - 2)));      /* Always use PPGTT */
    __OUT_BATCH(batch, params->compare_data);
    __OUT_RELOC64(batch,
                  params->bo,
                  I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION, 0,
                  params->offset);
}

void
gen8_gpe_mi_batch_buffer_start(VADriverContextP ctx,
                               struct intel_batchbuffer *batch,
                               struct gpe_mi_batch_buffer_start_parameter *params)
{
    __OUT_BATCH(batch, (MI_BATCH_BUFFER_START |
                        (!!params->is_second_level << 22) |
                        (!params->use_global_gtt << 8) |
                        (1 << 0)));
    __OUT_RELOC64(batch,
                  params->bo,
                  I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION, 0,
                  params->offset);
}

void
gen8_gpe_context_set_dynamic_buffer(VADriverContextP ctx,
                                    struct i965_gpe_context *gpe_context,
                                    struct gpe_dynamic_state_parameter *ds)
{
    if (!ds->bo || !gpe_context)
        return;

    dri_bo_unreference(gpe_context->dynamic_state.bo);
    gpe_context->dynamic_state.bo = ds->bo;
    dri_bo_reference(gpe_context->dynamic_state.bo);
    gpe_context->dynamic_state.bo_size = ds->bo_size;

    /* The curbe buffer is part of the dynamic buffer */
    dri_bo_unreference(gpe_context->curbe.bo);
    gpe_context->curbe.bo = ds->bo;
    dri_bo_reference(gpe_context->curbe.bo);
    gpe_context->curbe.offset = ds->curbe_offset;

    /* The idrt buffer is part of the dynamic buffer */
    dri_bo_unreference(gpe_context->idrt.bo);
    gpe_context->idrt.bo = ds->bo;
    dri_bo_reference(gpe_context->idrt.bo);
    gpe_context->idrt.offset = ds->idrt_offset;

    /* The sampler buffer is part of the dynamic buffer */
    dri_bo_unreference(gpe_context->sampler.bo);
    gpe_context->sampler.bo = ds->bo;
    dri_bo_reference(gpe_context->sampler.bo);
    gpe_context->sampler.offset = ds->sampler_offset;
}

void *
i965_gpe_context_map_curbe(struct i965_gpe_context *gpe_context)
{
    dri_bo_map(gpe_context->curbe.bo, 1);

    return (char *)gpe_context->curbe.bo->virtual + gpe_context->curbe.offset;
}

void
i965_gpe_context_unmap_curbe(struct i965_gpe_context *gpe_context)
{
    dri_bo_unmap(gpe_context->curbe.bo);
}

void
gen9_gpe_reset_binding_table(VADriverContextP ctx,
                             struct i965_gpe_context *gpe_context)
{
    unsigned int *binding_table;
    unsigned int binding_table_offset = gpe_context->surface_state_binding_table.binding_table_offset;
    int i;

    dri_bo_map(gpe_context->surface_state_binding_table.bo, 1);
    binding_table = (unsigned int *)((char *)gpe_context->surface_state_binding_table.bo->virtual + binding_table_offset);

    /* Point every binding-table entry at its padded surface-state slot */
    for (i = 0; i < gpe_context->surface_state_binding_table.max_entries; i++) {
        *(binding_table + i) = gpe_context->surface_state_binding_table.surface_state_offset + i * SURFACE_STATE_PADDED_SIZE_GEN9;
    }

    dri_bo_unmap(gpe_context->surface_state_binding_table.bo);
}

void
gen8_gpe_setup_interface_data(VADriverContextP ctx,
                              struct i965_gpe_context *gpe_context)
{
    struct gen8_interface_descriptor_data *desc;
    int i;
    dri_bo *bo;
    unsigned char *desc_ptr;

    bo = gpe_context->idrt.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);
    desc_ptr = (unsigned char *)bo->virtual + gpe_context->idrt.offset;
    desc = (struct gen8_interface_descriptor_data *)desc_ptr;

    for (i = 0; i < gpe_context->num_kernels; i++) {
        struct i965_kernel *kernel;

        kernel = &gpe_context->kernels[i];
        assert(sizeof(*desc) == 32);

        /* Set up the descriptor table */
        memset(desc, 0, sizeof(*desc));
        desc->desc0.kernel_start_pointer = kernel->kernel_offset >> 6;
        desc->desc3.sampler_count = 0;
        desc->desc3.sampler_state_pointer = (gpe_context->sampler.offset >> 5);
        desc->desc4.binding_table_entry_count = 0;
        desc->desc4.binding_table_pointer = (gpe_context->surface_state_binding_table.binding_table_offset >> 5);
        desc->desc5.constant_urb_entry_read_offset = 0;
        desc->desc5.constant_urb_entry_read_length = ALIGN(gpe_context->curbe.length, 32) >> 5; // in registers

        desc++;
    }

    dri_bo_unmap(bo);
}

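/*
 * Each 32-byte interface descriptor binds one kernel entry point to its
 * sampler states, binding table and CURBE read length; MEDIA_OBJECT then
 * selects a descriptor by index via interface_offset.
 */
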
static void
gen9_gpe_set_surface_tiling(struct gen9_surface_state *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
gen9_gpe_set_surface2_tiling(struct gen9_surface_state2 *ss, unsigned int tiling)
{
    switch (tiling) {
    case I915_TILING_NONE:
        ss->ss2.tiled_surface = 0;
        ss->ss2.tile_walk = 0;
        break;
    case I915_TILING_X:
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
        break;
    case I915_TILING_Y:
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
        break;
    }
}

static void
gen9_gpe_set_2d_surface_state(struct gen9_surface_state *ss,
                              unsigned int cacheability_control,
                              unsigned int format,
                              unsigned int tiling,
                              unsigned int width,
                              unsigned int height,
                              unsigned int pitch,
                              uint64_t base_offset,
                              unsigned int y_offset)
{
    memset(ss, 0, sizeof(*ss));

    /* Always set 1 (align-4 mode) */
    ss->ss0.vertical_alignment = 1;
    ss->ss0.horizontal_alignment = 1;

    ss->ss0.surface_format = format;
    ss->ss0.surface_type = I965_SURFACE_2D;

    ss->ss1.surface_mocs = cacheability_control;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    ss->ss5.y_offset = y_offset;

    ss->ss7.shader_chanel_select_a = HSW_SCS_ALPHA;
    ss->ss7.shader_chanel_select_b = HSW_SCS_BLUE;
    ss->ss7.shader_chanel_select_g = HSW_SCS_GREEN;
    ss->ss7.shader_chanel_select_r = HSW_SCS_RED;

    ss->ss8.base_addr = (uint32_t)base_offset;
    ss->ss9.base_addr_high = (uint32_t)(base_offset >> 32);

    gen9_gpe_set_surface_tiling(ss, tiling);
}

/* This is only for NV12 format */
static void
gen9_gpe_set_adv_surface_state(struct gen9_surface_state2 *ss,
                               unsigned int v_direction,
                               unsigned int cacheability_control,
                               unsigned int format,
                               unsigned int tiling,
                               unsigned int width,
                               unsigned int height,
                               unsigned int pitch,
                               uint64_t base_offset,
                               unsigned int y_cb_offset)
{
    memset(ss, 0, sizeof(*ss));

    ss->ss1.cbcr_pixel_offset_v_direction = v_direction;
    ss->ss1.width = width - 1;
    ss->ss1.height = height - 1;

    ss->ss2.surface_format = format;
    ss->ss2.interleave_chroma = 1;
    ss->ss2.pitch = pitch - 1;

    ss->ss3.y_offset_for_cb = y_cb_offset;

    ss->ss5.surface_object_mocs = cacheability_control;

    ss->ss6.base_addr = (uint32_t)base_offset;
    ss->ss7.base_addr_high = (uint32_t)(base_offset >> 32);

    gen9_gpe_set_surface2_tiling(ss, tiling);
}

static void
gen9_gpe_set_buffer2_surface_state(struct gen9_surface_state *ss,
                                   unsigned int cacheability_control,
                                   unsigned int format,
                                   unsigned int size,
                                   unsigned int pitch,
                                   uint64_t base_offset)
{
    memset(ss, 0, sizeof(*ss));

    ss->ss0.surface_format = format;
    ss->ss0.surface_type = I965_SURFACE_BUFFER;

    ss->ss1.surface_mocs = cacheability_control;

    ss->ss2.width = (size - 1) & 0x7F;
    ss->ss2.height = ((size - 1) & 0x1FFF80) >> 7;

    ss->ss3.depth = ((size - 1) & 0xFE00000) >> 21;
    ss->ss3.pitch = pitch - 1;

    ss->ss7.shader_chanel_select_a = HSW_SCS_ALPHA;
    ss->ss7.shader_chanel_select_b = HSW_SCS_BLUE;
    ss->ss7.shader_chanel_select_g = HSW_SCS_GREEN;
    ss->ss7.shader_chanel_select_r = HSW_SCS_RED;

    ss->ss8.base_addr = (uint32_t)base_offset;
    ss->ss9.base_addr_high = (uint32_t)(base_offset >> 32);
}

void
gen9_gpe_context_add_surface(struct i965_gpe_context *gpe_context,
                             struct i965_gpe_surface *gpe_surface,
                             int index)
{
    char *buf;
    unsigned int tiling, swizzle, width, height, pitch, tile_alignment, y_offset = 0;
    unsigned int surface_state_offset = gpe_context->surface_state_binding_table.surface_state_offset +
                                        index * SURFACE_STATE_PADDED_SIZE_GEN9;
    unsigned int binding_table_offset = gpe_context->surface_state_binding_table.binding_table_offset +
                                        index * 4;
    struct i965_gpe_resource *gpe_resource = gpe_surface->gpe_resource;

    dri_bo_get_tiling(gpe_resource->bo, &tiling, &swizzle);

    dri_bo_map(gpe_context->surface_state_binding_table.bo, 1);
    buf = (char *)gpe_context->surface_state_binding_table.bo->virtual;
    *((unsigned int *)(buf + binding_table_offset)) = surface_state_offset;

    if (gpe_surface->is_2d_surface && gpe_surface->is_override_offset) {
        struct gen9_surface_state *ss = (struct gen9_surface_state *)(buf + surface_state_offset);

        width = gpe_resource->width;
        height = gpe_resource->height;
        pitch = gpe_resource->pitch;

        if (gpe_surface->is_media_block_rw) {
            if (gpe_surface->is_16bpp)
                width = (ALIGN(width * 2, 4) >> 2);
            else
                width = (ALIGN(width, 4) >> 2);
        }

        gen9_gpe_set_2d_surface_state(ss,
                                      gpe_surface->cacheability_control,
                                      gpe_surface->format,
                                      tiling,
                                      width, height, pitch,
                                      gpe_resource->bo->offset64 + gpe_surface->offset,
                                      0);

        dri_bo_emit_reloc(gpe_context->surface_state_binding_table.bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          gpe_surface->offset,
                          surface_state_offset + offsetof(struct gen9_surface_state, ss8),
                          gpe_resource->bo);
    } else if (gpe_surface->is_2d_surface && gpe_surface->is_uv_surface) {
        unsigned int cbcr_offset;
        struct gen9_surface_state *ss = (struct gen9_surface_state *)(buf + surface_state_offset);

        width = gpe_resource->width;
        height = gpe_resource->height / 2;
        pitch = gpe_resource->pitch;

        if (gpe_surface->is_media_block_rw) {
            if (gpe_surface->is_16bpp)
                width = (ALIGN(width * 2, 4) >> 2);
            else
                width = (ALIGN(width, 4) >> 2);
        }

        if (tiling == I915_TILING_Y) {
            tile_alignment = 32;
        } else if (tiling == I915_TILING_X) {
            tile_alignment = 8;
        } else
            tile_alignment = 1;

        y_offset = (gpe_resource->y_cb_offset % tile_alignment);
        cbcr_offset = ALIGN_FLOOR(gpe_resource->y_cb_offset, tile_alignment) * pitch;

        gen9_gpe_set_2d_surface_state(ss,
                                      gpe_surface->cacheability_control,
                                      I965_SURFACEFORMAT_R16_UINT,
                                      tiling,
                                      width, height, pitch,
                                      gpe_resource->bo->offset64 + cbcr_offset,
                                      y_offset);

        dri_bo_emit_reloc(gpe_context->surface_state_binding_table.bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          cbcr_offset,
                          surface_state_offset + offsetof(struct gen9_surface_state, ss8),
                          gpe_resource->bo);
    } else if (gpe_surface->is_2d_surface) {
        struct gen9_surface_state *ss = (struct gen9_surface_state *)(buf + surface_state_offset);

        width = gpe_resource->width;
        height = gpe_resource->height;
        pitch = gpe_resource->pitch;

        if (gpe_surface->is_media_block_rw) {
            if (gpe_surface->is_16bpp)
                width = (ALIGN(width * 2, 4) >> 2);
            else
                width = (ALIGN(width, 4) >> 2);
        }

        gen9_gpe_set_2d_surface_state(ss,
                                      gpe_surface->cacheability_control,
                                      gpe_surface->format,
                                      tiling,
                                      width, height, pitch,
                                      gpe_resource->bo->offset64,
                                      y_offset);

        dri_bo_emit_reloc(gpe_context->surface_state_binding_table.bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          surface_state_offset + offsetof(struct gen9_surface_state, ss8),
                          gpe_resource->bo);
    } else if (gpe_surface->is_adv_surface) {
        struct gen9_surface_state2 *ss = (struct gen9_surface_state2 *)(buf + surface_state_offset);

        width = gpe_resource->width;
        height = gpe_resource->height;
        pitch = gpe_resource->pitch;

        gen9_gpe_set_adv_surface_state(ss,
                                       gpe_surface->v_direction,
                                       gpe_surface->cacheability_control,
                                       MFX_SURFACE_PLANAR_420_8,
                                       tiling,
                                       width, height, pitch,
                                       gpe_resource->bo->offset64,
                                       gpe_resource->y_cb_offset);

        dri_bo_emit_reloc(gpe_context->surface_state_binding_table.bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          surface_state_offset + offsetof(struct gen9_surface_state2, ss6),
                          gpe_resource->bo);
    } else {
        struct gen9_surface_state *ss = (struct gen9_surface_state *)(buf + surface_state_offset);
        unsigned int format;

        assert(gpe_surface->is_buffer);

        if (gpe_surface->is_raw_buffer) {
            format = I965_SURFACEFORMAT_RAW;
            pitch = 1;
        } else {
            format = I965_SURFACEFORMAT_R32_UINT;
            pitch = sizeof(unsigned int);
        }

        gen9_gpe_set_buffer2_surface_state(ss,
                                           gpe_surface->cacheability_control,
                                           format,
                                           gpe_resource->size,
                                           pitch,
                                           gpe_resource->bo->offset64 + gpe_surface->offset);

        dri_bo_emit_reloc(gpe_context->surface_state_binding_table.bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          gpe_surface->offset,
                          surface_state_offset + offsetof(struct gen9_surface_state, ss8),
                          gpe_resource->bo);
    }

    dri_bo_unmap(gpe_context->surface_state_binding_table.bo);
}

bool
i965_gpe_allocate_2d_resource(dri_bufmgr *bufmgr,
                              struct i965_gpe_resource *res,
                              int width,
                              int height,
                              int pitch,
                              const char *name)
{
    int bo_size;

    if (!res)
        return false;

    res->type = I965_GPE_RESOURCE_2D;
    res->width = width;
    res->height = height;
    res->pitch = pitch;

    bo_size = ALIGN(height, 16) * pitch;
    res->size = bo_size;

    res->bo = dri_bo_alloc(bufmgr, name, res->size, 4096);
    res->map = NULL;

    return (res->bo != NULL);
}

void
gen8_gpe_media_state_flush(VADriverContextP ctx,
                           struct i965_gpe_context *gpe_context,
                           struct intel_batchbuffer *batch)
{
    BEGIN_BATCH(batch, 2);

    OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH | (2 - 2));
    OUT_BATCH(batch, 0);

    ADVANCE_BATCH(batch);
}


void
gen8_gpe_media_object(VADriverContextP ctx,
                      struct i965_gpe_context *gpe_context,
                      struct intel_batchbuffer *batch,
                      struct gpe_media_object_parameter *param)
{
    int batch_size, subdata_size;

    batch_size = 6;
    subdata_size = 0;
    if (param->pinline_data && param->inline_size) {
        subdata_size = ALIGN(param->inline_size, 4);
        batch_size += subdata_size / 4;
    }
    BEGIN_BATCH(batch, batch_size);
    OUT_BATCH(batch, CMD_MEDIA_OBJECT | (batch_size - 2));
    OUT_BATCH(batch, param->interface_offset);
    OUT_BATCH(batch, param->use_scoreboard << 21);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, (param->scoreboard_y << 16 |
                      param->scoreboard_x));
    OUT_BATCH(batch, param->scoreboard_mask);

    if (param->pinline_data && param->inline_size)
        intel_batchbuffer_data(batch, param->pinline_data, subdata_size);

    ADVANCE_BATCH(batch);
}
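
/*
 * Worked example (illustrative): with 7 bytes of inline data,
 * subdata_size = ALIGN(7, 4) = 8, so batch_size = 6 + 8 / 4 = 8 dwords and
 * the command length field is programmed as batch_size - 2 = 6.
 */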

void
gen8_gpe_media_object_walker(VADriverContextP ctx,
                             struct i965_gpe_context *gpe_context,
                             struct intel_batchbuffer *batch,
                             struct gpe_media_object_walker_parameter *param)
{
    int walker_length;

    walker_length = 17;
    if (param->inline_size)
        walker_length += ALIGN(param->inline_size, 4) / 4;
    BEGIN_BATCH(batch, walker_length);
    OUT_BATCH(batch, CMD_MEDIA_OBJECT_WALKER | (walker_length - 2));
    OUT_BATCH(batch, param->interface_offset);
    OUT_BATCH(batch, param->use_scoreboard << 21);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, (param->group_id_loop_select << 8 |
                      param->scoreboard_mask)); // DW5
    OUT_BATCH(batch, (param->color_count_minus1 << 24 |
                      param->middle_loop_extra_steps << 16 |
                      param->mid_loop_unit_y << 12 |
                      param->mid_loop_unit_x << 8));
    OUT_BATCH(batch, ((param->global_loop_exec_count & 0x3ff) << 16 |
                      (param->local_loop_exec_count & 0x3ff)));
    OUT_BATCH(batch, param->block_resolution.value);
    OUT_BATCH(batch, param->local_start.value);
    OUT_BATCH(batch, 0); // DW10
    OUT_BATCH(batch, param->local_outer_loop_stride.value);
    OUT_BATCH(batch, param->local_inner_loop_unit.value);
    OUT_BATCH(batch, param->global_resolution.value);
    OUT_BATCH(batch, param->global_start.value);
    OUT_BATCH(batch, param->global_outer_loop_stride.value);
    OUT_BATCH(batch, param->global_inner_loop_unit.value);

    if (param->pinline_data && param->inline_size)
        intel_batchbuffer_data(batch, param->pinline_data, ALIGN(param->inline_size, 4));

    ADVANCE_BATCH(batch);
}
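
/*
 * Usage sketch (illustrative; `kernel_walker_param` is a hypothetical,
 * already-filled intel_vpp_kernel_walker_parameter): callers normally build
 * the dword payload with one of the init helpers below and then emit the
 * command:
 *
 *     struct gpe_media_object_walker_parameter walker_param;
 *
 *     intel_vpp_init_media_object_walker_parameter(&kernel_walker_param,
 *                                                  &walker_param);
 *     walker_param.interface_offset = 0;  // interface descriptor index
 *     gen8_gpe_media_object_walker(ctx, gpe_context, batch, &walker_param);
 */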

void
intel_vpp_init_media_object_walker_parameter(struct intel_vpp_kernel_walker_parameter *kernel_walker_param,
                                             struct gpe_media_object_walker_parameter *walker_param)
{
    memset(walker_param, 0, sizeof(*walker_param));

    walker_param->use_scoreboard = kernel_walker_param->use_scoreboard;

    walker_param->block_resolution.x = kernel_walker_param->resolution_x;
    walker_param->block_resolution.y = kernel_walker_param->resolution_y;

    walker_param->global_resolution.x = kernel_walker_param->resolution_x;
    walker_param->global_resolution.y = kernel_walker_param->resolution_y;

    walker_param->global_outer_loop_stride.x = kernel_walker_param->resolution_x;
    walker_param->global_outer_loop_stride.y = 0;

    walker_param->global_inner_loop_unit.x = 0;
    walker_param->global_inner_loop_unit.y = kernel_walker_param->resolution_y;

    walker_param->local_loop_exec_count = 0xFFFF;  //MAX VALUE
    walker_param->global_loop_exec_count = 0xFFFF;  //MAX VALUE

    if (kernel_walker_param->no_dependency) {
        /* The no_dependency path is used for VPP */
        walker_param->scoreboard_mask = 0;
        walker_param->use_scoreboard = 0;
        // Raster scan walking pattern
        walker_param->local_outer_loop_stride.x = 0;
        walker_param->local_outer_loop_stride.y = 1;
        walker_param->local_inner_loop_unit.x = 1;
        walker_param->local_inner_loop_unit.y = 0;
        walker_param->local_end.x = kernel_walker_param->resolution_x - 1;
        walker_param->local_end.y = 0;
    } else {
        walker_param->local_end.x = 0;
        walker_param->local_end.y = 0;

        // 26 degree
        walker_param->scoreboard_mask = 0x0F;
        walker_param->local_outer_loop_stride.x = 1;
        walker_param->local_outer_loop_stride.y = 0;
        walker_param->local_inner_loop_unit.x = -2;
        walker_param->local_inner_loop_unit.y = 1;
    }
}
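
/*
 * Illustrative note: in the no_dependency (VPP) case the local loop steps
 * +1 in x per inner iteration and +1 in y per outer iteration, i.e. a plain
 * raster scan over the resolution_x * resolution_y block grid with the
 * hardware scoreboard disabled. The dependent case programs the 26-degree
 * pattern (inner step (-2, +1)) so a block is only dispatched once the
 * neighbours covered by the 4-bit scoreboard mask have completed.
 */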

void
gen8_gpe_reset_binding_table(VADriverContextP ctx, struct i965_gpe_context *gpe_context)
{
    unsigned int *binding_table;
    unsigned int binding_table_offset = gpe_context->surface_state_binding_table.binding_table_offset;
    int i;

    dri_bo_map(gpe_context->surface_state_binding_table.bo, 1);
    binding_table = (unsigned int*)((char *)gpe_context->surface_state_binding_table.bo->virtual + binding_table_offset);

    for (i = 0; i < gpe_context->surface_state_binding_table.max_entries; i++) {
        *(binding_table + i) = gpe_context->surface_state_binding_table.surface_state_offset + i * SURFACE_STATE_PADDED_SIZE_GEN8;
    }

    dri_bo_unmap(gpe_context->surface_state_binding_table.bo);
}
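
/*
 * Illustrative note: after this reset every binding-table slot i points at
 * the i-th padded surface state, e.g. slot 2 holds
 * surface_state_offset + 2 * SURFACE_STATE_PADDED_SIZE_GEN8, so a later
 * gen8_gpe_context_add_surface(..., index) only rewrites the one slot and
 * surface state it is given.
 */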

static void
gen8_gpe_set_2d_surface_state(struct gen8_surface_state *ss,
                              unsigned int vert_line_stride_offset,
                              unsigned int vert_line_stride,
                              unsigned int cacheability_control,
                              unsigned int format,
                              unsigned int tiling,
                              unsigned int width,
                              unsigned int height,
                              unsigned int pitch,
                              unsigned int base_offset,
                              unsigned int y_offset)
{
    memset(ss, 0, sizeof(*ss));

    ss->ss0.vert_line_stride_ofs = vert_line_stride_offset;
    ss->ss0.vert_line_stride = vert_line_stride;
    ss->ss0.surface_format = format;
    ss->ss0.surface_type = I965_SURFACE_2D;

    ss->ss1.surface_mocs = cacheability_control;

    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;

    ss->ss3.pitch = pitch - 1;

    ss->ss5.y_offset = y_offset;

    ss->ss7.shader_chanel_select_a = HSW_SCS_ALPHA;
    ss->ss7.shader_chanel_select_b = HSW_SCS_BLUE;
    ss->ss7.shader_chanel_select_g = HSW_SCS_GREEN;
    ss->ss7.shader_chanel_select_r = HSW_SCS_RED;

    ss->ss8.base_addr = base_offset;

    gen8_gpe_set_surface_tiling(ss, tiling);
}

static void
gen8_gpe_set_adv_surface_state(struct gen8_surface_state2 *ss,
                               unsigned int v_direction,
                               unsigned int cacheability_control,
                               unsigned int format,
                               unsigned int tiling,
                               unsigned int width,
                               unsigned int height,
                               unsigned int pitch,
                               unsigned int base_offset,
                               unsigned int y_cb_offset)
{
    memset(ss, 0, sizeof(*ss));

    ss->ss1.cbcr_pixel_offset_v_direction = v_direction;
    ss->ss1.width = width - 1;
    ss->ss1.height = height - 1;

    ss->ss2.surface_format = format;
    ss->ss2.interleave_chroma = 1;
    ss->ss2.pitch = pitch - 1;

    ss->ss3.y_offset_for_cb = y_cb_offset;

    ss->ss5.surface_object_mocs = cacheability_control;

    ss->ss6.base_addr = base_offset;

    gen8_gpe_set_surface2_tiling(ss, tiling);
}

static void
gen8_gpe_set_buffer2_surface_state(struct gen8_surface_state *ss,
                                   unsigned int cacheability_control,
                                   unsigned int format,
                                   unsigned int size,
                                   unsigned int pitch,
                                   unsigned int base_offset)
{
    memset(ss, 0, sizeof(*ss));

    ss->ss0.surface_format = format;
    ss->ss0.surface_type = I965_SURFACE_BUFFER;

    ss->ss1.surface_mocs = cacheability_control;

    ss->ss2.width = (size - 1) & 0x7F;
    ss->ss2.height = ((size - 1) & 0x1FFF80) >> 7;

    ss->ss3.depth = ((size - 1) & 0xFE00000) >> 21;
    ss->ss3.pitch = pitch - 1;

    ss->ss7.shader_chanel_select_a = HSW_SCS_ALPHA;
    ss->ss7.shader_chanel_select_b = HSW_SCS_BLUE;
    ss->ss7.shader_chanel_select_g = HSW_SCS_GREEN;
    ss->ss7.shader_chanel_select_r = HSW_SCS_RED;

    ss->ss8.base_addr = base_offset;
}
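
/*
 * Worked example (illustrative): the buffer's element count is encoded as
 * (size - 1) split across three fields -- bits 6:0 in ss2.width, bits 20:7
 * in ss2.height and bits 27:21 in ss3.depth. For a 4096-element buffer,
 * size - 1 = 0xFFF, so width = 0x7F, height = 0x1F and depth = 0.
 */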

void
gen8_gpe_context_add_surface(struct i965_gpe_context *gpe_context,
                             struct i965_gpe_surface *gpe_surface,
                             int index)
{
    char *buf;
    unsigned int tiling, swizzle, width, height, pitch, tile_alignment, y_offset = 0;
    unsigned int surface_state_offset = gpe_context->surface_state_binding_table.surface_state_offset +
                                        index * SURFACE_STATE_PADDED_SIZE_GEN8;
    unsigned int binding_table_offset = gpe_context->surface_state_binding_table.binding_table_offset +
                                        index * 4;
    struct i965_gpe_resource *gpe_resource = gpe_surface->gpe_resource;

    dri_bo_get_tiling(gpe_resource->bo, &tiling, &swizzle);

    dri_bo_map(gpe_context->surface_state_binding_table.bo, 1);
    buf = (char *)gpe_context->surface_state_binding_table.bo->virtual;
    *((unsigned int *)(buf + binding_table_offset)) = surface_state_offset;

    if (gpe_surface->is_2d_surface) {
        struct gen8_surface_state *ss = (struct gen8_surface_state *)(buf + surface_state_offset);
        unsigned int target_offset;

        width = gpe_resource->width;
        height = gpe_resource->height;
        pitch = gpe_resource->pitch;

        if (gpe_surface->is_override_offset) {
            y_offset = 0;
            target_offset = gpe_surface->offset;
        } else if (gpe_surface->is_uv_surface) {
            if (tiling == I915_TILING_Y) {
                tile_alignment = 32;
            } else if (tiling == I915_TILING_X) {
                tile_alignment = 8;
            } else
                tile_alignment = 1;

            y_offset = (gpe_resource->y_cb_offset % tile_alignment);
            target_offset = ALIGN_FLOOR(gpe_resource->y_cb_offset, tile_alignment) * pitch;
        } else {
            y_offset = 0;
            target_offset = 0;
        }

        if (gpe_surface->is_media_block_rw) {
            width = (ALIGN(width, 4) >> 2);
        }

        gen8_gpe_set_2d_surface_state(ss,
                                      gpe_surface->vert_line_stride_offset,
                                      gpe_surface->vert_line_stride,
                                      gpe_surface->cacheability_control,
                                      gpe_surface->format,
                                      tiling,
                                      width, height, pitch,
                                      gpe_resource->bo->offset64 + target_offset,
                                      y_offset);

        dri_bo_emit_reloc(gpe_context->surface_state_binding_table.bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          target_offset,
                          surface_state_offset + offsetof(struct gen8_surface_state, ss8),
                          gpe_resource->bo);
    } else if (gpe_surface->is_adv_surface) {
        struct gen8_surface_state2 *ss = (struct gen8_surface_state2 *)(buf + surface_state_offset);

        width = gpe_resource->width;
        height = gpe_resource->height;
        pitch = gpe_resource->pitch;

        gen8_gpe_set_adv_surface_state(ss,
                                       gpe_surface->v_direction,
                                       gpe_surface->cacheability_control,
                                       MFX_SURFACE_PLANAR_420_8,
                                       tiling,
                                       width, height, pitch,
                                       gpe_resource->bo->offset64,
                                       gpe_resource->y_cb_offset);

        dri_bo_emit_reloc(gpe_context->surface_state_binding_table.bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          0,
                          surface_state_offset + offsetof(struct gen8_surface_state2, ss6),
                          gpe_resource->bo);
    } else {
        struct gen8_surface_state *ss = (struct gen8_surface_state *)(buf + surface_state_offset);
        unsigned int format;

        assert(gpe_surface->is_buffer);

        if (gpe_surface->is_raw_buffer) {
            format = I965_SURFACEFORMAT_RAW;
            pitch = 1;
        } else {
            format = I965_SURFACEFORMAT_R32_UINT;
            pitch = sizeof(unsigned int);
        }

        gen8_gpe_set_buffer2_surface_state(ss,
                                           gpe_surface->cacheability_control,
                                           format,
                                           gpe_surface->size,
                                           pitch,
                                           gpe_resource->bo->offset64 + gpe_surface->offset);

        dri_bo_emit_reloc(gpe_context->surface_state_binding_table.bo,
                          I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                          gpe_surface->offset,
                          surface_state_offset + offsetof(struct gen8_surface_state, ss8),
                          gpe_resource->bo);
    }

    dri_bo_unmap(gpe_context->surface_state_binding_table.bo);
}
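
/*
 * Worked example (illustrative): for an NV12 surface on a Y-tiled BO with
 * y_cb_offset = 1088 rows, tile_alignment is 32, so y_offset = 1088 % 32 = 0
 * and the UV plane is addressed as base + 1088 * pitch. If y_cb_offset were
 * 1092, the state would instead carry y_offset = 4 and point the base at
 * row 1088, keeping the programmed base address tile aligned.
 */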

void
gen8_gpe_mi_conditional_batch_buffer_end(VADriverContextP ctx,
                                         struct intel_batchbuffer *batch,
                                         struct gpe_mi_conditional_batch_buffer_end_parameter *param)
{
    __OUT_BATCH(batch, (MI_CONDITIONAL_BATCH_BUFFER_END |
                        (1 << 21) |
                        (4 - 2))); /* Always use PPGTT */
    __OUT_BATCH(batch, param->compare_data);
    __OUT_RELOC64(batch,
                  param->bo,
                  I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION, 0,
                  param->offset);
}
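
/*
 * Illustrative note: MI_CONDITIONAL_BATCH_BUFFER_END ends the current batch
 * depending on how param->compare_data compares against the value at
 * param->bo + param->offset; encoders typically use it to skip the rest of
 * a multi-pass batch once an earlier pass has written its status. Bit 21
 * enables the compare-semaphore mode, and the address goes through the
 * PPGTT, as the comment above notes.
 */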

void
gen8_gpe_pipe_control(VADriverContextP ctx,
                      struct intel_batchbuffer *batch,
                      struct gpe_pipe_control_parameter *param)
{
    int render_target_cache_flush_enable = CMD_PIPE_CONTROL_WC_FLUSH;
    int dc_flush_enable = 0;
    int state_cache_invalidation_enable = 0;
    int constant_cache_invalidation_enable = 0;
    int vf_cache_invalidation_enable = 0;
    int instruction_cache_invalidation_enable = 0;
    int post_sync_operation = CMD_PIPE_CONTROL_NOWRITE;
    int use_global_gtt = CMD_PIPE_CONTROL_GLOBAL_GTT_GEN8;
    int cs_stall_enable = !param->disable_cs_stall;

    switch (param->flush_mode) {
    case PIPE_CONTROL_FLUSH_WRITE_CACHE:
        render_target_cache_flush_enable = CMD_PIPE_CONTROL_WC_FLUSH;
        dc_flush_enable = CMD_PIPE_CONTROL_DC_FLUSH;
        break;

    case PIPE_CONTROL_FLUSH_READ_CACHE:
        render_target_cache_flush_enable = 0;
        state_cache_invalidation_enable = CMD_PIPE_CONTROL_SC_INVALIDATION_GEN8;
        constant_cache_invalidation_enable = CMD_PIPE_CONTROL_CC_INVALIDATION_GEN8;
        vf_cache_invalidation_enable = CMD_PIPE_CONTROL_VFC_INVALIDATION_GEN8;
        instruction_cache_invalidation_enable = CMD_PIPE_CONTROL_IS_FLUSH;
        break;

    case PIPE_CONTROL_FLUSH_NONE:
    default:
        render_target_cache_flush_enable = 0;
        break;
    }

    if (param->bo) {
        post_sync_operation = CMD_PIPE_CONTROL_WRITE_QWORD;
        use_global_gtt = CMD_PIPE_CONTROL_LOCAL_PGTT_GEN8;
    } else {
        post_sync_operation = CMD_PIPE_CONTROL_NOWRITE;
        render_target_cache_flush_enable = CMD_PIPE_CONTROL_WC_FLUSH;
        state_cache_invalidation_enable = CMD_PIPE_CONTROL_SC_INVALIDATION_GEN8;
        constant_cache_invalidation_enable = CMD_PIPE_CONTROL_CC_INVALIDATION_GEN8;
        vf_cache_invalidation_enable = CMD_PIPE_CONTROL_VFC_INVALIDATION_GEN8;
        instruction_cache_invalidation_enable = CMD_PIPE_CONTROL_IS_FLUSH;
    }

    __OUT_BATCH(batch, CMD_PIPE_CONTROL | (6 - 2));
    __OUT_BATCH(batch, (render_target_cache_flush_enable |
                        dc_flush_enable |
                        state_cache_invalidation_enable |
                        constant_cache_invalidation_enable |
                        vf_cache_invalidation_enable |
                        instruction_cache_invalidation_enable |
                        post_sync_operation |
                        use_global_gtt |
                        cs_stall_enable |
                        CMD_PIPE_CONTROL_FLUSH_ENABLE));

    if (param->bo)
        __OUT_RELOC64(batch,
                      param->bo,
                      I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_RENDER,
                      param->offset);
    else {
        __OUT_BATCH(batch, 0);
        __OUT_BATCH(batch, 0);
    }

    __OUT_BATCH(batch, param->dw0);
    __OUT_BATCH(batch, param->dw1);
}
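
/*
 * Usage sketch (illustrative; `status_bo` is hypothetical): a caller that
 * wants prior writes flushed and a fence value written as the post-sync
 * immediate qword could do:
 *
 *     struct gpe_pipe_control_parameter pipe_control_param;
 *
 *     memset(&pipe_control_param, 0, sizeof(pipe_control_param));
 *     pipe_control_param.bo = status_bo;
 *     pipe_control_param.offset = 0;
 *     pipe_control_param.dw0 = 1;  // low dword of the immediate value
 *     pipe_control_param.flush_mode = PIPE_CONTROL_FLUSH_WRITE_CACHE;
 *     gen8_gpe_pipe_control(ctx, batch, &pipe_control_param);
 */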

void
i965_init_media_object_walker_parameter(struct gpe_encoder_kernel_walker_parameter *kernel_walker_param,
                                        struct gpe_media_object_walker_parameter *walker_param)
{
    memset(walker_param, 0, sizeof(*walker_param));

    walker_param->use_scoreboard = kernel_walker_param->use_scoreboard;

    walker_param->block_resolution.x = kernel_walker_param->resolution_x;
    walker_param->block_resolution.y = kernel_walker_param->resolution_y;

    walker_param->global_resolution.x = kernel_walker_param->resolution_x;
    walker_param->global_resolution.y = kernel_walker_param->resolution_y;

    walker_param->global_outer_loop_stride.x = kernel_walker_param->resolution_x;
    walker_param->global_outer_loop_stride.y = 0;

    walker_param->global_inner_loop_unit.x = 0;
    walker_param->global_inner_loop_unit.y = kernel_walker_param->resolution_y;

    walker_param->local_loop_exec_count = 0xFFFF;  //MAX VALUE
    walker_param->global_loop_exec_count = 0xFFFF;  //MAX VALUE

    if (kernel_walker_param->no_dependency) {
        walker_param->scoreboard_mask = 0;
        // Raster scan walking pattern
        walker_param->local_outer_loop_stride.x = 0;
        walker_param->local_outer_loop_stride.y = 1;
        walker_param->local_inner_loop_unit.x = 1;
        walker_param->local_inner_loop_unit.y = 0;
        walker_param->local_end.x = kernel_walker_param->resolution_x - 1;
        walker_param->local_end.y = 0;
    } else if (kernel_walker_param->use_vertical_raster_scan) {
        walker_param->scoreboard_mask = 0x1;
        walker_param->use_scoreboard = 0;
        // Raster scan walking pattern
        walker_param->local_outer_loop_stride.x = 1;
        walker_param->local_outer_loop_stride.y = 0;
        walker_param->local_inner_loop_unit.x = 0;
        walker_param->local_inner_loop_unit.y = 1;
        walker_param->local_end.x = 0;
        walker_param->local_end.y = kernel_walker_param->resolution_y - 1;
    } else {
        walker_param->local_end.x = 0;
        walker_param->local_end.y = 0;

        if (kernel_walker_param->walker_degree == WALKER_45Z_DEGREE) {
            // 45z degree
            walker_param->scoreboard_mask = 0x0F;

            walker_param->global_loop_exec_count = 0x3FF;
            walker_param->local_loop_exec_count = 0x3FF;

            walker_param->global_resolution.x = (unsigned int)(kernel_walker_param->resolution_x / 2.f) + 1;
            walker_param->global_resolution.y = 2 * kernel_walker_param->resolution_y;

            walker_param->global_start.x = 0;
            walker_param->global_start.y = 0;

            walker_param->global_outer_loop_stride.x = walker_param->global_resolution.x;
            walker_param->global_outer_loop_stride.y = 0;

            walker_param->global_inner_loop_unit.x = 0;
            walker_param->global_inner_loop_unit.y = walker_param->global_resolution.y;

            walker_param->block_resolution.x = walker_param->global_resolution.x;
            walker_param->block_resolution.y = walker_param->global_resolution.y;

            walker_param->local_start.x = 0;
            walker_param->local_start.y = 0;

            walker_param->local_outer_loop_stride.x = 1;
            walker_param->local_outer_loop_stride.y = 0;

            walker_param->local_inner_loop_unit.x = -1;
            walker_param->local_inner_loop_unit.y = 4;

            walker_param->middle_loop_extra_steps = 3;
            walker_param->mid_loop_unit_x = 0;
            walker_param->mid_loop_unit_y = 1;
        } else if (kernel_walker_param->walker_degree == WALKER_45_DEGREE) {
            walker_param->scoreboard_mask = 0x03;
            // 45 order in local loop
            walker_param->local_outer_loop_stride.x = 1;
            walker_param->local_outer_loop_stride.y = 0;
            walker_param->local_inner_loop_unit.x = -1;
            walker_param->local_inner_loop_unit.y = 1;
        } else if (kernel_walker_param->walker_degree == WALKER_26Z_DEGREE) {
            // 26z degree
            walker_param->scoreboard_mask = 0x7f;

            // z order in local loop
            walker_param->local_outer_loop_stride.x = 0;
            walker_param->local_outer_loop_stride.y = 1;
            walker_param->local_inner_loop_unit.x = 1;
            walker_param->local_inner_loop_unit.y = 0;

            walker_param->block_resolution.x = 2;
            walker_param->block_resolution.y = 2;

            walker_param->global_outer_loop_stride.x = 2;
            walker_param->global_outer_loop_stride.y = 0;

            walker_param->global_inner_loop_unit.x = 0xFFF - 4 + 1;
            walker_param->global_inner_loop_unit.y = 2;
        } else {
            // 26 degree
            walker_param->scoreboard_mask = 0x0F;
            walker_param->local_outer_loop_stride.x = 1;
            walker_param->local_outer_loop_stride.y = 0;
            walker_param->local_inner_loop_unit.x = -2;
            walker_param->local_inner_loop_unit.y = 1;
        }
    }
}
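
/*
 * Worked example (illustrative): for WALKER_45Z_DEGREE with a 120x68 block
 * grid, global_resolution becomes ((unsigned int)(120 / 2.f) + 1, 2 * 68)
 * = (61, 136); the halved/doubled global grid together with the (-1, +4)
 * local inner step and the 3-step middle loop realize the 45-degree zig-zag
 * dispatch order used for wavefront dependencies.
 */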

void
gen9_add_2d_gpe_surface(VADriverContextP ctx,
                        struct i965_gpe_context *gpe_context,
                        struct object_surface *obj_surface,
                        int is_uv_surface,
                        int is_media_block_rw,
                        unsigned int format,
                        int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_gpe_resource gpe_resource;
    struct i965_gpe_surface gpe_surface;

    memset(&gpe_surface, 0, sizeof(gpe_surface));

    i965_object_surface_to_2d_gpe_resource(&gpe_resource, obj_surface);
    gpe_surface.gpe_resource = &gpe_resource;
    gpe_surface.is_2d_surface = 1;
    gpe_surface.is_uv_surface = !!is_uv_surface;
    gpe_surface.is_media_block_rw = !!is_media_block_rw;

    gpe_surface.cacheability_control = i965->intel.mocs_state;
    gpe_surface.format = format;

    gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
    i965_free_gpe_resource(&gpe_resource);
}
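
/*
 * Usage sketch (illustrative; `input_obj_surface` is hypothetical): these
 * wrappers are the usual entry points, e.g. binding the Y plane of an input
 * surface for media block reads to binding-table slot 0:
 *
 *     gen9_add_2d_gpe_surface(ctx, gpe_context, input_obj_surface,
 *                             0,    // is_uv_surface
 *                             1,    // is_media_block_rw
 *                             I965_SURFACEFORMAT_R8_UNORM,
 *                             0);   // binding table index
 */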

void
gen9_add_adv_gpe_surface(VADriverContextP ctx,
                         struct i965_gpe_context *gpe_context,
                         struct object_surface *obj_surface,
                         int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_gpe_resource gpe_resource;
    struct i965_gpe_surface gpe_surface;

    memset(&gpe_surface, 0, sizeof(gpe_surface));

    i965_object_surface_to_2d_gpe_resource(&gpe_resource, obj_surface);
    gpe_surface.gpe_resource = &gpe_resource;
    gpe_surface.is_adv_surface = 1;
    gpe_surface.cacheability_control = i965->intel.mocs_state;
    gpe_surface.v_direction = 2;

    gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
    i965_free_gpe_resource(&gpe_resource);
}

void
gen9_add_buffer_gpe_surface(VADriverContextP ctx,
                            struct i965_gpe_context *gpe_context,
                            struct i965_gpe_resource *gpe_buffer,
                            int is_raw_buffer,
                            unsigned int size,
                            unsigned int offset,
                            int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_gpe_surface gpe_surface;

    memset(&gpe_surface, 0, sizeof(gpe_surface));

    gpe_surface.gpe_resource = gpe_buffer;
    gpe_surface.is_buffer = 1;
    gpe_surface.is_raw_buffer = !!is_raw_buffer;
    gpe_surface.cacheability_control = i965->intel.mocs_state;
    gpe_surface.size = size;
    gpe_surface.offset = offset;

    gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
}

void
gen9_add_buffer_2d_gpe_surface(VADriverContextP ctx,
                               struct i965_gpe_context *gpe_context,
                               struct i965_gpe_resource *gpe_buffer,
                               int is_media_block_rw,
                               unsigned int format,
                               int index)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_gpe_surface gpe_surface;

    memset(&gpe_surface, 0, sizeof(gpe_surface));

    gpe_surface.gpe_resource = gpe_buffer;
    gpe_surface.is_2d_surface = 1;
    gpe_surface.is_media_block_rw = !!is_media_block_rw;
    gpe_surface.cacheability_control = i965->intel.mocs_state;
    gpe_surface.format = format;

    gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
}

void
gen9_add_dri_buffer_gpe_surface(VADriverContextP ctx,
                                struct i965_gpe_context *gpe_context,
                                dri_bo *bo,
                                int is_raw_buffer,
                                unsigned int size,
                                unsigned int offset,
                                int index)
{
    struct i965_gpe_resource gpe_resource;

    i965_dri_object_to_buffer_gpe_resource(&gpe_resource, bo);
    gen9_add_buffer_gpe_surface(ctx,
                                gpe_context,
                                &gpe_resource,
                                is_raw_buffer,
                                size,
                                offset,
                                index);

    i965_free_gpe_resource(&gpe_resource);
}

void
i965_gpe_table_init(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct i965_gpe_table *gpe = &i965->gpe_table;

    if (IS_GEN8(i965->intel.device_info)) {
        gpe->context_init = gen8_gpe_context_init;
        gpe->context_destroy = gen8_gpe_context_destroy;
        gpe->context_add_surface = gen8_gpe_context_add_surface;
        gpe->reset_binding_table = gen8_gpe_reset_binding_table;
        gpe->load_kernels = gen8_gpe_load_kernels;
        gpe->setup_interface_data = gen8_gpe_setup_interface_data;
        gpe->set_dynamic_buffer = gen8_gpe_context_set_dynamic_buffer;
        gpe->media_object = gen8_gpe_media_object;
        gpe->media_object_walker = gen8_gpe_media_object_walker;
        gpe->media_state_flush = gen8_gpe_media_state_flush;
        gpe->pipe_control = gen8_gpe_pipe_control;
        gpe->pipeline_end = gen8_gpe_pipeline_end;
        gpe->pipeline_setup = gen8_gpe_pipeline_setup;
        gpe->mi_conditional_batch_buffer_end = gen8_gpe_mi_conditional_batch_buffer_end;
        gpe->mi_batch_buffer_start = gen8_gpe_mi_batch_buffer_start;
        gpe->mi_load_register_reg = gen8_gpe_mi_load_register_reg;
        gpe->mi_load_register_imm = gen8_gpe_mi_load_register_imm;
        gpe->mi_load_register_mem = gen8_gpe_mi_load_register_mem;
        gpe->mi_store_register_mem = gen8_gpe_mi_store_register_mem;
        gpe->mi_store_data_imm = gen8_gpe_mi_store_data_imm;
        gpe->mi_flush_dw = gen8_gpe_mi_flush_dw;
    } else if (IS_GEN9(i965->intel.device_info)) {
        gpe->context_init = gen8_gpe_context_init;
        gpe->context_destroy = gen8_gpe_context_destroy;
        gpe->context_add_surface = gen9_gpe_context_add_surface;
        gpe->reset_binding_table = gen9_gpe_reset_binding_table;
        gpe->load_kernels = gen8_gpe_load_kernels;
        gpe->setup_interface_data = gen8_gpe_setup_interface_data;
        gpe->set_dynamic_buffer = gen8_gpe_context_set_dynamic_buffer;
        gpe->media_object = gen8_gpe_media_object;
        gpe->media_object_walker = gen8_gpe_media_object_walker;
        gpe->media_state_flush = gen8_gpe_media_state_flush;
        gpe->pipe_control = gen8_gpe_pipe_control;
        gpe->pipeline_end = gen9_gpe_pipeline_end;
        gpe->pipeline_setup = gen9_gpe_pipeline_setup;
        gpe->mi_conditional_batch_buffer_end = gen9_gpe_mi_conditional_batch_buffer_end;
        gpe->mi_batch_buffer_start = gen8_gpe_mi_batch_buffer_start;
        gpe->mi_load_register_reg = gen8_gpe_mi_load_register_reg;
        gpe->mi_load_register_imm = gen8_gpe_mi_load_register_imm;
        gpe->mi_load_register_mem = gen8_gpe_mi_load_register_mem;
        gpe->mi_store_register_mem = gen8_gpe_mi_store_register_mem;
        gpe->mi_store_data_imm = gen8_gpe_mi_store_data_imm;
        gpe->mi_flush_dw = gen8_gpe_mi_flush_dw;
    }
    // TODO: for other platforms
}
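
/*
 * Illustrative note: the rest of the driver is expected to dispatch through
 * this table rather than calling the gen-specific symbols directly, so
 * per-platform differences stay in this one place. A hypothetical caller
 * (gpe_context, batch and pipe_control_param assumed set up elsewhere):
 *
 *     struct i965_driver_data *i965 = i965_driver_data(ctx);
 *
 *     i965->gpe_table.media_state_flush(ctx, gpe_context, batch);
 *     i965->gpe_table.pipe_control(ctx, batch, &pipe_control_param);
 */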

void
i965_gpe_table_terminate(VADriverContextP ctx)