2 * Copyright © 2012 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Xiang Haihao <haihao.xiang@intel.com>
27 #ifndef _I965_GPE_UTILS_H_
28 #define _I965_GPE_UTILS_H_
31 #include <intel_bufmgr.h>
33 #include "i965_defines.h"
34 #include "i965_structs.h"
36 #define MAX_GPE_KERNELS 32
38 struct i965_buffer_surface {
40 unsigned int num_blocks;
41 unsigned int size_block;
46 I965_GPE_RESOURCE_BUFFER = 0,
50 struct i965_gpe_resource {
64 struct gpe_dynamic_state_parameter {
67 unsigned int curbe_offset;
68 unsigned int idrt_offset;
69 unsigned int sampler_offset;
72 #define PIPE_CONTROL_FLUSH_NONE 0
73 #define PIPE_CONTROL_FLUSH_WRITE_CACHE 1
74 #define PIPE_CONTROL_FLUSH_READ_CACHE 2
76 struct gpe_pipe_control_parameter {
79 unsigned int flush_mode;
80 unsigned int disable_cs_stall;
85 struct i965_gpe_context {
88 unsigned int length; /* in bytes */
89 unsigned int max_entries;
90 unsigned int binding_table_offset;
91 unsigned int surface_state_offset;
92 } surface_state_binding_table;
96 unsigned int max_entries;
97 unsigned int entry_size; /* in bytes */
103 unsigned int length; /* in bytes */
109 unsigned int max_entries;
110 unsigned int entry_size; /* in bytes */
115 unsigned int gpgpu_mode : 1;
116 unsigned int pad0 : 7;
117 unsigned int max_num_threads : 16;
118 unsigned int num_urb_entries : 8;
119 unsigned int urb_entry_size : 16;
120 unsigned int curbe_allocation_size : 16;
123 /* vfe_desc5/6/7 is used to determine whether the HW scoreboard is used.
124 * If scoreboard is not used, don't touch them
129 unsigned int mask: 8;
130 unsigned int pad: 22;
131 unsigned int type: 1;
132 unsigned int enable: 1;
164 unsigned int num_kernels;
165 struct i965_kernel kernels[MAX_GPE_KERNELS];
170 unsigned int end_offset;
180 unsigned int end_offset;
184 struct gpe_mi_flush_dw_parameter {
187 unsigned int video_pipeline_cache_invalidate;
192 struct gpe_mi_store_data_imm_parameter {
194 unsigned int is_qword;
200 struct gpe_mi_store_register_mem_parameter {
203 unsigned int mmio_offset;
206 struct gpe_mi_load_register_mem_parameter {
209 unsigned int mmio_offset;
212 struct gpe_mi_load_register_imm_parameter {
214 unsigned int mmio_offset;
217 struct gpe_mi_load_register_reg_parameter {
218 unsigned int src_mmio_offset;
219 unsigned int dst_mmio_offset;
222 struct gpe_mi_math_parameter {
223 unsigned int num_instructions;
224 unsigned int *instruction_list;
227 struct gpe_mi_conditional_batch_buffer_end_parameter {
230 unsigned int compare_mask_mode_disabled;
231 unsigned int compare_data;
234 struct gpe_mi_batch_buffer_start_parameter {
237 unsigned int is_second_level;
238 unsigned int use_global_gtt;
241 struct gpe_mi_copy_mem_parameter {
243 unsigned int src_offset;
245 unsigned int dst_offset;
/* NOTE(review): this chunk appears to be a line-sampled extraction; the stray
 * leading numbers on each line are original line numbers fused into the text
 * and should be stripped when the file is restored. */
/* Tear down a GPE (media General Purpose Engine) context. */
248 void i965_gpe_context_destroy(struct i965_gpe_context *gpe_context);
/* Initialize a GPE context for the given VA driver context. */
249 void i965_gpe_context_init(VADriverContextP ctx,
250 struct i965_gpe_context *gpe_context);
/* Load up to MAX_GPE_KERNELS kernels from kernel_list into the context. */
251 void i965_gpe_load_kernels(VADriverContextP ctx,
252 struct i965_gpe_context *gpe_context,
253 struct i965_kernel *kernel_list,
254 unsigned int num_kernels);
/* Emit Gen6 media pipeline setup commands into the batch buffer. */
255 void gen6_gpe_pipeline_setup(VADriverContextP ctx,
256 struct i965_gpe_context *gpe_context,
257 struct intel_batchbuffer *batch);
/* Bind a SURFACE_STATE (adv/2D "surface2") entry for obj_surface at the
 * given binding-table / surface-state offsets (pre-Gen7 layout). */
258 void i965_gpe_surface2_setup(VADriverContextP ctx,
259 struct i965_gpe_context *gpe_context,
260 struct object_surface *obj_surface,
261 unsigned long binding_table_offset,
262 unsigned long surface_state_offset);
263 void i965_gpe_media_rw_surface_setup(VADriverContextP ctx,
264 struct i965_gpe_context *gpe_context,
265 struct object_surface *obj_surface,
266 unsigned long binding_table_offset,
267 unsigned long surface_state_offset,
/* Bind a buffer surface into the binding table.
 * NOTE(review): "suface" is a long-standing typo in the public name; it
 * cannot be renamed in this header without breaking existing callers. */
269 void i965_gpe_buffer_suface_setup(VADriverContextP ctx,
270 struct i965_gpe_context *gpe_context,
271 struct i965_buffer_surface *buffer_surface,
272 unsigned long binding_table_offset,
273 unsigned long surface_state_offset);
/* Gen7 variant of the "surface2" binding-table setup. */
274 void gen7_gpe_surface2_setup(VADriverContextP ctx,
275 struct i965_gpe_context *gpe_context,
276 struct object_surface *obj_surface,
277 unsigned long binding_table_offset,
278 unsigned long surface_state_offset);
279 void gen7_gpe_media_rw_surface_setup(VADriverContextP ctx,
280 struct i965_gpe_context *gpe_context,
281 struct object_surface *obj_surface,
282 unsigned long binding_table_offset,
283 unsigned long surface_state_offset,
/* Gen7 variant of the buffer-surface binding-table setup
 * ("suface" typo preserved for ABI/API compatibility). */
285 void gen7_gpe_buffer_suface_setup(VADriverContextP ctx,
286 struct i965_gpe_context *gpe_context,
287 struct i965_buffer_surface *buffer_surface,
288 unsigned long binding_table_offset,
289 unsigned long surface_state_offset);
290 void gen75_gpe_media_chroma_surface_setup(VADriverContextP ctx,
291 struct i965_gpe_context *gpe_context,
292 struct object_surface *obj_surface,
293 unsigned long binding_table_offset,
294 unsigned long surface_state_offset,
/* Gen8 variant of the "surface2" binding-table setup. */
297 extern void gen8_gpe_surface2_setup(VADriverContextP ctx,
298 struct i965_gpe_context *gpe_context,
299 struct object_surface *obj_surface,
300 unsigned long binding_table_offset,
301 unsigned long surface_state_offset);
302 extern void gen8_gpe_media_rw_surface_setup(VADriverContextP ctx,
303 struct i965_gpe_context *gpe_context,
304 struct object_surface *obj_surface,
305 unsigned long binding_table_offset,
306 unsigned long surface_state_offset,
/* Gen8 variant of the buffer-surface binding-table setup
 * ("suface" typo preserved for ABI/API compatibility). */
308 extern void gen8_gpe_buffer_suface_setup(VADriverContextP ctx,
309 struct i965_gpe_context *gpe_context,
310 struct i965_buffer_surface *buffer_surface,
311 unsigned long binding_table_offset,
312 unsigned long surface_state_offset);
313 extern void gen8_gpe_media_chroma_surface_setup(VADriverContextP ctx,
314 struct i965_gpe_context *gpe_context,
315 struct object_surface *obj_surface,
316 unsigned long binding_table_offset,
317 unsigned long surface_state_offset,
/* Emit Gen8 media pipeline setup commands into the batch buffer. */
320 void gen8_gpe_pipeline_setup(VADriverContextP ctx,
321 struct i965_gpe_context *gpe_context,
322 struct intel_batchbuffer *batch);
324 gen8_gpe_context_set_dynamic_buffer(VADriverContextP ctx,
325 struct i965_gpe_context *gpe_context,
326 struct gpe_dynamic_state_parameter *ds);
/* Gen8+ lifecycle counterparts of the i965_gpe_context_* functions above. */
329 void gen8_gpe_context_destroy(struct i965_gpe_context *gpe_context);
330 void gen8_gpe_context_init(VADriverContextP ctx,
331 struct i965_gpe_context *gpe_context);
/* Load kernels into a Gen8+ GPE context (bounded by MAX_GPE_KERNELS). */
333 void gen8_gpe_load_kernels(VADriverContextP ctx,
334 struct i965_gpe_context *gpe_context,
335 struct i965_kernel *kernel_list,
336 unsigned int num_kernels);
/* Emit Gen9 media pipeline setup commands into the batch buffer. */
338 void gen9_gpe_pipeline_setup(VADriverContextP ctx,
339 struct i965_gpe_context *gpe_context,
340 struct intel_batchbuffer *batch);
/* End-of-pipeline bookkeeping; Gen9-only (no pre-Gen9 counterpart here). */
342 void gen9_gpe_pipeline_end(VADriverContextP ctx,
343 struct i965_gpe_context *gpe_context,
344 struct intel_batchbuffer *batch);
346 Bool i965_allocate_gpe_resource(dri_bufmgr *bufmgr,
347 struct i965_gpe_resource *res,
/* Wrap an object_surface as a 2D GPE resource (no copy implied by the
 * signature; ownership semantics live in the .c — confirm there). */
351 void i965_object_surface_to_2d_gpe_resource(struct i965_gpe_resource *res,
352 struct object_surface *obj_surface);
/* Same as above, with an explicit pitch/size alignment in bytes —
 * presumably; verify units against the implementation. */
354 void i965_object_surface_to_2d_gpe_resource_with_align(struct i965_gpe_resource *res,
355 struct object_surface *obj_surface,
356 unsigned int alignment);
358 void i965_dri_object_to_buffer_gpe_resource(struct i965_gpe_resource *res,
361 void i965_dri_object_to_2d_gpe_resource(struct i965_gpe_resource *res,
/* Fill the resource's backing storage with zeros. */
367 void i965_zero_gpe_resource(struct i965_gpe_resource *res);
/* Release the resource's backing storage. */
369 void i965_free_gpe_resource(struct i965_gpe_resource *res);
/* Map the resource for CPU access; pair with i965_unmap_gpe_resource().
 * NOTE(review): return value on failure (NULL?) not visible here — check. */
371 void *i965_map_gpe_resource(struct i965_gpe_resource *res);
373 void i965_unmap_gpe_resource(struct i965_gpe_resource *res);
/* --- MI command emitters: each appends one MI_* command to the batch,
 *     parameterized by the matching gpe_mi_*_parameter struct. --- */
/* MI_FLUSH_DW */
375 void gen8_gpe_mi_flush_dw(VADriverContextP ctx,
376 struct intel_batchbuffer *batch,
377 struct gpe_mi_flush_dw_parameter *params);
/* MI_STORE_DATA_IMM */
379 void gen8_gpe_mi_store_data_imm(VADriverContextP ctx,
380 struct intel_batchbuffer *batch,
381 struct gpe_mi_store_data_imm_parameter *params);
/* MI_STORE_REGISTER_MEM */
383 void gen8_gpe_mi_store_register_mem(VADriverContextP ctx,
384 struct intel_batchbuffer *batch,
385 struct gpe_mi_store_register_mem_parameter *params);
/* MI_LOAD_REGISTER_MEM */
387 void gen8_gpe_mi_load_register_mem(VADriverContextP ctx,
388 struct intel_batchbuffer *batch,
389 struct gpe_mi_load_register_mem_parameter *params);
/* MI_LOAD_REGISTER_IMM */
391 void gen8_gpe_mi_load_register_imm(VADriverContextP ctx,
392 struct intel_batchbuffer *batch,
393 struct gpe_mi_load_register_imm_parameter *params);
/* MI_LOAD_REGISTER_REG */
395 void gen8_gpe_mi_load_register_reg(VADriverContextP ctx,
396 struct intel_batchbuffer *batch,
397 struct gpe_mi_load_register_reg_parameter *params);
/* MI_MATH — Gen9+ only. */
399 void gen9_gpe_mi_math(VADriverContextP ctx,
400 struct intel_batchbuffer *batch,
401 struct gpe_mi_math_parameter *params);
/* MI_CONDITIONAL_BATCH_BUFFER_END — Gen9+ only. */
403 void gen9_gpe_mi_conditional_batch_buffer_end(VADriverContextP ctx,
404 struct intel_batchbuffer *batch,
405 struct gpe_mi_conditional_batch_buffer_end_parameter *params);
/* MI_BATCH_BUFFER_START */
407 void gen8_gpe_mi_batch_buffer_start(VADriverContextP ctx,
408 struct intel_batchbuffer *batch,
409 struct gpe_mi_batch_buffer_start_parameter *params);
412 struct gpe_media_object_inline_data {
417 unsigned int reserved: 16;
423 struct gpe_media_object_parameter {
424 unsigned int use_scoreboard;
425 unsigned int scoreboard_x;
426 unsigned int scoreboard_y;
427 unsigned int scoreboard_mask;
428 unsigned int interface_offset;
430 unsigned int inline_size;
433 struct i965_gpe_surface {
434 unsigned int is_buffer: 1;
435 unsigned int is_2d_surface: 1;
436 unsigned int is_adv_surface: 1;
437 unsigned int is_uv_surface: 1;
438 unsigned int is_media_block_rw: 1;
439 unsigned int is_raw_buffer: 1;
440 unsigned int is_16bpp : 1;
441 /* use the override_offset for 2d_surface */
442 unsigned int is_override_offset : 1;
444 unsigned int vert_line_stride_offset;
445 unsigned int vert_line_stride;
446 unsigned int cacheability_control;
447 unsigned int format; // 2d surface only
448 unsigned int v_direction; // adv surface only
449 unsigned int size; // buffer only
452 struct i965_gpe_resource *gpe_resource;
456 gen9_gpe_reset_binding_table(VADriverContextP ctx,
457 struct i965_gpe_context *gpe_context);
/* Map the context's CURBE (constant URB entry) buffer for CPU writes;
 * pair with i965_gpe_context_unmap_curbe().
 * NOTE(review): failure behavior (NULL return?) not visible here — check. */
459 void *i965_gpe_context_map_curbe(struct i965_gpe_context *gpe_context);
462 void i965_gpe_context_unmap_curbe(struct i965_gpe_context *gpe_context);
/* Populate the Gen8+ interface descriptor (IDRT) data for the context. */
465 void gen8_gpe_setup_interface_data(VADriverContextP ctx,
466 struct i965_gpe_context *gpe_context);
468 gen9_gpe_context_add_surface(struct i965_gpe_context *gpe_context,
469 struct i965_gpe_surface *gpe_surface,
473 i965_gpe_allocate_2d_resource(dri_bufmgr *bufmgr,
474 struct i965_gpe_resource *res,
480 struct gpe_walker_xy {
490 struct gpe_media_object_walker_parameter {
492 unsigned int inline_size;
493 unsigned int interface_offset;
494 unsigned int use_scoreboard;
495 unsigned int scoreboard_mask;
496 unsigned int group_id_loop_select;
497 unsigned int color_count_minus1;
498 unsigned int mid_loop_unit_x;
499 unsigned int mid_loop_unit_y;
500 unsigned int middle_loop_extra_steps;
501 unsigned int local_loop_exec_count;
502 unsigned int global_loop_exec_count;
503 struct gpe_walker_xy block_resolution;
504 struct gpe_walker_xy local_start;
505 struct gpe_walker_xy local_end;
506 struct gpe_walker_xy local_outer_loop_stride;
507 struct gpe_walker_xy local_inner_loop_unit;
508 struct gpe_walker_xy global_resolution;
509 struct gpe_walker_xy global_start;
510 struct gpe_walker_xy global_outer_loop_stride;
511 struct gpe_walker_xy global_inner_loop_unit;
515 WALKER_NO_DEGREE = 0,
521 struct gpe_encoder_kernel_walker_parameter {
522 unsigned int walker_degree;
523 unsigned int use_scoreboard;
524 unsigned int scoreboard_mask;
525 unsigned int no_dependency;
526 unsigned int resolution_x;
527 unsigned int resolution_y;
528 unsigned int use_vertical_raster_scan;
532 gen8_gpe_media_object(VADriverContextP ctx,
533 struct i965_gpe_context *gpe_context,
534 struct intel_batchbuffer *batch,
535 struct gpe_media_object_parameter *param);
538 gen8_gpe_media_state_flush(VADriverContextP ctx,
539 struct i965_gpe_context *gpe_context,
540 struct intel_batchbuffer *batch);
543 gen8_gpe_media_object_walker(VADriverContextP ctx,
544 struct i965_gpe_context *gpe_context,
545 struct intel_batchbuffer *batch,
546 struct gpe_media_object_walker_parameter *param);
549 struct intel_vpp_kernel_walker_parameter {
550 unsigned int use_scoreboard;
551 unsigned int scoreboard_mask;
552 unsigned int no_dependency;
553 unsigned int resolution_x;
554 unsigned int resolution_y;
558 intel_vpp_init_media_object_walker_parameter(struct intel_vpp_kernel_walker_parameter *kernel_walker_param,
559 struct gpe_media_object_walker_parameter *walker_param);
561 gen8_gpe_reset_binding_table(VADriverContextP ctx, struct i965_gpe_context *gpe_context);
564 gen8_gpe_context_add_surface(struct i965_gpe_context *gpe_context,
565 struct i965_gpe_surface *gpe_surface,
569 gen8_gpe_mi_conditional_batch_buffer_end(VADriverContextP ctx,
570 struct intel_batchbuffer *batch,
571 struct gpe_mi_conditional_batch_buffer_end_parameter *param);
574 gen8_gpe_pipe_control(VADriverContextP ctx,
575 struct intel_batchbuffer *batch,
576 struct gpe_pipe_control_parameter *param);
579 i965_init_media_object_walker_parameter(struct gpe_encoder_kernel_walker_parameter *kernel_walker_param,
580 struct gpe_media_object_walker_parameter *walker_param);
583 i965_add_2d_gpe_surface(VADriverContextP ctx,
584 struct i965_gpe_context *gpe_context,
585 struct object_surface *obj_surface,
587 int is_media_block_rw,
591 i965_add_adv_gpe_surface(VADriverContextP ctx,
592 struct i965_gpe_context *gpe_context,
593 struct object_surface *obj_surface,
596 i965_add_buffer_gpe_surface(VADriverContextP ctx,
597 struct i965_gpe_context *gpe_context,
598 struct i965_gpe_resource *gpe_buffer,
604 i965_add_buffer_2d_gpe_surface(VADriverContextP ctx,
605 struct i965_gpe_context *gpe_context,
606 struct i965_gpe_resource *gpe_buffer,
607 int is_media_block_rw,
611 gen9_add_dri_buffer_gpe_surface(VADriverContextP ctx,
612 struct i965_gpe_context *gpe_context,
619 struct i965_gpe_table {
620 void (*context_init)(VADriverContextP ctx,
621 struct i965_gpe_context *gpe_context);
623 void (*context_destroy)(struct i965_gpe_context *gpe_context);
625 void (*context_add_surface)(struct i965_gpe_context *gpe_context,
626 struct i965_gpe_surface *gpe_surface,
629 void (*reset_binding_table)(VADriverContextP ctx, struct i965_gpe_context *gpe_context);
631 void (*load_kernels)(VADriverContextP ctx,
632 struct i965_gpe_context *gpe_context,
633 struct i965_kernel *kernel_list,
634 unsigned int num_kernels);
636 void (*setup_interface_data)(VADriverContextP ctx, struct i965_gpe_context *gpe_context);
638 void (*set_dynamic_buffer)(VADriverContextP ctx,
639 struct i965_gpe_context *gpe_context,
640 struct gpe_dynamic_state_parameter *ds);
642 void (*media_object)(VADriverContextP ctx,
643 struct i965_gpe_context *gpe_context,
644 struct intel_batchbuffer *batch,
645 struct gpe_media_object_parameter *param);
647 void (*media_object_walker)(VADriverContextP ctx,
648 struct i965_gpe_context *gpe_context,
649 struct intel_batchbuffer *batch,
650 struct gpe_media_object_walker_parameter *param);
652 void (*media_state_flush)(VADriverContextP ctx,
653 struct i965_gpe_context *gpe_context,
654 struct intel_batchbuffer *batch);
657 void (*pipe_control)(VADriverContextP ctx,
658 struct intel_batchbuffer *batch,
659 struct gpe_pipe_control_parameter *param);
661 void (*pipeline_end)(VADriverContextP ctx,
662 struct i965_gpe_context *gpe_context,
663 struct intel_batchbuffer *batch); // only available on gen9+
665 void (*pipeline_setup)(VADriverContextP ctx,
666 struct i965_gpe_context *gpe_context,
667 struct intel_batchbuffer *batch);
669 void (*mi_conditional_batch_buffer_end)(VADriverContextP ctx,
670 struct intel_batchbuffer *batch,
671 struct gpe_mi_conditional_batch_buffer_end_parameter *param);
673 void (*mi_batch_buffer_start)(VADriverContextP ctx,
674 struct intel_batchbuffer *batch,
675 struct gpe_mi_batch_buffer_start_parameter *params);
677 void (*mi_load_register_reg)(VADriverContextP ctx,
678 struct intel_batchbuffer *batch,
679 struct gpe_mi_load_register_reg_parameter *params);
681 void (*mi_load_register_imm)(VADriverContextP ctx,
682 struct intel_batchbuffer *batch,
683 struct gpe_mi_load_register_imm_parameter *params);
685 void (*mi_load_register_mem)(VADriverContextP ctx,
686 struct intel_batchbuffer *batch,
687 struct gpe_mi_load_register_mem_parameter *params);
690 void (*mi_store_register_mem)(VADriverContextP ctx,
691 struct intel_batchbuffer *batch,
692 struct gpe_mi_store_register_mem_parameter *params);
694 void (*mi_store_data_imm)(VADriverContextP ctx,
695 struct intel_batchbuffer *batch,
696 struct gpe_mi_store_data_imm_parameter *params);
698 void (*mi_flush_dw)(VADriverContextP ctx,
699 struct intel_batchbuffer *batch,
700 struct gpe_mi_flush_dw_parameter *params);
702 void (*mi_copy_mem_mem)(VADriverContextP ctx,
703 struct intel_batchbuffer *batch,
704 struct gpe_mi_copy_mem_parameter *params);
708 i965_gpe_table_init(VADriverContextP ctx);
711 i965_gpe_table_terminate(VADriverContextP ctx);
713 #endif /* _I965_GPE_UTILS_H_ */