#include "vp9_probs.h"
#include "gen9_vp9_const_def.h"
+#define MAX_VP9_ENCODER_FRAMERATE 60
#define MAX_VP9_ENCODER_SURFACES 64
#define MAX_URB_SIZE 4096 /* In register */
#define BRC_KERNEL_AVBR 0x0040
#define BRC_KERNEL_CQL 0x0080
-#define DEFAULT_MOCS 0x02
#define VP9_PIC_STATE_BUFFER_SIZE 192
-typedef struct _intel_kernel_header_
-{
+typedef struct _intel_kernel_header_ {
uint32_t reserved : 6;
uint32_t kernel_start_pointer : 26;
} intel_kernel_header;
static uint32_t intel_convert_sign_mag(int val, int sign_bit_pos)
{
uint32_t ret_val = 0;
- if (val < 0)
- {
+ if (val < 0) {
val = -val;
ret_val = ((1 << (sign_bit_pos - 1)) | (val & ((1 << (sign_bit_pos - 1)) - 1)));
- }
- else
- {
+ } else {
ret_val = val & ((1 << (sign_bit_pos - 1)) - 1);
}
return ret_val;
pinvalid_entry = &(pkh_table->VP9BRC_Update) + 1;
next_krnoffset = binary_size;
- if ((operation == INTEL_VP9_ENC_SCALING4X) || (operation == INTEL_VP9_ENC_SCALING2X))
- {
+ if ((operation == INTEL_VP9_ENC_SCALING4X) || (operation == INTEL_VP9_ENC_SCALING2X)) {
pcurr_header = &pkh_table->PLY_DSCALE;
- }
- else if (operation == INTEL_VP9_ENC_ME)
- {
+ } else if (operation == INTEL_VP9_ENC_ME) {
pcurr_header = &pkh_table->VP9_ME_P;
- }
- else if (operation == INTEL_VP9_ENC_MBENC)
- {
+ } else if (operation == INTEL_VP9_ENC_MBENC) {
pcurr_header = &pkh_table->VP9_Enc_I_32x32;
- }
- else if (operation == INTEL_VP9_ENC_DYS)
- {
+ } else if (operation == INTEL_VP9_ENC_DYS) {
pcurr_header = &pkh_table->VP9_DYS;
- }
- else if (operation == INTEL_VP9_ENC_BRC)
- {
+ } else if (operation == INTEL_VP9_ENC_BRC) {
pcurr_header = &pkh_table->VP9BRC_Intra_Distortion;
- }
- else
- {
+ } else {
return false;
}
ret_kernel->bin = (const BIN_PTR *)(bin_start + (pcurr_header->kernel_start_pointer << 6));
pnext_header = (pcurr_header + 1);
- if (pnext_header < pinvalid_entry)
- {
+ if (pnext_header < pinvalid_entry) {
next_krnoffset = pnext_header->kernel_start_pointer << 6;
}
ret_kernel->size = next_krnoffset - (pcurr_header->kernel_start_pointer << 6);
* the expected, it is unnecessary to allocate it again
*/
if (vp9_surface->dys_frame_width == surface_param->frame_width &&
- vp9_surface->dys_frame_width == surface_param->frame_width)
+ vp9_surface->dys_frame_height == surface_param->frame_height)
return VA_STATUS_SUCCESS;
if (vp9_surface->dys_4x_surface_obj) {
i965_free_gpe_resource(&vme_context->res_brc_mmdk_pak_buffer);
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_brc_history_buffer,
- VP9_BRC_HISTORY_BUFFER_SIZE,
- "Brc History buffer");
+ &vme_context->res_brc_history_buffer,
+ VP9_BRC_HISTORY_BUFFER_SIZE,
+ "Brc History buffer");
if (!allocate_flag)
goto failed_allocation;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_brc_const_data_buffer,
- VP9_BRC_CONSTANTSURFACE_SIZE,
- "Brc Constant buffer");
+ &vme_context->res_brc_const_data_buffer,
+ VP9_BRC_CONSTANTSURFACE_SIZE,
+ "Brc Constant buffer");
if (!allocate_flag)
goto failed_allocation;
res_size = ALIGN(sizeof(vp9_mbenc_curbe_data), 64) + 128 +
- ALIGN(sizeof(struct gen8_interface_descriptor_data), 64) * NUM_VP9_MBENC;
+ ALIGN(sizeof(struct gen8_interface_descriptor_data), 64) * NUM_VP9_MBENC;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_brc_mbenc_curbe_write_buffer,
- res_size,
- "Brc Curbe write");
+ &vme_context->res_brc_mbenc_curbe_write_buffer,
+ res_size,
+ "Brc Curbe write");
if (!allocate_flag)
goto failed_allocation;
res_size = VP9_PIC_STATE_BUFFER_SIZE * 4;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_pic_state_brc_read_buffer,
- res_size,
- "Pic State Brc_read");
+ &vme_context->res_pic_state_brc_read_buffer,
+ res_size,
+ "Pic State Brc_read");
if (!allocate_flag)
goto failed_allocation;
res_size = VP9_PIC_STATE_BUFFER_SIZE * 4;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_pic_state_brc_write_hfw_read_buffer,
- res_size,
- "Pic State Brc_write Hfw_Read");
+ &vme_context->res_pic_state_brc_write_hfw_read_buffer,
+ res_size,
+ "Pic State Brc_write Hfw_Read");
if (!allocate_flag)
goto failed_allocation;
res_size = VP9_PIC_STATE_BUFFER_SIZE * 4;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_pic_state_hfw_write_buffer,
- res_size,
- "Pic State Hfw Write");
+ &vme_context->res_pic_state_hfw_write_buffer,
+ res_size,
+ "Pic State Hfw Write");
if (!allocate_flag)
goto failed_allocation;
res_size = VP9_SEGMENT_STATE_BUFFER_SIZE;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_seg_state_brc_read_buffer,
- res_size,
- "Segment state brc_read");
+ &vme_context->res_seg_state_brc_read_buffer,
+ res_size,
+ "Segment state brc_read");
if (!allocate_flag)
goto failed_allocation;
res_size = VP9_SEGMENT_STATE_BUFFER_SIZE;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_seg_state_brc_write_buffer,
- res_size,
- "Segment state brc_write");
+ &vme_context->res_seg_state_brc_write_buffer,
+ res_size,
+ "Segment state brc_write");
if (!allocate_flag)
goto failed_allocation;
res_size = VP9_BRC_BITSTREAM_SIZE_BUFFER_SIZE;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_brc_bitstream_size_buffer,
- res_size,
- "Brc bitstream buffer");
+ &vme_context->res_brc_bitstream_size_buffer,
+ res_size,
+ "Brc bitstream buffer");
if (!allocate_flag)
goto failed_allocation;
res_size = VP9_HFW_BRC_DATA_BUFFER_SIZE;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_brc_hfw_data_buffer,
- res_size,
- "mfw Brc data");
+ &vme_context->res_brc_hfw_data_buffer,
+ res_size,
+ "mfw Brc data");
if (!allocate_flag)
goto failed_allocation;
res_size = VP9_BRC_MMDK_PAK_BUFFER_SIZE;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_brc_mmdk_pak_buffer,
- res_size,
- "Brc mmdk_pak");
+ &vme_context->res_brc_mmdk_pak_buffer,
+ res_size,
+ "Brc mmdk_pak");
if (!allocate_flag)
goto failed_allocation;
}
i965_free_gpe_resource(&vme_context->res_hvd_line_buffer);
res_size = frame_width_in_sb * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_hvd_line_buffer,
- res_size,
- "VP9 hvd line line");
+ &vme_context->res_hvd_line_buffer,
+ res_size,
+ "VP9 hvd line line");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_hvd_tile_line_buffer);
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_hvd_tile_line_buffer,
- res_size,
- "VP9 hvd tile_line line");
+ &vme_context->res_hvd_tile_line_buffer,
+ res_size,
+ "VP9 hvd tile_line line");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_deblocking_filter_line_buffer);
res_size = frame_width_in_sb * 18 * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_deblocking_filter_line_buffer,
- res_size,
- "VP9 deblocking filter line");
+ &vme_context->res_deblocking_filter_line_buffer,
+ res_size,
+ "VP9 deblocking filter line");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_deblocking_filter_tile_line_buffer);
res_size = frame_width_in_sb * 18 * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_deblocking_filter_tile_line_buffer,
- res_size,
- "VP9 deblocking tile line");
+ &vme_context->res_deblocking_filter_tile_line_buffer,
+ res_size,
+ "VP9 deblocking tile line");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_deblocking_filter_tile_col_buffer);
res_size = frame_height_in_sb * 17 * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_deblocking_filter_tile_col_buffer,
- res_size,
- "VP9 deblocking tile col");
+ &vme_context->res_deblocking_filter_tile_col_buffer,
+ res_size,
+ "VP9 deblocking tile col");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_metadata_line_buffer);
res_size = frame_width_in_sb * 5 * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_metadata_line_buffer,
- res_size,
- "VP9 metadata line");
+ &vme_context->res_metadata_line_buffer,
+ res_size,
+ "VP9 metadata line");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_metadata_tile_line_buffer);
res_size = frame_width_in_sb * 5 * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_metadata_tile_line_buffer,
- res_size,
- "VP9 metadata tile line");
+ &vme_context->res_metadata_tile_line_buffer,
+ res_size,
+ "VP9 metadata tile line");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_metadata_tile_col_buffer);
res_size = frame_height_in_sb * 5 * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_metadata_tile_col_buffer,
- res_size,
- "VP9 metadata tile col");
+ &vme_context->res_metadata_tile_col_buffer,
+ res_size,
+ "VP9 metadata tile col");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_prob_buffer);
res_size = 2048;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_prob_buffer,
- res_size,
- "VP9 prob");
+ &vme_context->res_prob_buffer,
+ res_size,
+ "VP9 prob");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_segmentid_buffer);
res_size = frame_sb_num * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_segmentid_buffer,
- res_size,
- "VP9 segment id");
+ &vme_context->res_segmentid_buffer,
+ res_size,
+ "VP9 segment id");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_prob_delta_buffer);
res_size = 29 * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_prob_delta_buffer,
- res_size,
- "VP9 prob delta");
+ &vme_context->res_prob_delta_buffer,
+ res_size,
+ "VP9 prob delta");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_prob_delta_buffer);
res_size = 29 * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_prob_delta_buffer,
- res_size,
- "VP9 prob delta");
+ &vme_context->res_prob_delta_buffer,
+ res_size,
+ "VP9 prob delta");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_compressed_input_buffer);
res_size = 32 * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_compressed_input_buffer,
- res_size,
- "VP9 compressed_input buffer");
+ &vme_context->res_compressed_input_buffer,
+ res_size,
+ "VP9 compressed_input buffer");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_prob_counter_buffer);
res_size = 193 * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_prob_counter_buffer,
- res_size,
- "VP9 prob counter");
+ &vme_context->res_prob_counter_buffer,
+ res_size,
+ "VP9 prob counter");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_tile_record_streamout_buffer);
res_size = frame_sb_num * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_tile_record_streamout_buffer,
- res_size,
- "VP9 tile record stream_out");
+ &vme_context->res_tile_record_streamout_buffer,
+ res_size,
+ "VP9 tile record stream_out");
if (!allocate_flag)
goto failed_allocation;
i965_free_gpe_resource(&vme_context->res_cu_stat_streamout_buffer);
res_size = frame_sb_num * 64;
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_cu_stat_streamout_buffer,
- res_size,
- "VP9 CU stat stream_out");
+ &vme_context->res_cu_stat_streamout_buffer,
+ res_size,
+ "VP9 CU stat stream_out");
if (!allocate_flag)
goto failed_allocation;
height = vp9_state->downscaled_height_4x_in_mb * 16;
i965_free_gpe_resource(&vme_context->s4x_memv_data_buffer);
allocate_flag = i965_gpe_allocate_2d_resource(i965->intel.bufmgr,
- &vme_context->s4x_memv_data_buffer,
- width, height,
- ALIGN(width, 64),
- "VP9 4x MEMV data");
+ &vme_context->s4x_memv_data_buffer,
+ width, height,
+ ALIGN(width, 64),
+ "VP9 4x MEMV data");
if (!allocate_flag)
goto failed_allocation;
height = vp9_state->downscaled_height_4x_in_mb * 16;
i965_free_gpe_resource(&vme_context->s4x_memv_distortion_buffer);
allocate_flag = i965_gpe_allocate_2d_resource(i965->intel.bufmgr,
- &vme_context->s4x_memv_distortion_buffer,
- width, height,
- ALIGN(width, 64),
- "VP9 4x MEMV distorion");
+ &vme_context->s4x_memv_distortion_buffer,
+ width, height,
+ ALIGN(width, 64),
+ "VP9 4x MEMV distorion");
if (!allocate_flag)
goto failed_allocation;
height = vp9_state->downscaled_height_16x_in_mb * 16;
i965_free_gpe_resource(&vme_context->s16x_memv_data_buffer);
allocate_flag = i965_gpe_allocate_2d_resource(i965->intel.bufmgr,
- &vme_context->s16x_memv_data_buffer,
- width, height,
- width,
- "VP9 16x MEMV data");
+ &vme_context->s16x_memv_data_buffer,
+ width, height,
+ width,
+ "VP9 16x MEMV data");
if (!allocate_flag)
goto failed_allocation;
height = vp9_state->frame_height_in_mb * 8;
i965_free_gpe_resource(&vme_context->res_output_16x16_inter_modes);
allocate_flag = i965_gpe_allocate_2d_resource(i965->intel.bufmgr,
- &vme_context->res_output_16x16_inter_modes,
- width, height,
- ALIGN(width, 64),
- "VP9 output inter_mode");
+ &vme_context->res_output_16x16_inter_modes,
+ width, height,
+ ALIGN(width, 64),
+ "VP9 output inter_mode");
if (!allocate_flag)
goto failed_allocation;
for (i = 0; i < 2; i++) {
i965_free_gpe_resource(&vme_context->res_mode_decision[i]);
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_mode_decision[i],
- res_size,
- "VP9 mode decision");
+ &vme_context->res_mode_decision[i],
+ res_size,
+ "VP9 mode decision");
if (!allocate_flag)
goto failed_allocation;
for (i = 0; i < 2; i++) {
i965_free_gpe_resource(&vme_context->res_mv_temporal_buffer[i]);
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_mv_temporal_buffer[i],
- res_size,
- "VP9 temporal mv");
+ &vme_context->res_mv_temporal_buffer[i],
+ res_size,
+ "VP9 temporal mv");
if (!allocate_flag)
goto failed_allocation;
}
res_size = vp9_state->mb_data_offset + frame_sb_num * 64 * 64 + 1000;
i965_free_gpe_resource(&vme_context->res_mb_code_surface);
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_mb_code_surface,
- ALIGN(res_size, 4096),
- "VP9 mb_code surface");
+ &vme_context->res_mb_code_surface,
+ ALIGN(res_size, 4096),
+ "VP9 mb_code surface");
if (!allocate_flag)
goto failed_allocation;
res_size = 128;
i965_free_gpe_resource(&vme_context->res_pak_uncompressed_input_buffer);
allocate_flag = i965_allocate_gpe_resource(i965->intel.bufmgr,
- &vme_context->res_pak_uncompressed_input_buffer,
- ALIGN(res_size, 4096),
- "VP9 pak_uncompressed_input");
+ &vme_context->res_pak_uncompressed_input_buffer,
+ ALIGN(res_size, 4096),
+ "VP9 pak_uncompressed_input");
if (!allocate_flag)
goto failed_allocation;
static void
gen9_init_media_object_walker_parameter(struct intel_encoder_context *encoder_context,
- struct vp9_encoder_kernel_walker_parameter *kernel_walker_param,
+ struct gpe_encoder_kernel_walker_parameter *kernel_walker_param,
struct gpe_media_object_walker_parameter *walker_param)
{
memset(walker_param, 0, sizeof(*walker_param));
}
static void
-gen9_add_2d_gpe_surface(VADriverContextP ctx,
- struct i965_gpe_context *gpe_context,
- struct object_surface *obj_surface,
- int is_uv_surface,
- int is_media_block_rw,
- unsigned int format,
- int index)
-{
- struct i965_gpe_resource gpe_resource;
- struct i965_gpe_surface gpe_surface;
-
- memset(&gpe_surface, 0, sizeof(gpe_surface));
-
- i965_object_surface_to_2d_gpe_resource(&gpe_resource, obj_surface);
- gpe_surface.gpe_resource = &gpe_resource;
- gpe_surface.is_2d_surface = 1;
- gpe_surface.is_uv_surface = !!is_uv_surface;
- gpe_surface.is_media_block_rw = !!is_media_block_rw;
-
- gpe_surface.cacheability_control = DEFAULT_MOCS;
- gpe_surface.format = format;
-
- gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
- i965_free_gpe_resource(&gpe_resource);
-}
-
-static void
-gen9_add_adv_gpe_surface(VADriverContextP ctx,
- struct i965_gpe_context *gpe_context,
- struct object_surface *obj_surface,
- int index)
-{
- struct i965_gpe_resource gpe_resource;
- struct i965_gpe_surface gpe_surface;
-
- memset(&gpe_surface, 0, sizeof(gpe_surface));
-
- i965_object_surface_to_2d_gpe_resource(&gpe_resource, obj_surface);
- gpe_surface.gpe_resource = &gpe_resource;
- gpe_surface.is_adv_surface = 1;
- gpe_surface.cacheability_control = DEFAULT_MOCS;
- gpe_surface.v_direction = 2;
-
- gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
- i965_free_gpe_resource(&gpe_resource);
-}
-
-static void
-gen9_add_buffer_gpe_surface(VADriverContextP ctx,
- struct i965_gpe_context *gpe_context,
- struct i965_gpe_resource *gpe_buffer,
- int is_raw_buffer,
- unsigned int size,
- unsigned int offset,
- int index)
-{
- struct i965_gpe_surface gpe_surface;
-
- memset(&gpe_surface, 0, sizeof(gpe_surface));
-
- gpe_surface.gpe_resource = gpe_buffer;
- gpe_surface.is_buffer = 1;
- gpe_surface.is_raw_buffer = !!is_raw_buffer;
- gpe_surface.cacheability_control = DEFAULT_MOCS;
- gpe_surface.size = size;
- gpe_surface.offset = offset;
-
- gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
-}
-
-static void
-gen9_add_buffer_2d_gpe_surface(VADriverContextP ctx,
- struct i965_gpe_context *gpe_context,
- struct i965_gpe_resource *gpe_buffer,
- int is_media_block_rw,
- unsigned int format,
- int index)
-{
- struct i965_gpe_surface gpe_surface;
-
- memset(&gpe_surface, 0, sizeof(gpe_surface));
-
- gpe_surface.gpe_resource = gpe_buffer;
- gpe_surface.is_2d_surface = 1;
- gpe_surface.is_media_block_rw = !!is_media_block_rw;
- gpe_surface.cacheability_control = DEFAULT_MOCS;
- gpe_surface.format = format;
-
- gen9_gpe_context_add_surface(gpe_context, &gpe_surface, index);
-}
-
-static void
-gen9_add_dri_buffer_gpe_surface(VADriverContextP ctx,
- struct i965_gpe_context *gpe_context,
- dri_bo *bo,
- int is_raw_buffer,
- unsigned int size,
- unsigned int offset,
- int index)
-{
- struct i965_gpe_resource gpe_resource;
-
- i965_dri_object_to_buffer_gpe_resource(&gpe_resource, bo);
- gen9_add_buffer_gpe_surface(ctx,
- gpe_context,
- &gpe_resource,
- is_raw_buffer,
- size,
- offset,
- index);
-
- i965_free_gpe_resource(&gpe_resource);
-}
-
-/*
-static void
-gen9_add_dri_buffer_2d_gpe_surface(VADriverContextP ctx,
- struct i965_gpe_context *gpe_context,
- dri_bo *bo,
- unsigned int width,
- unsigned int height,
- unsigned int pitch,
- int is_media_block_rw,
- unsigned int format,
- int index)
-{
- struct i965_gpe_resource gpe_resource;
-
- i965_gpe_dri_object_to_2d_gpe_resource(&gpe_resource, bo, width, height, pitch);
- gen9_add_buffer_2d_gpe_surface(ctx,
- gpe_context,
- &gpe_resource,
- is_media_block_rw,
- format,
- index);
-
- i965_free_gpe_resource(&gpe_resource);
-}
-*/
-
-static void
gen9_run_kernel_media_object(VADriverContextP ctx,
struct intel_encoder_context *encoder_context,
struct i965_gpe_context *gpe_context,
mi_store_data_imm.bo = status_buffer->bo;
mi_store_data_imm.offset = status_buffer->media_index_offset;
mi_store_data_imm.dw0 = media_function;
- gen9_gpe_mi_store_data_imm(ctx, batch, &mi_store_data_imm);
+ gen8_gpe_mi_store_data_imm(ctx, batch, &mi_store_data_imm);
intel_batchbuffer_emit_mi_flush(batch);
gen9_gpe_pipeline_setup(ctx, gpe_context, batch);
mi_store_data_imm.bo = status_buffer->bo;
mi_store_data_imm.offset = status_buffer->media_index_offset;
mi_store_data_imm.dw0 = media_function;
- gen9_gpe_mi_store_data_imm(ctx, batch, &mi_store_data_imm);
+ gen8_gpe_mi_store_data_imm(ctx, batch, &mi_store_data_imm);
gen9_gpe_pipeline_setup(ctx, gpe_context, batch);
- gen9_gpe_media_object_walker(ctx, gpe_context, batch, param);
+ gen8_gpe_media_object_walker(ctx, gpe_context, batch, param);
gen8_gpe_media_state_flush(ctx, gpe_context, batch);
gen9_gpe_pipeline_end(ctx, gpe_context, batch);
seq_param = param->pseq_param;
segment_param = param->psegment_param;
- cmd = gen8p_gpe_context_map_curbe(gpe_context);
+ cmd = i965_gpe_context_map_curbe(gpe_context);
if (!cmd)
return;
memset(cmd, 0, sizeof(vp9_brc_curbe_data));
- if (!vp9_state->dys_enabled)
- {
+ if (!vp9_state->dys_enabled) {
cmd->dw0.frame_width = pic_param->frame_width_src;
cmd->dw0.frame_height = pic_param->frame_height_src;
- }
- else
- {
+ } else {
cmd->dw0.frame_width = pic_param->frame_width_dst;
cmd->dw0.frame_height = pic_param->frame_height_dst;
}
cmd->dw1.ref_frame_flags = vp9_state->ref_frame_flag;
cmd->dw1.num_tlevels = 1;
- switch(param->media_state_type)
- {
- case VP9_MEDIA_STATE_BRC_INIT_RESET:
- {
- cmd->dw3.max_level_ratiot0 = 0;
- cmd->dw3.max_level_ratiot1 = 0;
- cmd->dw3.max_level_ratiot2 = 0;
- cmd->dw3.max_level_ratiot3 = 0;
-
- cmd->dw4.profile_level_max_frame = seq_param->max_frame_width *
- seq_param->max_frame_height;
- cmd->dw5.init_buf_fullness = vp9_state->init_vbv_buffer_fullness_in_bit;
- cmd->dw6.buf_size = vp9_state->vbv_buffer_size_in_bit;
- cmd->dw7.target_bit_rate = (vp9_state->target_bit_rate + VP9_BRC_KBPS - 1) / VP9_BRC_KBPS *
- VP9_BRC_KBPS;
- cmd->dw8.max_bit_rate = (vp9_state->max_bit_rate + VP9_BRC_KBPS - 1) / VP9_BRC_KBPS *
- VP9_BRC_KBPS;
- cmd->dw9.min_bit_rate = (vp9_state->min_bit_rate + VP9_BRC_KBPS - 1) / VP9_BRC_KBPS *
- VP9_BRC_KBPS;
- cmd->dw10.frame_ratem = vp9_state->frame_rate;
- cmd->dw11.frame_rated = 1;
-
- cmd->dw14.avbr_accuracy = 30;
- cmd->dw14.avbr_convergence = 150;
-
- if (encoder_context->rate_control_mode == VA_RC_CBR)
- {
- cmd->dw12.brc_flag = BRC_KERNEL_CBR;
- cmd->dw8.max_bit_rate = cmd->dw7.target_bit_rate;
- cmd->dw9.min_bit_rate = 0;
- }
- else if (encoder_context->rate_control_mode == VA_RC_VBR)
- {
- cmd->dw12.brc_flag = BRC_KERNEL_VBR;
- }
- else
- {
- cmd->dw12.brc_flag = BRC_KERNEL_CQL;
- cmd->dw16.cq_level = 30;
- }
- cmd->dw12.gopp = seq_param->intra_period - 1;
-
- cmd->dw13.init_frame_width = pic_param->frame_width_src;
- cmd->dw13.init_frame_height = pic_param->frame_height_src;
-
- cmd->dw15.min_qp = 0;
- cmd->dw15.max_qp = 255;
-
- cmd->dw16.cq_level = 30;
-
- cmd->dw17.enable_dynamic_scaling = vp9_state->dys_in_use;
- cmd->dw17.brc_overshoot_cbr_pct = 150;
-
- dInputBitsPerFrame = (double)(cmd->dw8.max_bit_rate) / (vp9_state->frame_rate);
- dbps_ratio = dInputBitsPerFrame / ((double)(vp9_state->vbv_buffer_size_in_bit) / 30);
- if (dbps_ratio < 0.1)
- dbps_ratio = 0.1;
- if (dbps_ratio > 3.5)
- dbps_ratio = 3.5;
-
- *param->pbrc_init_reset_buf_size_in_bits = cmd->dw6.buf_size;
- *param->pbrc_init_reset_input_bits_per_frame = dInputBitsPerFrame;
-
- cmd->dw18.pframe_deviation_threshold0 = (uint32_t)(-50 * pow(0.90, dbps_ratio));
- cmd->dw18.pframe_deviation_threshold1 = (uint32_t)(-50 * pow(0.66, dbps_ratio));
- cmd->dw18.pframe_deviation_threshold2 = (uint32_t)(-50 * pow(0.46, dbps_ratio));
- cmd->dw18.pframe_deviation_threshold3 = (uint32_t)(-50 * pow(0.3, dbps_ratio));
- cmd->dw19.pframe_deviation_threshold4 = (uint32_t)(50 * pow(0.3, dbps_ratio));
- cmd->dw19.pframe_deviation_threshold5 = (uint32_t)(50 * pow(0.46, dbps_ratio));
- cmd->dw19.pframe_deviation_threshold6 = (uint32_t)(50 * pow(0.7, dbps_ratio));
- cmd->dw19.pframe_deviation_threshold7 = (uint32_t)(50 * pow(0.9, dbps_ratio));
-
- cmd->dw20.vbr_deviation_threshold0 = (uint32_t)(-50 * pow(0.9, dbps_ratio));
- cmd->dw20.vbr_deviation_threshold1 = (uint32_t)(-50 * pow(0.7, dbps_ratio));
- cmd->dw20.vbr_deviation_threshold2 = (uint32_t)(-50 * pow(0.5, dbps_ratio));
- cmd->dw20.vbr_deviation_threshold3 = (uint32_t)(-50 * pow(0.3, dbps_ratio));
- cmd->dw21.vbr_deviation_threshold4 = (uint32_t)(100 * pow(0.4, dbps_ratio));
- cmd->dw21.vbr_deviation_threshold5 = (uint32_t)(100 * pow(0.5, dbps_ratio));
- cmd->dw21.vbr_deviation_threshold6 = (uint32_t)(100 * pow(0.75, dbps_ratio));
- cmd->dw21.vbr_deviation_threshold7 = (uint32_t)(100 * pow(0.9, dbps_ratio));
-
- cmd->dw22.kframe_deviation_threshold0 = (uint32_t)(-50 * pow(0.8, dbps_ratio));
- cmd->dw22.kframe_deviation_threshold1 = (uint32_t)(-50 * pow(0.6, dbps_ratio));
- cmd->dw22.kframe_deviation_threshold2 = (uint32_t)(-50 * pow(0.34, dbps_ratio));
- cmd->dw22.kframe_deviation_threshold3 = (uint32_t)(-50 * pow(0.2, dbps_ratio));
- cmd->dw23.kframe_deviation_threshold4 = (uint32_t)(50 * pow(0.2, dbps_ratio));
- cmd->dw23.kframe_deviation_threshold5 = (uint32_t)(50 * pow(0.4, dbps_ratio));
- cmd->dw23.kframe_deviation_threshold6 = (uint32_t)(50 * pow(0.66, dbps_ratio));
- cmd->dw23.kframe_deviation_threshold7 = (uint32_t)(50 * pow(0.9, dbps_ratio));
-
- break;
+ switch (param->media_state_type) {
+ case VP9_MEDIA_STATE_BRC_INIT_RESET: {
+ cmd->dw3.max_level_ratiot0 = 0;
+ cmd->dw3.max_level_ratiot1 = 0;
+ cmd->dw3.max_level_ratiot2 = 0;
+ cmd->dw3.max_level_ratiot3 = 0;
+
+ cmd->dw4.profile_level_max_frame = seq_param->max_frame_width *
+ seq_param->max_frame_height;
+ cmd->dw5.init_buf_fullness = vp9_state->init_vbv_buffer_fullness_in_bit;
+ cmd->dw6.buf_size = vp9_state->vbv_buffer_size_in_bit;
+ cmd->dw7.target_bit_rate = (vp9_state->target_bit_rate + VP9_BRC_KBPS - 1) / VP9_BRC_KBPS *
+ VP9_BRC_KBPS;
+ cmd->dw8.max_bit_rate = (vp9_state->max_bit_rate + VP9_BRC_KBPS - 1) / VP9_BRC_KBPS *
+ VP9_BRC_KBPS;
+ cmd->dw9.min_bit_rate = (vp9_state->min_bit_rate + VP9_BRC_KBPS - 1) / VP9_BRC_KBPS *
+ VP9_BRC_KBPS;
+ cmd->dw10.frame_ratem = vp9_state->framerate.num;
+ cmd->dw11.frame_rated = vp9_state->framerate.den;
+
+ cmd->dw14.avbr_accuracy = 30;
+ cmd->dw14.avbr_convergence = 150;
+
+ if (encoder_context->rate_control_mode == VA_RC_CBR) {
+ cmd->dw12.brc_flag = BRC_KERNEL_CBR;
+ cmd->dw8.max_bit_rate = cmd->dw7.target_bit_rate;
+ cmd->dw9.min_bit_rate = 0;
+ } else if (encoder_context->rate_control_mode == VA_RC_VBR) {
+ cmd->dw12.brc_flag = BRC_KERNEL_VBR;
+ } else {
+ cmd->dw12.brc_flag = BRC_KERNEL_CQL;
+ cmd->dw16.cq_level = 30;
}
- case VP9_MEDIA_STATE_BRC_UPDATE:
- {
- cmd->dw15.min_qp = 0;
- cmd->dw15.max_qp = 255;
+ cmd->dw12.gopp = seq_param->intra_period - 1;
+
+ cmd->dw13.init_frame_width = pic_param->frame_width_src;
+ cmd->dw13.init_frame_height = pic_param->frame_height_src;
+
+ cmd->dw15.min_qp = 1;
+ cmd->dw15.max_qp = 255;
+
+ cmd->dw16.cq_level = 30;
+
+ cmd->dw17.enable_dynamic_scaling = vp9_state->dys_in_use;
+ cmd->dw17.brc_overshoot_cbr_pct = 150;
+
+ dInputBitsPerFrame = (double)cmd->dw8.max_bit_rate * (double)vp9_state->framerate.den / (double)vp9_state->framerate.num;
+ dbps_ratio = dInputBitsPerFrame / ((double)vp9_state->vbv_buffer_size_in_bit / 30.0);
+ if (dbps_ratio < 0.1)
+ dbps_ratio = 0.1;
+ if (dbps_ratio > 3.5)
+ dbps_ratio = 3.5;
+
+ *param->pbrc_init_reset_buf_size_in_bits = cmd->dw6.buf_size;
+ *param->pbrc_init_reset_input_bits_per_frame = dInputBitsPerFrame;
+ *param->pbrc_init_current_target_buf_full_in_bits = cmd->dw6.buf_size >> 1;
+
+ cmd->dw18.pframe_deviation_threshold0 = (uint32_t)(-50 * pow(0.90, dbps_ratio));
+ cmd->dw18.pframe_deviation_threshold1 = (uint32_t)(-50 * pow(0.66, dbps_ratio));
+ cmd->dw18.pframe_deviation_threshold2 = (uint32_t)(-50 * pow(0.46, dbps_ratio));
+ cmd->dw18.pframe_deviation_threshold3 = (uint32_t)(-50 * pow(0.3, dbps_ratio));
+ cmd->dw19.pframe_deviation_threshold4 = (uint32_t)(50 * pow(0.3, dbps_ratio));
+ cmd->dw19.pframe_deviation_threshold5 = (uint32_t)(50 * pow(0.46, dbps_ratio));
+ cmd->dw19.pframe_deviation_threshold6 = (uint32_t)(50 * pow(0.7, dbps_ratio));
+ cmd->dw19.pframe_deviation_threshold7 = (uint32_t)(50 * pow(0.9, dbps_ratio));
+
+ cmd->dw20.vbr_deviation_threshold0 = (uint32_t)(-50 * pow(0.9, dbps_ratio));
+ cmd->dw20.vbr_deviation_threshold1 = (uint32_t)(-50 * pow(0.7, dbps_ratio));
+ cmd->dw20.vbr_deviation_threshold2 = (uint32_t)(-50 * pow(0.5, dbps_ratio));
+ cmd->dw20.vbr_deviation_threshold3 = (uint32_t)(-50 * pow(0.3, dbps_ratio));
+ cmd->dw21.vbr_deviation_threshold4 = (uint32_t)(100 * pow(0.4, dbps_ratio));
+ cmd->dw21.vbr_deviation_threshold5 = (uint32_t)(100 * pow(0.5, dbps_ratio));
+ cmd->dw21.vbr_deviation_threshold6 = (uint32_t)(100 * pow(0.75, dbps_ratio));
+ cmd->dw21.vbr_deviation_threshold7 = (uint32_t)(100 * pow(0.9, dbps_ratio));
+
+ cmd->dw22.kframe_deviation_threshold0 = (uint32_t)(-50 * pow(0.8, dbps_ratio));
+ cmd->dw22.kframe_deviation_threshold1 = (uint32_t)(-50 * pow(0.6, dbps_ratio));
+ cmd->dw22.kframe_deviation_threshold2 = (uint32_t)(-50 * pow(0.34, dbps_ratio));
+ cmd->dw22.kframe_deviation_threshold3 = (uint32_t)(-50 * pow(0.2, dbps_ratio));
+ cmd->dw23.kframe_deviation_threshold4 = (uint32_t)(50 * pow(0.2, dbps_ratio));
+ cmd->dw23.kframe_deviation_threshold5 = (uint32_t)(50 * pow(0.4, dbps_ratio));
+ cmd->dw23.kframe_deviation_threshold6 = (uint32_t)(50 * pow(0.66, dbps_ratio));
+ cmd->dw23.kframe_deviation_threshold7 = (uint32_t)(50 * pow(0.9, dbps_ratio));
- cmd->dw25.frame_number = param->frame_number;
+ break;
+ }
+ case VP9_MEDIA_STATE_BRC_UPDATE: {
+ cmd->dw15.min_qp = 1;
+ cmd->dw15.max_qp = 255;
- // Used in dynamic scaling. set to zero for now
- cmd->dw27.hrd_buffer_fullness_upper_limit = 0;
- cmd->dw28.hrd_buffer_fullness_lower_limit = 0;
+ cmd->dw25.frame_number = param->frame_number;
- if (pic_param->pic_flags.bits.segmentation_enabled) {
- cmd->dw32.seg_delta_qp0 = segment_param->seg_data[0].segment_qindex_delta;
- cmd->dw32.seg_delta_qp1 = segment_param->seg_data[1].segment_qindex_delta;
- cmd->dw32.seg_delta_qp2 = segment_param->seg_data[2].segment_qindex_delta;
- cmd->dw32.seg_delta_qp3 = segment_param->seg_data[3].segment_qindex_delta;
+ // Used in dynamic scaling. set to zero for now
+ cmd->dw27.hrd_buffer_fullness_upper_limit = 0;
+ cmd->dw28.hrd_buffer_fullness_lower_limit = 0;
- cmd->dw33.seg_delta_qp4 = segment_param->seg_data[4].segment_qindex_delta;
- cmd->dw33.seg_delta_qp5 = segment_param->seg_data[5].segment_qindex_delta;
- cmd->dw33.seg_delta_qp6 = segment_param->seg_data[6].segment_qindex_delta;
- cmd->dw33.seg_delta_qp7 = segment_param->seg_data[7].segment_qindex_delta;
- }
+ if (pic_param->pic_flags.bits.segmentation_enabled) {
+ cmd->dw32.seg_delta_qp0 = segment_param->seg_data[0].segment_qindex_delta;
+ cmd->dw32.seg_delta_qp1 = segment_param->seg_data[1].segment_qindex_delta;
+ cmd->dw32.seg_delta_qp2 = segment_param->seg_data[2].segment_qindex_delta;
+ cmd->dw32.seg_delta_qp3 = segment_param->seg_data[3].segment_qindex_delta;
- //cmd->dw34.temporal_id = pPicParams->temporal_idi;
- cmd->dw34.temporal_id = 0;
- cmd->dw34.multi_ref_qp_check = param->multi_ref_qp_check;
-
- cmd->dw35.max_num_pak_passes = param->brc_num_pak_passes;
- cmd->dw35.sync_async = 0;
- cmd->dw35.mbrc = param->mbbrc_enabled;
- if (*param->pbrc_init_current_target_buf_full_in_bits >
- ((double)(*param->pbrc_init_reset_buf_size_in_bits))) {
- *param->pbrc_init_current_target_buf_full_in_bits -=
- (double)(*param->pbrc_init_reset_buf_size_in_bits);
- cmd->dw35.overflow = 1;
- }
- else
- cmd->dw35.overflow = 0;
+ cmd->dw33.seg_delta_qp4 = segment_param->seg_data[4].segment_qindex_delta;
+ cmd->dw33.seg_delta_qp5 = segment_param->seg_data[5].segment_qindex_delta;
+ cmd->dw33.seg_delta_qp6 = segment_param->seg_data[6].segment_qindex_delta;
+ cmd->dw33.seg_delta_qp7 = segment_param->seg_data[7].segment_qindex_delta;
+ }
- cmd->dw24.target_size = (uint32_t)(*param->pbrc_init_current_target_buf_full_in_bits);
+ //cmd->dw34.temporal_id = pPicParams->temporal_idi;
+ cmd->dw34.temporal_id = 0;
+ cmd->dw34.multi_ref_qp_check = param->multi_ref_qp_check;
+
+ cmd->dw35.max_num_pak_passes = param->brc_num_pak_passes;
+ cmd->dw35.sync_async = 0;
+ cmd->dw35.mbrc = param->mbbrc_enabled;
+ if (*param->pbrc_init_current_target_buf_full_in_bits >
+ ((double)(*param->pbrc_init_reset_buf_size_in_bits))) {
+ *param->pbrc_init_current_target_buf_full_in_bits -=
+ (double)(*param->pbrc_init_reset_buf_size_in_bits);
+ cmd->dw35.overflow = 1;
+ } else
+ cmd->dw35.overflow = 0;
- cmd->dw36.segmentation = pic_param->pic_flags.bits.segmentation_enabled;
+ cmd->dw24.target_size = (uint32_t)(*param->pbrc_init_current_target_buf_full_in_bits);
- *param->pbrc_init_current_target_buf_full_in_bits += *param->pbrc_init_reset_input_bits_per_frame;
+ cmd->dw36.segmentation = pic_param->pic_flags.bits.segmentation_enabled;
- cmd->dw38.qdelta_ydc = pic_param->luma_dc_qindex_delta;
- cmd->dw38.qdelta_uvdc = pic_param->chroma_dc_qindex_delta;
- cmd->dw38.qdelta_uvac = pic_param->chroma_ac_qindex_delta;
+ *param->pbrc_init_current_target_buf_full_in_bits += *param->pbrc_init_reset_input_bits_per_frame;
- break;
- }
- case VP9_MEDIA_STATE_ENC_I_FRAME_DIST:
- cmd->dw2.intra_mode_disable = 0;
- break;
- default:
- break;
+ cmd->dw38.qdelta_ydc = pic_param->luma_dc_qindex_delta;
+ cmd->dw38.qdelta_uvdc = pic_param->chroma_dc_qindex_delta;
+ cmd->dw38.qdelta_uvac = pic_param->chroma_ac_qindex_delta;
+
+ break;
+ }
+ case VP9_MEDIA_STATE_ENC_I_FRAME_DIST:
+ cmd->dw2.intra_mode_disable = 0;
+ break;
+ default:
+ break;
}
cmd->dw48.brc_y4x_input_bti = VP9_BTI_BRC_SRCY4X_G9;
cmd->dw60.brc_bitstream_size_data_bti = VP9_BTI_BRC_BITSTREAM_SIZE_G9;
cmd->dw61.brc_hfw_data_output_bti = VP9_BTI_BRC_HFW_DATA_G9;
- gen8p_gpe_context_unmap_curbe(gpe_context);
+ i965_gpe_context_unmap_curbe(gpe_context);
return;
}
{
struct gen9_encoder_context_vp9 *vme_context = encoder_context->vme_context;
- gen9_add_buffer_gpe_surface(ctx,
+ i965_add_buffer_gpe_surface(ctx,
gpe_context,
&vme_context->res_brc_history_buffer,
0,
0,
VP9_BTI_BRC_HISTORY_G9);
- gen9_add_buffer_2d_gpe_surface(ctx,
+ i965_add_buffer_2d_gpe_surface(ctx,
gpe_context,
&vme_context->s4x_memv_distortion_buffer,
1,
brc_initreset_curbe.frame_width = vp9_state->frame_width;
brc_initreset_curbe.frame_height = vp9_state->frame_height;
brc_initreset_curbe.pbrc_init_current_target_buf_full_in_bits =
- &vp9_state->brc_init_current_target_buf_full_in_bits;
+ &vp9_state->brc_init_current_target_buf_full_in_bits;
brc_initreset_curbe.pbrc_init_reset_buf_size_in_bits =
- &vp9_state->brc_init_reset_buf_size_in_bits;
+ &vp9_state->brc_init_reset_buf_size_in_bits;
brc_initreset_curbe.pbrc_init_reset_input_bits_per_frame =
- &vp9_state->brc_init_reset_input_bits_per_frame;
+ &vp9_state->brc_init_reset_input_bits_per_frame;
brc_initreset_curbe.picture_coding_type = vp9_state->picture_coding_type;
brc_initreset_curbe.initbrc = !vp9_state->brc_inited;
brc_initreset_curbe.mbbrc_enabled = 0;
brc_initreset_curbe.ref_frame_flag = vp9_state->ref_frame_flag;
- brc_initreset_curbe.frame_rate = vp9_state->frame_rate;
vme_context->pfn_set_curbe_brc(ctx, encode_state,
gpe_context,
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
obj_surface = vp9_priv_surface->scaled_4x_surface_obj;
- gen9_add_2d_gpe_surface(ctx, gpe_context,
+ i965_add_2d_gpe_surface(ctx, gpe_context,
obj_surface,
0, 1,
I965_SURFACEFORMAT_R8_UNORM,
VP9_BTI_BRC_SRCY4X_G9
- );
+ );
- gen9_add_adv_gpe_surface(ctx, gpe_context,
+ i965_add_adv_gpe_surface(ctx, gpe_context,
obj_surface,
VP9_BTI_BRC_VME_COARSE_INTRA_G9);
- gen9_add_buffer_2d_gpe_surface(ctx,
+ i965_add_buffer_2d_gpe_surface(ctx,
gpe_context,
&vme_context->s4x_memv_distortion_buffer,
1,
I965_SURFACEFORMAT_R8_UNORM,
VP9_BTI_BRC_DISTORTION_G9);
- return;
+ return;
}
/* The function related with BRC */
VAEncPictureParameterBufferVP9 *pic_param;
struct gen9_vp9_state *vp9_state;
struct gpe_media_object_walker_parameter media_object_walker_param;
- struct vp9_encoder_kernel_walker_parameter kernel_walker_param;
+ struct gpe_encoder_kernel_walker_parameter kernel_walker_param;
vp9_state = (struct gen9_vp9_state *) encoder_context->enc_priv_state;
brc_intra_dist_curbe.frame_width = vp9_state->frame_width;
brc_intra_dist_curbe.frame_height = vp9_state->frame_height;
brc_intra_dist_curbe.pbrc_init_current_target_buf_full_in_bits =
- &vp9_state->brc_init_current_target_buf_full_in_bits;
+ &vp9_state->brc_init_current_target_buf_full_in_bits;
brc_intra_dist_curbe.pbrc_init_reset_buf_size_in_bits =
- &vp9_state->brc_init_reset_buf_size_in_bits;
+ &vp9_state->brc_init_reset_buf_size_in_bits;
brc_intra_dist_curbe.pbrc_init_reset_input_bits_per_frame =
- &vp9_state->brc_init_reset_input_bits_per_frame;
+ &vp9_state->brc_init_reset_input_bits_per_frame;
brc_intra_dist_curbe.picture_coding_type = vp9_state->picture_coding_type;
brc_intra_dist_curbe.initbrc = !vp9_state->brc_inited;
brc_intra_dist_curbe.mbbrc_enabled = 0;
brc_intra_dist_curbe.ref_frame_flag = vp9_state->ref_frame_flag;
- brc_intra_dist_curbe.frame_rate = vp9_state->frame_rate;
vme_context->pfn_set_curbe_brc(ctx, encode_state,
gpe_context,
gen8_gpe_setup_interface_data(ctx, gpe_context);
memset(&kernel_walker_param, 0, sizeof(kernel_walker_param));
- kernel_walker_param.resolution_x = vme_context->downscaled_width_in_mb4x;
- kernel_walker_param.resolution_y = vme_context->downscaled_height_in_mb4x;
+ kernel_walker_param.resolution_x = vp9_state->downscaled_width_4x_in_mb;
+ kernel_walker_param.resolution_y = vp9_state->downscaled_height_4x_in_mb;
kernel_walker_param.no_dependency = 1;
gen9_init_media_object_walker_parameter(encoder_context, &kernel_walker_param, &media_object_walker_param);
static void
intel_vp9enc_construct_picstate_batchbuf(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context,
- struct i965_gpe_resource *gpe_resource)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context,
+ struct i965_gpe_resource *gpe_resource)
{
struct gen9_vp9_state *vp9_state;
VAEncPictureParameterBufferVP9 *pic_param;
if (i == 0)
non_first_pass = 0;
- cmd_ptr =(unsigned int *)(pdata + i * VP9_PIC_STATE_BUFFER_SIZE);
+ cmd_ptr = (unsigned int *)(pdata + i * VP9_PIC_STATE_BUFFER_SIZE);
*cmd_ptr++ = (HCP_VP9_PIC_STATE | (33 - 2));
*cmd_ptr++ = (frame_height_minus1 << 16 |
frame_width_minus1);
/* dw2 */
- *cmd_ptr++ = ( 0 << 31 | /* disable segment_in */
- 0 << 30 | /* disable segment_out */
- is_lossless << 29 | /* loseless */
- (pic_param->pic_flags.bits.segmentation_enabled && pic_param->pic_flags.bits.segmentation_temporal_update) << 28 | /* temporal update */
- (pic_param->pic_flags.bits.segmentation_enabled && pic_param->pic_flags.bits.segmentation_update_map) << 27 | /* temporal update */
- (pic_param->pic_flags.bits.segmentation_enabled << 26) |
- (pic_param->sharpness_level << 23) |
- (pic_param->filter_level << 17) |
- (pic_param->pic_flags.bits.frame_parallel_decoding_mode << 16) |
- (pic_param->pic_flags.bits.error_resilient_mode << 15) |
- (pic_param->pic_flags.bits.refresh_frame_context << 14) |
- (last_frame_type << 13) |
- (vp9_state->tx_mode == TX_MODE_SELECT) << 12 |
- (pic_param->pic_flags.bits.comp_prediction_mode == REFERENCE_MODE_SELECT) << 11 |
- (use_prev_frame_mvs) << 10 |
- ref_flags |
- (pic_param->pic_flags.bits.mcomp_filter_type << 4) |
- (pic_param->pic_flags.bits.allow_high_precision_mv << 3) |
- (is_intra_only << 2) |
- (adapt_flag << 1) |
- (pic_param->pic_flags.bits.frame_type) << 0);
-
- *cmd_ptr++ =((0 << 28) | /* VP9Profile0 */
- (0 << 24) | /* 8-bit depth */
- (0 << 22) | /* only 420 format */
- (0 << 0) | /* sse statistics */
- (pic_param->log2_tile_rows << 8) |
- (pic_param->log2_tile_columns << 0));
+ *cmd_ptr++ = (0 << 31 | /* disable segment_in */
+ 0 << 30 | /* disable segment_out */
+ is_lossless << 29 | /* lossless */
+ (pic_param->pic_flags.bits.segmentation_enabled && pic_param->pic_flags.bits.segmentation_temporal_update) << 28 | /* temporal update */
+ (pic_param->pic_flags.bits.segmentation_enabled && pic_param->pic_flags.bits.segmentation_update_map) << 27 | /* update segment map */
+ (pic_param->pic_flags.bits.segmentation_enabled << 26) |
+ (pic_param->sharpness_level << 23) |
+ (pic_param->filter_level << 17) |
+ (pic_param->pic_flags.bits.frame_parallel_decoding_mode << 16) |
+ (pic_param->pic_flags.bits.error_resilient_mode << 15) |
+ (pic_param->pic_flags.bits.refresh_frame_context << 14) |
+ (last_frame_type << 13) |
+ (vp9_state->tx_mode == TX_MODE_SELECT) << 12 |
+ (pic_param->pic_flags.bits.comp_prediction_mode == REFERENCE_MODE_SELECT) << 11 |
+ (use_prev_frame_mvs) << 10 |
+ ref_flags |
+ (pic_param->pic_flags.bits.mcomp_filter_type << 4) |
+ (pic_param->pic_flags.bits.allow_high_precision_mv << 3) |
+ (is_intra_only << 2) |
+ (adapt_flag << 1) |
+ (pic_param->pic_flags.bits.frame_type) << 0);
+
+ *cmd_ptr++ = ((0 << 28) | /* VP9Profile0 */
+ (0 << 24) | /* 8-bit depth */
+ (0 << 22) | /* only 420 format */
+ (0 << 0) | /* sse statistics */
+ (pic_param->log2_tile_rows << 8) |
+ (pic_param->log2_tile_columns << 0));
/* dw4..6 */
if (pic_param->pic_flags.bits.frame_type &&
*cmd_ptr++ = 0;
}
/* dw7..9 */
- for(j = 0; j < 3; j++) {
+ for (j = 0; j < 3; j++) {
obj_surface = encode_state->reference_objects[j];
vp9_surface = NULL;
/* dw17 */
*cmd_ptr++ = vp9_state->frame_header.bit_offset_ref_lf_delta |
- (vp9_state->frame_header.bit_offset_mode_lf_delta << 16);
+ (vp9_state->frame_header.bit_offset_mode_lf_delta << 16);
*cmd_ptr++ = vp9_state->frame_header.bit_offset_qindex |
- (vp9_state->frame_header.bit_offset_lf_level << 16);
+ (vp9_state->frame_header.bit_offset_lf_level << 16);
/* dw19 */
*cmd_ptr++ = (1 << 26 | (1 << 25) |
static void
gen9_brc_update_add_surfaces_vp9(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context,
- struct i965_gpe_context *brc_gpe_context,
- struct i965_gpe_context *mbenc_gpe_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context,
+ struct i965_gpe_context *brc_gpe_context,
+ struct i965_gpe_context *mbenc_gpe_context)
{
struct gen9_encoder_context_vp9 *vme_context = encoder_context->vme_context;
/* 0. BRC history buffer */
- gen9_add_buffer_gpe_surface(ctx,
+ i965_add_buffer_gpe_surface(ctx,
brc_gpe_context,
&vme_context->res_brc_history_buffer,
0,
VP9_BTI_BRC_HISTORY_G9);
/* 1. Constant data buffer */
- gen9_add_buffer_gpe_surface(ctx,
+ i965_add_buffer_gpe_surface(ctx,
brc_gpe_context,
&vme_context->res_brc_const_data_buffer,
0,
VP9_BTI_BRC_CONSTANT_DATA_G9);
/* 2. Distortion 2D surface buffer */
- gen9_add_buffer_2d_gpe_surface(ctx,
+ i965_add_buffer_2d_gpe_surface(ctx,
brc_gpe_context,
&vme_context->s4x_memv_distortion_buffer,
1,
VP9_BTI_BRC_DISTORTION_G9);
/* 3. pak buffer */
- gen9_add_buffer_gpe_surface(ctx,
+ i965_add_buffer_gpe_surface(ctx,
brc_gpe_context,
&vme_context->res_brc_mmdk_pak_buffer,
0,
/* 4. Mbenc curbe input buffer */
gen9_add_dri_buffer_gpe_surface(ctx,
brc_gpe_context,
- mbenc_gpe_context->dynamic_state.bo,
+ mbenc_gpe_context->curbe.bo,
0,
- ALIGN(mbenc_gpe_context->curbe_size, 64),
- mbenc_gpe_context->curbe_offset,
+ ALIGN(mbenc_gpe_context->curbe.length, 64),
+ mbenc_gpe_context->curbe.offset,
VP9_BTI_BRC_MBENC_CURBE_INPUT_G9);
/* 5. Mbenc curbe output buffer */
gen9_add_dri_buffer_gpe_surface(ctx,
brc_gpe_context,
- mbenc_gpe_context->dynamic_state.bo,
+ mbenc_gpe_context->curbe.bo,
0,
- ALIGN(mbenc_gpe_context->curbe_size, 64),
- mbenc_gpe_context->curbe_offset,
+ ALIGN(mbenc_gpe_context->curbe.length, 64),
+ mbenc_gpe_context->curbe.offset,
VP9_BTI_BRC_MBENC_CURBE_OUTPUT_G9);
/* 6. BRC_PIC_STATE read buffer */
- gen9_add_buffer_gpe_surface(ctx, brc_gpe_context,
+ i965_add_buffer_gpe_surface(ctx, brc_gpe_context,
&vme_context->res_pic_state_brc_read_buffer,
0,
vme_context->res_pic_state_brc_read_buffer.size,
VP9_BTI_BRC_PIC_STATE_INPUT_G9);
/* 7. BRC_PIC_STATE write buffer */
- gen9_add_buffer_gpe_surface(ctx, brc_gpe_context,
+ i965_add_buffer_gpe_surface(ctx, brc_gpe_context,
&vme_context->res_pic_state_brc_write_hfw_read_buffer,
0,
vme_context->res_pic_state_brc_write_hfw_read_buffer.size,
VP9_BTI_BRC_PIC_STATE_OUTPUT_G9);
/* 8. SEGMENT_STATE read buffer */
- gen9_add_buffer_gpe_surface(ctx, brc_gpe_context,
+ i965_add_buffer_gpe_surface(ctx, brc_gpe_context,
&vme_context->res_seg_state_brc_read_buffer,
0,
vme_context->res_seg_state_brc_read_buffer.size,
VP9_BTI_BRC_SEGMENT_STATE_INPUT_G9);
/* 9. SEGMENT_STATE write buffer */
- gen9_add_buffer_gpe_surface(ctx, brc_gpe_context,
+ i965_add_buffer_gpe_surface(ctx, brc_gpe_context,
&vme_context->res_seg_state_brc_write_buffer,
0,
vme_context->res_seg_state_brc_write_buffer.size,
VP9_BTI_BRC_SEGMENT_STATE_OUTPUT_G9);
/* 10. Bitstream size buffer */
- gen9_add_buffer_gpe_surface(ctx, brc_gpe_context,
+ i965_add_buffer_gpe_surface(ctx, brc_gpe_context,
&vme_context->res_brc_bitstream_size_buffer,
0,
vme_context->res_brc_bitstream_size_buffer.size,
0,
VP9_BTI_BRC_BITSTREAM_SIZE_G9);
- gen9_add_buffer_gpe_surface(ctx, brc_gpe_context,
+ i965_add_buffer_gpe_surface(ctx, brc_gpe_context,
&vme_context->res_brc_hfw_data_buffer,
0,
vme_context->res_brc_hfw_data_buffer.size,
0,
VP9_BTI_BRC_HFW_DATA_G9);
- return;
+ return;
}
static VAStatus
gen9_vp9_brc_update_kernel(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
struct gen9_encoder_context_vp9 *vme_context = encoder_context->vme_context;
struct vp9_brc_context *brc_context = &vme_context->brc_context;
mbenc_curbe_param.media_state_type = mbenc_function;
vme_context->pfn_set_curbe_mbenc(ctx, encode_state,
- mbenc_gpe_context,
- encoder_context,
- &mbenc_curbe_param);
+ mbenc_gpe_context,
+ encoder_context,
+ &mbenc_curbe_param);
vp9_state->mbenc_curbe_set_in_brc_update = true;
brc_update_curbe_param.brc_num_pak_passes = vp9_state->num_pak_passes;
brc_update_curbe_param.pbrc_init_current_target_buf_full_in_bits =
- &vp9_state->brc_init_current_target_buf_full_in_bits;
+ &vp9_state->brc_init_current_target_buf_full_in_bits;
brc_update_curbe_param.pbrc_init_reset_buf_size_in_bits =
- &vp9_state->brc_init_reset_buf_size_in_bits;
+ &vp9_state->brc_init_reset_buf_size_in_bits;
brc_update_curbe_param.pbrc_init_reset_input_bits_per_frame =
- &vp9_state->brc_init_reset_input_bits_per_frame;
+ &vp9_state->brc_init_reset_input_bits_per_frame;
vme_context->pfn_set_curbe_brc(ctx, encode_state,
brc_gpe_context,
// Check if the constant data surface is present
- if (vp9_state->brc_constant_buffer_supported)
- {
+ if (vp9_state->brc_constant_buffer_supported) {
char *brc_const_buffer;
brc_const_buffer = i965_map_gpe_resource(&vme_context->res_brc_const_data_buffer);
i965_unmap_gpe_resource(&vme_context->res_brc_const_data_buffer);
}
- if (pic_param->pic_flags.bits.segmentation_enabled)
- {
- //reallocate the vme_state->mb_segment_map_surface
- /* this will be added later */
+ if (pic_param->pic_flags.bits.segmentation_enabled) {
+ //reallocate the vme_state->mb_segment_map_surface
+ /* this will be added later */
}
{
pic_param->filter_level = 0;
// clear the filter level value in picParams before programming pic state, as this value will be determined and updated by BRC.
intel_vp9enc_construct_picstate_batchbuf(ctx, encode_state,
- encoder_context, &vme_context->res_pic_state_brc_read_buffer);
+ encoder_context, &vme_context->res_pic_state_brc_read_buffer);
}
gen9_brc_update_add_surfaces_vp9(ctx, encode_state,
static
void gen9_vp9_set_curbe_me(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct i965_gpe_context *gpe_context,
- struct intel_encoder_context *encoder_context,
- struct gen9_vp9_me_curbe_param *param)
+ struct encode_state *encode_state,
+ struct i965_gpe_context *gpe_context,
+ struct intel_encoder_context *encoder_context,
+ struct gen9_vp9_me_curbe_param *param)
{
vp9_me_curbe_data *me_cmd;
int enc_media_state;
else
enc_media_state = VP9_MEDIA_STATE_4X_ME;
- me_cmd = gen8p_gpe_context_map_curbe(gpe_context);
+ me_cmd = i965_gpe_context_map_curbe(gpe_context);
if (!me_cmd)
return;
me_cmd->dw36.vme_fwd_inter_pred_surf_index = VP9_BTI_ME_CURR_PIC_L0;
me_cmd->dw37.vme_bdw_inter_pred_surf_index = VP9_BTI_ME_CURR_PIC_L1;
- gen8p_gpe_context_unmap_curbe(gpe_context);
+ i965_gpe_context_unmap_curbe(gpe_context);
}
static void
return;
vp9_priv_surface = obj_surface->private_data;
- if (param->use_16x_me)
- {
+ if (param->use_16x_me) {
gpe_resource = param->pres_16x_memv_data_buffer;
- }
- else
- {
+ } else {
gpe_resource = param->pres_4x_memv_data_buffer;
}
- gen9_add_buffer_2d_gpe_surface(ctx, gpe_context,
+ i965_add_buffer_2d_gpe_surface(ctx, gpe_context,
gpe_resource,
1,
I965_SURFACEFORMAT_R8_UNORM,
if (param->b16xme_enabled) {
gpe_resource = param->pres_16x_memv_data_buffer;
- gen9_add_buffer_2d_gpe_surface(ctx, gpe_context,
+ i965_add_buffer_2d_gpe_surface(ctx, gpe_context,
gpe_resource,
1,
I965_SURFACEFORMAT_R8_UNORM,
if (!param->use_16x_me) {
gpe_resource = param->pres_me_brc_distortion_buffer;
- gen9_add_buffer_2d_gpe_surface(ctx, gpe_context,
+ i965_add_buffer_2d_gpe_surface(ctx, gpe_context,
gpe_resource,
1,
I965_SURFACEFORMAT_R8_UNORM,
gpe_resource = param->pres_me_distortion_buffer;
- gen9_add_buffer_2d_gpe_surface(ctx, gpe_context,
+ i965_add_buffer_2d_gpe_surface(ctx, gpe_context,
gpe_resource,
1,
I965_SURFACEFORMAT_R8_UNORM,
else
input_surface = vp9_priv_surface->scaled_4x_surface_obj;
- gen9_add_adv_gpe_surface(ctx, gpe_context,
+ i965_add_adv_gpe_surface(ctx, gpe_context,
input_surface,
VP9_BTI_ME_CURR_PIC_L0);
else
input_surface = vp9_priv_surface->dys_4x_surface_obj;
}
- gen9_add_adv_gpe_surface(ctx, gpe_context,
+ i965_add_adv_gpe_surface(ctx, gpe_context,
input_surface,
ref_bti);
- gen9_add_adv_gpe_surface(ctx, gpe_context,
+ i965_add_adv_gpe_surface(ctx, gpe_context,
input_surface,
ref_bti + 1);
ref_bti += 2;
input_surface = vp9_priv_surface->dys_4x_surface_obj;
}
- gen9_add_adv_gpe_surface(ctx, gpe_context,
+ i965_add_adv_gpe_surface(ctx, gpe_context,
input_surface,
ref_bti);
- gen9_add_adv_gpe_surface(ctx, gpe_context,
+ i965_add_adv_gpe_surface(ctx, gpe_context,
input_surface,
ref_bti + 1);
ref_bti += 2;
else
input_surface = vp9_priv_surface->dys_4x_surface_obj;
}
- gen9_add_adv_gpe_surface(ctx, gpe_context,
+ i965_add_adv_gpe_surface(ctx, gpe_context,
input_surface,
ref_bti);
- gen9_add_adv_gpe_surface(ctx, gpe_context,
+ i965_add_adv_gpe_surface(ctx, gpe_context,
input_surface,
ref_bti + 1);
ref_bti += 2;
struct gen9_vp9_me_curbe_param me_curbe_param;
struct gen9_vp9_state *vp9_state;
struct gpe_media_object_walker_parameter media_object_walker_param;
- struct vp9_encoder_kernel_walker_parameter kernel_walker_param;
+ struct gpe_encoder_kernel_walker_parameter kernel_walker_param;
vp9_state = (struct gen9_vp9_state *) encoder_context->enc_priv_state;
if (!vp9_state || !vp9_state->pic_param)
static void
gen9_vp9_set_curbe_scaling_cm(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct i965_gpe_context *gpe_context,
- struct intel_encoder_context *encoder_context,
- struct gen9_vp9_scaling_curbe_param *curbe_param)
+ struct encode_state *encode_state,
+ struct i965_gpe_context *gpe_context,
+ struct intel_encoder_context *encoder_context,
+ struct gen9_vp9_scaling_curbe_param *curbe_param)
{
vp9_scaling4x_curbe_data_cm *curbe_cmd;
- curbe_cmd = gen8p_gpe_context_map_curbe(gpe_context);
+ curbe_cmd = i965_gpe_context_map_curbe(gpe_context);
if (!curbe_cmd)
return;
curbe_cmd->dw6.enable_blk8x8_stat_output = 0;
if (curbe_param->mb_variance_output_enabled ||
- curbe_param->mb_pixel_average_output_enabled)
- {
+ curbe_param->mb_pixel_average_output_enabled) {
curbe_cmd->dw10.mbv_proc_stat_bti = VP9_BTI_SCALING_FRAME_MBVPROCSTATS_DST_CM;
}
- gen8p_gpe_context_unmap_curbe(gpe_context);
+ i965_gpe_context_unmap_curbe(gpe_context);
return;
}
else
surface_format = I965_SURFACEFORMAT_R8_UNORM;
- gen9_add_2d_gpe_surface(ctx, gpe_context,
+ i965_add_2d_gpe_surface(ctx, gpe_context,
scaling_surface_param->input_surface,
0, 1, surface_format,
scaling_bti->scaling_frame_src_y);
- gen9_add_2d_gpe_surface(ctx, gpe_context,
+ i965_add_2d_gpe_surface(ctx, gpe_context,
scaling_surface_param->output_surface,
0, 1, surface_format,
scaling_bti->scaling_frame_dst_y);
struct gen9_vp9_state *vp9_state;
VAEncPictureParameterBufferVP9 *pic_param;
struct gpe_media_object_walker_parameter media_object_walker_param;
- struct vp9_encoder_kernel_walker_parameter kernel_walker_param;
+ struct gpe_encoder_kernel_walker_parameter kernel_walker_param;
struct object_surface *obj_surface;
struct object_surface *input_surface, *output_surface;
struct gen9_surface_vp9 *vp9_priv_surface;
obj_surface = encode_state->reconstructed_object;
vp9_priv_surface = obj_surface->private_data;
- if (use_16x_scaling)
- {
+ if (use_16x_scaling) {
downscaled_width_in_mb = vp9_state->downscaled_width_16x_in_mb;
downscaled_height_in_mb = vp9_state->downscaled_height_16x_in_mb;
downscaled_height_in_mb = vp9_state->downscaled_height_4x_in_mb;
if (vp9_state->dys_in_use &&
- ((pic_param->frame_width_src != pic_param->frame_width_dst) ||
- (pic_param->frame_height_src != pic_param->frame_height_dst)))
+ ((pic_param->frame_width_src != pic_param->frame_width_dst) ||
+ (pic_param->frame_height_src != pic_param->frame_height_dst)))
input_surface = vp9_priv_surface->dys_surface_obj;
else
input_surface = encode_state->input_yuv_object;
scaling_curbe_param.blk8x8_stat_enabled = 0;
vme_context->pfn_set_curbe_scaling(ctx, encode_state,
- gpe_context,
- encoder_context,
- &scaling_curbe_param);
+ gpe_context,
+ encoder_context,
+ &scaling_curbe_param);
memset(&scaling_surface_param, 0, sizeof(scaling_surface_param));
scaling_surface_param.p_scaling_bti = (void *)(&vme_context->scaling_context.scaling_4x_bti);
if (!gpe_context)
return;
- dri_bo_map(gpe_context->dynamic_state.bo, 1);
+ dri_bo_map(gpe_context->sampler.bo, 1);
- if (!gpe_context->dynamic_state.bo->virtual)
+ if (!gpe_context->sampler.bo->virtual)
return;
sampler_cmd = (struct gen9_sampler_8x8_avs *)
- (gpe_context->dynamic_state.bo->virtual + gpe_context->sampler_offset);
+ (gpe_context->sampler.bo->virtual + gpe_context->sampler.offset);
memset(sampler_cmd, 0, sizeof(struct gen9_sampler_8x8_avs));
&gen9_vp9_avs_coeffs[17 * 8],
15 * sizeof(struct gen8_sampler_8x8_avs_coefficients));
- dri_bo_unmap(gpe_context->dynamic_state.bo);
+ dri_bo_unmap(gpe_context->sampler.bo);
}
static void
{
vp9_dys_curbe_data *curbe_cmd;
- curbe_cmd = gen8p_gpe_context_map_curbe(gpe_context);
+ curbe_cmd = i965_gpe_context_map_curbe(gpe_context);
if (!curbe_cmd)
return;
curbe_cmd->dw17.output_frame_y_bti = VP9_BTI_DYS_OUTPUT_Y;
curbe_cmd->dw18.avs_sample_idx = 0;
- gen8p_gpe_context_unmap_curbe(gpe_context);
+ i965_gpe_context_unmap_curbe(gpe_context);
}
static void
gen9_vp9_send_dys_surface(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct i965_gpe_context *gpe_context,
- struct intel_encoder_context *encoder_context,
- struct gen9_vp9_dys_surface_param *surface_param)
+ struct encode_state *encode_state,
+ struct i965_gpe_context *gpe_context,
+ struct intel_encoder_context *encoder_context,
+ struct gen9_vp9_dys_surface_param *surface_param)
{
if (surface_param->input_frame)
- gen9_add_adv_gpe_surface(ctx,
+ i965_add_adv_gpe_surface(ctx,
gpe_context,
surface_param->input_frame,
VP9_BTI_DYS_INPUT_NV12);
if (surface_param->output_frame) {
- gen9_add_2d_gpe_surface(ctx,
+ i965_add_2d_gpe_surface(ctx,
gpe_context,
surface_param->output_frame,
0,
I965_SURFACEFORMAT_R8_UNORM,
VP9_BTI_DYS_OUTPUT_Y);
- gen9_add_2d_gpe_surface(ctx,
+ i965_add_2d_gpe_surface(ctx,
gpe_context,
surface_param->output_frame,
1,
static VAStatus
gen9_vp9_dys_kernel(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context,
- gen9_vp9_dys_kernel_param *dys_kernel_param)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context,
+ gen9_vp9_dys_kernel_param *dys_kernel_param)
{
struct gen9_encoder_context_vp9 *vme_context = encoder_context->vme_context;
struct i965_gpe_context *gpe_context;
struct gen9_vp9_dys_curbe_param curbe_param;
struct gen9_vp9_dys_surface_param surface_param;
struct gpe_media_object_walker_parameter media_object_walker_param;
- struct vp9_encoder_kernel_walker_parameter kernel_walker_param;
+ struct gpe_encoder_kernel_walker_parameter kernel_walker_param;
unsigned int resolution_x, resolution_y;
media_function = VP9_MEDIA_STATE_DYS;
curbe_param.output_width = dys_kernel_param->output_width;
curbe_param.output_height = dys_kernel_param->output_height;
vme_context->pfn_set_curbe_dys(ctx, encode_state,
- gpe_context,
- encoder_context,
- &curbe_param);
+ gpe_context,
+ encoder_context,
+ &curbe_param);
// Add surface states
memset(&surface_param, 0, sizeof(surface_param));
static VAStatus
gen9_vp9_run_dys_refframes(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
struct gen9_vp9_state *vp9_state;
VAEncPictureParameterBufferVP9 *pic_param;
}
if ((vp9_state->dys_ref_frame_flag & VP9_LAST_REF) &&
- vp9_state->last_ref_obj) {
+ vp9_state->last_ref_obj) {
obj_surface = vp9_state->last_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
if (vp9_state->hme_enabled) {
dys_kernel_param.input_width = ALIGN((vp9_priv_surface->frame_width / 4), 16);
- dys_kernel_param.input_width = ALIGN((vp9_priv_surface->frame_height / 4), 16);
+ dys_kernel_param.input_height = ALIGN((vp9_priv_surface->frame_height / 4), 16);
dys_kernel_param.input_surface = vp9_priv_surface->scaled_4x_surface_obj;
dys_kernel_param.output_width = vp9_state->frame_width_4x;
}
if ((vp9_state->dys_ref_frame_flag & VP9_GOLDEN_REF) &&
- vp9_state->golden_ref_obj) {
+ vp9_state->golden_ref_obj) {
obj_surface = vp9_state->golden_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
}
if ((vp9_state->dys_ref_frame_flag & VP9_ALT_REF) &&
- vp9_state->alt_ref_obj) {
+ vp9_state->alt_ref_obj) {
obj_surface = vp9_state->alt_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
seg_param = &tmp_seg_param;
}
- curbe_cmd = gen8p_gpe_context_map_curbe(gpe_context);
+ curbe_cmd = i965_gpe_context_map_curbe(gpe_context);
if (!curbe_cmd)
return;
memset(curbe_cmd, 0, sizeof(vp9_mbenc_curbe_data));
- if (vp9_state->dys_in_use)
- {
+ if (vp9_state->dys_in_use) {
curbe_cmd->dw0.frame_width = pic_param->frame_width_dst;
curbe_cmd->dw0.frame_height = pic_param->frame_height_dst;
- }
- else
- {
+ } else {
curbe_cmd->dw0.frame_width = pic_param->frame_width_src;
curbe_cmd->dw0.frame_height = pic_param->frame_height_src;
}
curbe_cmd->dw1.ref_frame_flags = curbe_param->ref_frame_flag;
//right now set them to normal settings
- if (curbe_param->picture_coding_type)
- {
- switch (vp9_state->target_usage)
- {
+ if (curbe_param->picture_coding_type) {
+ switch (vp9_state->target_usage) {
case INTEL_ENC_VP9_TU_QUALITY:
curbe_cmd->dw1.min_16for32_check = 0x00;
curbe_cmd->dw2.multi_pred = 0x02;
curbe_cmd->dw4.disable_mr_threshold = 0x000C;
memcpy(&curbe_cmd->dw16,
- vp9_diamond_ime_search_path_delta,
- 14 * sizeof(unsigned int));
+ vp9_diamond_ime_search_path_delta,
+ 14 * sizeof(unsigned int));
break;
case INTEL_ENC_VP9_TU_PERFORMANCE:
curbe_cmd->dw1.min_16for32_check = 0x02;
curbe_cmd->dw4.disable_mr_threshold = 0x0016;
memcpy(&curbe_cmd->dw16,
- vp9_fullspiral_ime_search_path_delta,
- 14 * sizeof(unsigned int));
+ vp9_fullspiral_ime_search_path_delta,
+ 14 * sizeof(unsigned int));
break;
default: // normal settings
curbe_cmd->dw4.disable_mr_threshold = 0x0011;
memcpy(&curbe_cmd->dw16,
- vp9_diamond_ime_search_path_delta,
- 14 * sizeof(unsigned int));
+ vp9_diamond_ime_search_path_delta,
+ 14 * sizeof(unsigned int));
break;
}
curbe_cmd->dw5.intra_round = 4;
curbe_cmd->dw5.frame_qpindex = pic_param->luma_ac_qindex;
- for (i = 0; i < segment_count; i++)
- {
+ for (i = 0; i < segment_count; i++) {
seg_qindex = pic_param->luma_ac_qindex + pic_param->luma_dc_qindex_delta
+ seg_param->seg_data[i].segment_qindex_delta;
16 * sizeof(unsigned int));
}
- if (curbe_param->picture_coding_type)
- {
- if (curbe_cmd->dw3.multi_ref_qp_check)
- {
- if (curbe_param->ref_frame_flag & 0x01)
- {
+ if (curbe_param->picture_coding_type) {
+ if (curbe_cmd->dw3.multi_ref_qp_check) {
+ if (curbe_param->ref_frame_flag & 0x01) {
obj_surface = curbe_param->last_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
curbe_cmd->dw8.last_ref_qp = vp9_quant_dc[vp9_priv_surface->qp_value];
}
- if (curbe_param->ref_frame_flag & 0x02)
- {
+ if (curbe_param->ref_frame_flag & 0x02) {
obj_surface = curbe_param->golden_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
curbe_cmd->dw8.golden_ref_qp = vp9_quant_dc[vp9_priv_surface->qp_value];
}
- if (curbe_param->ref_frame_flag & 0x04)
- {
+ if (curbe_param->ref_frame_flag & 0x04) {
obj_surface = curbe_param->alt_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
curbe_cmd->dw9.alt_ref_qp = vp9_quant_dc[vp9_priv_surface->qp_value];
curbe_cmd->dw174.cu_record_bti = VP9_BTI_MBENC_CU_RECORDS_G9;
curbe_cmd->dw175.pak_data_bti = VP9_BTI_MBENC_PAK_DATA_G9;
- gen8p_gpe_context_unmap_curbe(gpe_context);
+ i965_gpe_context_unmap_curbe(gpe_context);
return;
}
frame_height_in_sb = ALIGN(mbenc_param->frame_height, 64) / 64;
media_function = mbenc_param->media_state_type;
- switch (media_function)
- {
- case VP9_MEDIA_STATE_MBENC_I_32x32:
- {
+ switch (media_function) {
+ case VP9_MEDIA_STATE_MBENC_I_32x32: {
obj_surface = mbenc_param->curr_frame_obj;
- gen9_add_2d_gpe_surface(ctx,
+ i965_add_2d_gpe_surface(ctx,
gpe_context,
obj_surface,
0,
I965_SURFACEFORMAT_R8_UNORM,
VP9_BTI_MBENC_CURR_Y_G9);
- gen9_add_2d_gpe_surface(ctx,
+ i965_add_2d_gpe_surface(ctx,
gpe_context,
obj_surface,
1,
VP9_BTI_MBENC_CURR_UV_G9);
- if (mbenc_param->segmentation_enabled)
- {
- gen9_add_buffer_2d_gpe_surface(ctx,
- gpe_context,
- mbenc_param->pres_segmentation_map,
- 1,
- I965_SURFACEFORMAT_R8_UNORM,
- VP9_BTI_MBENC_SEGMENTATION_MAP_G9);
+ if (mbenc_param->segmentation_enabled) {
+ i965_add_buffer_2d_gpe_surface(ctx,
+ gpe_context,
+ mbenc_param->pres_segmentation_map,
+ 1,
+ I965_SURFACEFORMAT_R8_UNORM,
+ VP9_BTI_MBENC_SEGMENTATION_MAP_G9);
}
res_size = 16 * mbenc_param->frame_width_in_mb *
- mbenc_param->frame_height_in_mb * sizeof(unsigned int);
- gen9_add_buffer_gpe_surface(ctx,
+ mbenc_param->frame_height_in_mb * sizeof(unsigned int);
+ i965_add_buffer_gpe_surface(ctx,
gpe_context,
mbenc_param->pres_mode_decision,
0,
break;
}
- case VP9_MEDIA_STATE_MBENC_I_16x16:
- {
+ case VP9_MEDIA_STATE_MBENC_I_16x16: {
obj_surface = mbenc_param->curr_frame_obj;
- gen9_add_2d_gpe_surface(ctx,
+ i965_add_2d_gpe_surface(ctx,
gpe_context,
obj_surface,
0,
I965_SURFACEFORMAT_R8_UNORM,
VP9_BTI_MBENC_CURR_Y_G9);
- gen9_add_2d_gpe_surface(ctx,
+ i965_add_2d_gpe_surface(ctx,
gpe_context,
obj_surface,
1,
I965_SURFACEFORMAT_R16_UINT,
VP9_BTI_MBENC_CURR_UV_G9);
- gen9_add_adv_gpe_surface(ctx, gpe_context,
+ i965_add_adv_gpe_surface(ctx, gpe_context,
obj_surface,
VP9_BTI_MBENC_CURR_NV12_G9);
- if (mbenc_param->segmentation_enabled)
- {
- gen9_add_buffer_2d_gpe_surface(ctx,
- gpe_context,
- mbenc_param->pres_segmentation_map,
- 1,
- I965_SURFACEFORMAT_R8_UNORM,
- VP9_BTI_MBENC_SEGMENTATION_MAP_G9);
+ if (mbenc_param->segmentation_enabled) {
+ i965_add_buffer_2d_gpe_surface(ctx,
+ gpe_context,
+ mbenc_param->pres_segmentation_map,
+ 1,
+ I965_SURFACEFORMAT_R8_UNORM,
+ VP9_BTI_MBENC_SEGMENTATION_MAP_G9);
}
res_size = 16 * mbenc_param->frame_width_in_mb *
- mbenc_param->frame_height_in_mb * sizeof(unsigned int);
- gen9_add_buffer_gpe_surface(ctx,
+ mbenc_param->frame_height_in_mb * sizeof(unsigned int);
+ i965_add_buffer_gpe_surface(ctx,
gpe_context,
mbenc_param->pres_mode_decision,
0,
gen9_add_dri_buffer_gpe_surface(ctx,
gpe_context,
- mbenc_param->gpe_context_tx->dynamic_state.bo,
+ mbenc_param->gpe_context_tx->curbe.bo,
0,
ALIGN(res_size, 64),
- mbenc_param->gpe_context_tx->curbe_offset,
+ mbenc_param->gpe_context_tx->curbe.offset,
VP9_BTI_MBENC_TX_CURBE_G9);
break;
}
- case VP9_MEDIA_STATE_MBENC_P:
- {
+ case VP9_MEDIA_STATE_MBENC_P: {
obj_surface = mbenc_param->curr_frame_obj;
- gen9_add_2d_gpe_surface(ctx,
+ i965_add_2d_gpe_surface(ctx,
gpe_context,
obj_surface,
0,
I965_SURFACEFORMAT_R8_UNORM,
VP9_BTI_MBENC_CURR_Y_G9);
- gen9_add_2d_gpe_surface(ctx, gpe_context,
+ i965_add_2d_gpe_surface(ctx, gpe_context,
obj_surface,
1,
1,
I965_SURFACEFORMAT_R16_UINT,
VP9_BTI_MBENC_CURR_UV_G9);
- gen9_add_adv_gpe_surface(ctx, gpe_context,
+ i965_add_adv_gpe_surface(ctx, gpe_context,
obj_surface,
VP9_BTI_MBENC_CURR_NV12_G9);
- if (mbenc_param->last_ref_obj)
- {
+ if (mbenc_param->last_ref_obj) {
obj_surface = mbenc_param->last_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
else
tmp_input = obj_surface;
- gen9_add_adv_gpe_surface(ctx, gpe_context,
- tmp_input,
- VP9_BTI_MBENC_LAST_NV12_G9);
+ i965_add_adv_gpe_surface(ctx, gpe_context,
+ tmp_input,
+ VP9_BTI_MBENC_LAST_NV12_G9);
- gen9_add_adv_gpe_surface(ctx, gpe_context,
- tmp_input,
- VP9_BTI_MBENC_LAST_NV12_G9 + 1);
+ i965_add_adv_gpe_surface(ctx, gpe_context,
+ tmp_input,
+ VP9_BTI_MBENC_LAST_NV12_G9 + 1);
}
- if (mbenc_param->golden_ref_obj)
- {
+ if (mbenc_param->golden_ref_obj) {
obj_surface = mbenc_param->golden_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
else
tmp_input = obj_surface;
- gen9_add_adv_gpe_surface(ctx, gpe_context,
- tmp_input,
- VP9_BTI_MBENC_GOLD_NV12_G9);
+ i965_add_adv_gpe_surface(ctx, gpe_context,
+ tmp_input,
+ VP9_BTI_MBENC_GOLD_NV12_G9);
- gen9_add_adv_gpe_surface(ctx, gpe_context,
- tmp_input,
- VP9_BTI_MBENC_GOLD_NV12_G9 + 1);
+ i965_add_adv_gpe_surface(ctx, gpe_context,
+ tmp_input,
+ VP9_BTI_MBENC_GOLD_NV12_G9 + 1);
}
- if (mbenc_param->alt_ref_obj)
- {
+ if (mbenc_param->alt_ref_obj) {
obj_surface = mbenc_param->alt_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
else
tmp_input = obj_surface;
- gen9_add_adv_gpe_surface(ctx, gpe_context,
- tmp_input,
- VP9_BTI_MBENC_ALTREF_NV12_G9);
+ i965_add_adv_gpe_surface(ctx, gpe_context,
+ tmp_input,
+ VP9_BTI_MBENC_ALTREF_NV12_G9);
- gen9_add_adv_gpe_surface(ctx, gpe_context,
- tmp_input,
- VP9_BTI_MBENC_ALTREF_NV12_G9 + 1);
+ i965_add_adv_gpe_surface(ctx, gpe_context,
+ tmp_input,
+ VP9_BTI_MBENC_ALTREF_NV12_G9 + 1);
}
- if (mbenc_param->hme_enabled)
- {
- gen9_add_buffer_2d_gpe_surface(ctx, gpe_context,
- mbenc_param->ps4x_memv_data_buffer,
- 1,
- I965_SURFACEFORMAT_R8_UNORM,
- VP9_BTI_MBENC_HME_MV_DATA_G9);
-
- gen9_add_buffer_2d_gpe_surface(ctx, gpe_context,
- mbenc_param->ps4x_memv_distortion_buffer,
- 1,
- I965_SURFACEFORMAT_R8_UNORM,
- VP9_BTI_MBENC_HME_DISTORTION_G9);
+ if (mbenc_param->hme_enabled) {
+ i965_add_buffer_2d_gpe_surface(ctx, gpe_context,
+ mbenc_param->ps4x_memv_data_buffer,
+ 1,
+ I965_SURFACEFORMAT_R8_UNORM,
+ VP9_BTI_MBENC_HME_MV_DATA_G9);
+
+ i965_add_buffer_2d_gpe_surface(ctx, gpe_context,
+ mbenc_param->ps4x_memv_distortion_buffer,
+ 1,
+ I965_SURFACEFORMAT_R8_UNORM,
+ VP9_BTI_MBENC_HME_DISTORTION_G9);
}
- if (mbenc_param->segmentation_enabled)
- {
- gen9_add_buffer_2d_gpe_surface(ctx,
- gpe_context,
- mbenc_param->pres_segmentation_map,
- 1,
- I965_SURFACEFORMAT_R8_UNORM,
- VP9_BTI_MBENC_SEGMENTATION_MAP_G9);
+ if (mbenc_param->segmentation_enabled) {
+ i965_add_buffer_2d_gpe_surface(ctx,
+ gpe_context,
+ mbenc_param->pres_segmentation_map,
+ 1,
+ I965_SURFACEFORMAT_R8_UNORM,
+ VP9_BTI_MBENC_SEGMENTATION_MAP_G9);
}
res_size = 16 * mbenc_param->frame_width_in_mb *
- mbenc_param->frame_height_in_mb * sizeof(unsigned int);
- gen9_add_buffer_gpe_surface(ctx,
+ mbenc_param->frame_height_in_mb * sizeof(unsigned int);
+ i965_add_buffer_gpe_surface(ctx,
gpe_context,
mbenc_param->pres_mode_decision_prev,
0,
0,
VP9_BTI_MBENC_MODE_DECISION_PREV_G9);
- gen9_add_buffer_gpe_surface(ctx,
+ i965_add_buffer_gpe_surface(ctx,
gpe_context,
mbenc_param->pres_mode_decision,
0,
0,
VP9_BTI_MBENC_MODE_DECISION_G9);
- gen9_add_buffer_2d_gpe_surface(ctx,
- gpe_context,
- mbenc_param->pres_output_16x16_inter_modes,
- 1,
- I965_SURFACEFORMAT_R8_UNORM,
- VP9_BTI_MBENC_OUT_16x16_INTER_MODES_G9);
+ i965_add_buffer_2d_gpe_surface(ctx,
+ gpe_context,
+ mbenc_param->pres_output_16x16_inter_modes,
+ 1,
+ I965_SURFACEFORMAT_R8_UNORM,
+ VP9_BTI_MBENC_OUT_16x16_INTER_MODES_G9);
res_size = 160;
gen9_add_dri_buffer_gpe_surface(ctx,
gpe_context,
- mbenc_param->gpe_context_tx->dynamic_state.bo,
+ mbenc_param->gpe_context_tx->curbe.bo,
0,
ALIGN(res_size, 64),
- mbenc_param->gpe_context_tx->curbe_offset,
+ mbenc_param->gpe_context_tx->curbe.offset,
VP9_BTI_MBENC_TX_CURBE_G9);
break;
}
- case VP9_MEDIA_STATE_MBENC_TX:
- {
+ case VP9_MEDIA_STATE_MBENC_TX: {
obj_surface = mbenc_param->curr_frame_obj;
- gen9_add_2d_gpe_surface(ctx,
+ i965_add_2d_gpe_surface(ctx,
gpe_context,
obj_surface,
0,
I965_SURFACEFORMAT_R8_UNORM,
VP9_BTI_MBENC_CURR_Y_G9);
- gen9_add_2d_gpe_surface(ctx,
+ i965_add_2d_gpe_surface(ctx,
gpe_context,
obj_surface,
1,
I965_SURFACEFORMAT_R16_UINT,
VP9_BTI_MBENC_CURR_UV_G9);
- if (mbenc_param->segmentation_enabled)
- {
- gen9_add_buffer_2d_gpe_surface(ctx,
- gpe_context,
- mbenc_param->pres_segmentation_map,
- 1,
- I965_SURFACEFORMAT_R8_UNORM,
- VP9_BTI_MBENC_SEGMENTATION_MAP_G9);
+ if (mbenc_param->segmentation_enabled) {
+ i965_add_buffer_2d_gpe_surface(ctx,
+ gpe_context,
+ mbenc_param->pres_segmentation_map,
+ 1,
+ I965_SURFACEFORMAT_R8_UNORM,
+ VP9_BTI_MBENC_SEGMENTATION_MAP_G9);
}
res_size = 16 * mbenc_param->frame_width_in_mb *
- mbenc_param->frame_height_in_mb * sizeof(unsigned int);
- gen9_add_buffer_gpe_surface(ctx,
+ mbenc_param->frame_height_in_mb * sizeof(unsigned int);
+ i965_add_buffer_gpe_surface(ctx,
gpe_context,
mbenc_param->pres_mode_decision,
0,
VP9_BTI_MBENC_MODE_DECISION_G9);
res_size = frame_width_in_sb * frame_height_in_sb * 4 * sizeof(unsigned int);
- gen9_add_buffer_gpe_surface(ctx,
+ i965_add_buffer_gpe_surface(ctx,
gpe_context,
mbenc_param->pres_mb_code_surface,
0,
res_size = frame_width_in_sb * frame_height_in_sb *
64 * 16 * sizeof(unsigned int);
- gen9_add_buffer_gpe_surface(ctx,
+ i965_add_buffer_gpe_surface(ctx,
gpe_context,
mbenc_param->pres_mb_code_surface,
0,
struct gen9_encoder_context_vp9 *vme_context = encoder_context->vme_context;
struct i965_gpe_context *gpe_context, *tx_gpe_context;
struct gpe_media_object_walker_parameter media_object_walker_param;
- struct vp9_encoder_kernel_walker_parameter kernel_walker_param;
+ struct gpe_encoder_kernel_walker_parameter kernel_walker_param;
unsigned int resolution_x, resolution_y;
struct gen9_vp9_state *vp9_state;
VAEncPictureParameterBufferVP9 *pic_param;
pic_param = vp9_state->pic_param;
- switch (media_function)
- {
- case VP9_MEDIA_STATE_MBENC_I_32x32:
- mbenc_gpe_index = VP9_MBENC_IDX_KEY_32x32;
- break;
+ switch (media_function) {
+ case VP9_MEDIA_STATE_MBENC_I_32x32:
+ mbenc_gpe_index = VP9_MBENC_IDX_KEY_32x32;
+ break;
- case VP9_MEDIA_STATE_MBENC_I_16x16:
- mbenc_gpe_index = VP9_MBENC_IDX_KEY_16x16;
- break;
+ case VP9_MEDIA_STATE_MBENC_I_16x16:
+ mbenc_gpe_index = VP9_MBENC_IDX_KEY_16x16;
+ break;
- case VP9_MEDIA_STATE_MBENC_P:
- mbenc_gpe_index = VP9_MBENC_IDX_INTER;
- break;
+ case VP9_MEDIA_STATE_MBENC_P:
+ mbenc_gpe_index = VP9_MBENC_IDX_INTER;
+ break;
- case VP9_MEDIA_STATE_MBENC_TX:
- mbenc_gpe_index = VP9_MBENC_IDX_TX;
- break;
+ case VP9_MEDIA_STATE_MBENC_TX:
+ mbenc_gpe_index = VP9_MBENC_IDX_TX;
+ break;
- default:
- va_status = VA_STATUS_ERROR_OPERATION_FAILED;
- return va_status;
+ default:
+ va_status = VA_STATUS_ERROR_OPERATION_FAILED;
+ return va_status;
}
gpe_context = &(vme_context->mbenc_context.gpe_contexts[mbenc_gpe_index]);
gen9_gpe_reset_binding_table(ctx, gpe_context);
// Set curbe
- if (!vp9_state->mbenc_curbe_set_in_brc_update)
- {
- if(media_function == VP9_MEDIA_STATE_MBENC_I_32x32 ||
- media_function == VP9_MEDIA_STATE_MBENC_P)
- {
+ if (!vp9_state->mbenc_curbe_set_in_brc_update) {
+ if (media_function == VP9_MEDIA_STATE_MBENC_I_32x32 ||
+ media_function == VP9_MEDIA_STATE_MBENC_P) {
memset(&curbe_param, 0, sizeof(curbe_param));
curbe_param.ppic_param = vp9_state->pic_param;
curbe_param.pseq_param = vp9_state->seq_param;
surface_param.ps4x_memv_data_buffer = &vme_context->s4x_memv_data_buffer;
surface_param.ps4x_memv_distortion_buffer = &vme_context->s4x_memv_distortion_buffer;
surface_param.pres_mode_decision =
- &vme_context->res_mode_decision[vp9_state->curr_mode_decision_index];
+ &vme_context->res_mode_decision[vp9_state->curr_mode_decision_index];
surface_param.pres_mode_decision_prev =
- &vme_context->res_mode_decision[!vp9_state->curr_mode_decision_index];
+ &vme_context->res_mode_decision[!vp9_state->curr_mode_decision_index];
surface_param.pres_output_16x16_inter_modes = &vme_context->res_output_16x16_inter_modes;
surface_param.pres_mbenc_curbe_buffer = NULL;
surface_param.last_ref_obj = vp9_state->last_ref_obj;
}
static void
-gen9_init_gpe_context_vp9(struct i965_gpe_context *gpe_context,
+gen9_init_gpe_context_vp9(VADriverContextP ctx,
+ struct i965_gpe_context *gpe_context,
struct vp9_encoder_kernel_parameter *kernel_param)
{
+ struct i965_driver_data *i965 = i965_driver_data(ctx);
+
gpe_context->curbe.length = kernel_param->curbe_size; // in bytes
- gpe_context->curbe_size = ALIGN(kernel_param->curbe_size, 64);
+ gpe_context->sampler.entry_size = 0;
+ gpe_context->sampler.max_entries = 0;
- gpe_context->sampler_size = 0;
if (kernel_param->sampler_size) {
- gpe_context->sampler_size = ALIGN(kernel_param->sampler_size, 64);
+ gpe_context->sampler.entry_size = ALIGN(kernel_param->sampler_size, 64);
+ gpe_context->sampler.max_entries = 1;
}
gpe_context->idrt.entry_size = ALIGN(sizeof(struct gen8_interface_descriptor_data), 64); // 8 dws, 1 register
gpe_context->idrt.max_entries = NUM_KERNELS_PER_GPE_CONTEXT;
- gpe_context->idrt_size = ALIGN(sizeof(struct gen8_interface_descriptor_data), 64) *
- NUM_KERNELS_PER_GPE_CONTEXT;
gpe_context->surface_state_binding_table.max_entries = MAX_VP9_ENCODER_SURFACES;
gpe_context->surface_state_binding_table.binding_table_offset = 0;
gpe_context->surface_state_binding_table.surface_state_offset = ALIGN(MAX_VP9_ENCODER_SURFACES * 4, 64);
gpe_context->surface_state_binding_table.length = ALIGN(MAX_VP9_ENCODER_SURFACES * 4, 64) + ALIGN(MAX_VP9_ENCODER_SURFACES * SURFACE_STATE_PADDED_SIZE_GEN9, 64);
- gpe_context->vfe_state.max_num_threads = 112; // 16 EU * 7 threads
+ if (i965->intel.eu_total > 0)
+ gpe_context->vfe_state.max_num_threads = 6 * i965->intel.eu_total;
+ else
+ gpe_context->vfe_state.max_num_threads = 112; // 16 EU * 7 threads
+
gpe_context->vfe_state.curbe_allocation_size = MAX(1, ALIGN(gpe_context->curbe.length, 32) >> 5); // in registers
gpe_context->vfe_state.urb_entry_size = MAX(1, ALIGN(kernel_param->inline_data_size, 32) >> 5); // in registers
gpe_context->vfe_state.num_urb_entries = (MAX_URB_SIZE -
gpe_context->vfe_desc5.scoreboard0.mask = 0x0F;
gpe_context->vfe_desc5.scoreboard0.type = 1;
- gpe_context->vfe_desc6.scoreboard1.delta_x0 = 0x0;
- gpe_context->vfe_desc6.scoreboard1.delta_y0 = 0xF;
+ gpe_context->vfe_desc6.scoreboard1.delta_x0 = 0;
+ gpe_context->vfe_desc6.scoreboard1.delta_y0 = -1;
- gpe_context->vfe_desc6.scoreboard1.delta_x1 = 0x0;
- gpe_context->vfe_desc6.scoreboard1.delta_y1 = 0xE;
+ gpe_context->vfe_desc6.scoreboard1.delta_x1 = 0;
+ gpe_context->vfe_desc6.scoreboard1.delta_y1 = -2;
- gpe_context->vfe_desc6.scoreboard1.delta_x2 = 0xF;
- gpe_context->vfe_desc6.scoreboard1.delta_y2 = 0x3;
+ gpe_context->vfe_desc6.scoreboard1.delta_x2 = -1;
+ gpe_context->vfe_desc6.scoreboard1.delta_y2 = 3;
- gpe_context->vfe_desc6.scoreboard1.delta_x3 = 0xF;
- gpe_context->vfe_desc6.scoreboard1.delta_y3 = 0x1;
+ gpe_context->vfe_desc6.scoreboard1.delta_x3 = -1;
+ gpe_context->vfe_desc6.scoreboard1.delta_y3 = 1;
} else {
// Scoreboard 0
- gpe_context->vfe_desc6.scoreboard1.delta_x0 = 0xF;
- gpe_context->vfe_desc6.scoreboard1.delta_y0 = 0x0;
+ gpe_context->vfe_desc6.scoreboard1.delta_x0 = -1;
+ gpe_context->vfe_desc6.scoreboard1.delta_y0 = 0;
// Scoreboard 1
- gpe_context->vfe_desc6.scoreboard1.delta_x1 = 0x0;
- gpe_context->vfe_desc6.scoreboard1.delta_y1 = 0xF;
+ gpe_context->vfe_desc6.scoreboard1.delta_x1 = 0;
+ gpe_context->vfe_desc6.scoreboard1.delta_y1 = -1;
// Scoreboard 2
- gpe_context->vfe_desc6.scoreboard1.delta_x2 = 0x1;
- gpe_context->vfe_desc6.scoreboard1.delta_y2 = 0xF;
+ gpe_context->vfe_desc6.scoreboard1.delta_x2 = 1;
+ gpe_context->vfe_desc6.scoreboard1.delta_y2 = -1;
// Scoreboard 3
- gpe_context->vfe_desc6.scoreboard1.delta_x3 = 0xF;
- gpe_context->vfe_desc6.scoreboard1.delta_y3 = 0xF;
+ gpe_context->vfe_desc6.scoreboard1.delta_x3 = -1;
+ gpe_context->vfe_desc6.scoreboard1.delta_y3 = -1;
// Scoreboard 4
- gpe_context->vfe_desc7.scoreboard2.delta_x4 = 0xF;
- gpe_context->vfe_desc7.scoreboard2.delta_y4 = 0x1;
+ gpe_context->vfe_desc7.scoreboard2.delta_x4 = -1;
+ gpe_context->vfe_desc7.scoreboard2.delta_y4 = 1;
// Scoreboard 5
- gpe_context->vfe_desc7.scoreboard2.delta_x5 = 0x0;
- gpe_context->vfe_desc7.scoreboard2.delta_y5 = 0xE;
+ gpe_context->vfe_desc7.scoreboard2.delta_x5 = 0;
+ gpe_context->vfe_desc7.scoreboard2.delta_y5 = -2;
// Scoreboard 6
- gpe_context->vfe_desc7.scoreboard2.delta_x6 = 0x1;
- gpe_context->vfe_desc7.scoreboard2.delta_y6 = 0xE;
+ gpe_context->vfe_desc7.scoreboard2.delta_x6 = 1;
+ gpe_context->vfe_desc7.scoreboard2.delta_y6 = -2;
// Scoreboard 7
- gpe_context->vfe_desc7.scoreboard2.delta_x6 = 0xF;
- gpe_context->vfe_desc7.scoreboard2.delta_y6 = 0xE;
+ /* Scoreboard 7: use delta_x7/delta_y7 — the old code's delta_x6/delta_y6
+ * was a copy-paste slip that overwrote Scoreboard 6's deltas. */
+ gpe_context->vfe_desc7.scoreboard2.delta_x7 = -1;
+ gpe_context->vfe_desc7.scoreboard2.delta_y7 = -2;
}
}
-#define VP9_MI_BLOCK_MASK 0x07
#define VP9_VME_REF_WIN 48
static VAStatus
gen9_encode_vp9_check_parameter(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct gen9_vp9_state *vp9_state;
}
pic_param = (VAEncPictureParameterBufferVP9 *)encode_state->pic_param_ext->buffer;
- if (pic_param->frame_width_src & VP9_MI_BLOCK_MASK ||
- pic_param->frame_height_src & VP9_MI_BLOCK_MASK ||
- pic_param->frame_width_dst & VP9_MI_BLOCK_MASK ||
- pic_param->frame_height_dst & VP9_MI_BLOCK_MASK)
- return VA_STATUS_ERROR_INVALID_PARAMETER;
-
obj_buffer = BUFFER(pic_param->coded_buf);
if (!obj_buffer ||
return VA_STATUS_ERROR_INVALID_PARAMETER;
}
seg_param = (VAEncMiscParameterTypeVP9PerSegmantParam *)
- encode_state->q_matrix->buffer;
+ encode_state->q_matrix->buffer;
}
seq_param = NULL;
return VA_STATUS_ERROR_INVALID_SURFACE;
if (!vp9_state->dys_enabled &&
- ((pic_param->frame_width_src != pic_param->frame_width_dst) ||
- (pic_param->frame_height_src != pic_param->frame_height_dst)))
+ ((pic_param->frame_width_src != pic_param->frame_width_dst) ||
+ (pic_param->frame_height_src != pic_param->frame_height_dst)))
return VA_STATUS_ERROR_UNIMPLEMENTED;
if (vp9_state->brc_enabled) {
- if (vp9_state->brc_flag_check & VP9_BRC_FAILURE) {
- WARN_ONCE("Rate control misc_parameter is required for BRC\n");
- return VA_STATUS_ERROR_INVALID_PARAMETER;
- }
-
- if (vp9_state->first_frame) {
- unsigned int brc_flag;
- VAEncMiscParameterBuffer *misc_param;
+ if (vp9_state->first_frame || vp9_state->picture_coding_type == KEY_FRAME) {
+ vp9_state->brc_reset = encoder_context->brc.need_reset || vp9_state->first_frame;
- brc_flag = VP9_BRC_SEQ | VP9_BRC_RC;
- if ((vp9_state->brc_flag_check & brc_flag) != brc_flag) {
- WARN_ONCE("SPS/RC misc is required for BRC\n");
+ if (!encoder_context->brc.framerate[0].num || !encoder_context->brc.framerate[0].den ||
+ !encoder_context->brc.bits_per_second[0])
return VA_STATUS_ERROR_INVALID_PARAMETER;
+
+ vp9_state->gop_size = encoder_context->brc.gop_size;
+ vp9_state->framerate = encoder_context->brc.framerate[0];
+ if ((vp9_state->framerate.num / vp9_state->framerate.den) > MAX_VP9_ENCODER_FRAMERATE) {
+ /* Log before clamping so the warning reports the offending input rate,
+ * not the already-clamped value. */
+ i965_log_info(ctx, "gen9_encode_vp9_check_parameter: Too high frame rate(num: %d, den: %d), max supported is %d fps.\n",
+ vp9_state->framerate.num, vp9_state->framerate.den, MAX_VP9_ENCODER_FRAMERATE);
+ vp9_state->framerate.num = MAX_VP9_ENCODER_FRAMERATE * vp9_state->framerate.den;
}
- /* check the corresponding BRC parameter for CBR and VBR */
- if (encoder_context->rate_control_mode == VA_RC_CBR) {
- vp9_state->target_bit_rate = seq_param->bits_per_second;
- vp9_state->gop_size = seq_param->intra_period;
-
- if (vp9_state->brc_flag_check & VP9_BRC_HRD) {
- VAEncMiscParameterHRD *misc_param_hrd;
-
- misc_param = (VAEncMiscParameterBuffer *)
- encode_state->misc_param[VAEncMiscParameterTypeHRD]->buffer;
- misc_param_hrd = (VAEncMiscParameterHRD *)misc_param->data;
-
- vp9_state->init_vbv_buffer_fullness_in_bit = misc_param_hrd->initial_buffer_fullness;
- vp9_state->vbv_buffer_size_in_bit = misc_param_hrd->buffer_size;
- }
-
- if (vp9_state->brc_flag_check & VP9_BRC_FR) {
- VAEncMiscParameterFrameRate *misc_param_fr;
-
- misc_param = (VAEncMiscParameterBuffer *)
- encode_state->misc_param[VAEncMiscParameterTypeFrameRate]->buffer;
- misc_param_fr = (VAEncMiscParameterFrameRate *)misc_param->data;
-
- vp9_state->frame_rate = misc_param_fr->framerate;
- } else {
- /* Assign the default frame rate */
- vp9_state->frame_rate = 30;
- }
-
- /* RC misc will override HRD parameter */
- if (vp9_state->brc_flag_check & VP9_BRC_RC) {
- VAEncMiscParameterRateControl *misc_param_rc;
-
- misc_param = (VAEncMiscParameterBuffer *)
- encode_state->misc_param[VAEncMiscParameterTypeRateControl]->buffer;
- misc_param_rc = (VAEncMiscParameterRateControl *)misc_param->data;
-
- vp9_state->target_bit_rate = misc_param_rc->bits_per_second;
- vp9_state->vbv_buffer_size_in_bit = (misc_param_rc->bits_per_second / 1000) *
- misc_param_rc->window_size;
- vp9_state->init_vbv_buffer_fullness_in_bit = vp9_state->vbv_buffer_size_in_bit / 2;
- vp9_state->window_size = misc_param_rc->window_size;
- }
+ if (encoder_context->rate_control_mode == VA_RC_CBR ||
+ !encoder_context->brc.target_percentage[0]) {
+ vp9_state->target_bit_rate = encoder_context->brc.bits_per_second[0];
vp9_state->max_bit_rate = vp9_state->target_bit_rate;
vp9_state->min_bit_rate = vp9_state->target_bit_rate;
} else {
- /* VBR mode */
- brc_flag = VP9_BRC_SEQ | VP9_BRC_RC;
- vp9_state->target_bit_rate = seq_param->bits_per_second;
- vp9_state->gop_size = seq_param->intra_period;
-
- if (vp9_state->brc_flag_check & VP9_BRC_FR) {
- VAEncMiscParameterFrameRate *misc_param_fr;
-
- misc_param = (VAEncMiscParameterBuffer *)
- encode_state->misc_param[VAEncMiscParameterTypeFrameRate]->buffer;
- misc_param_fr = (VAEncMiscParameterFrameRate *)misc_param->data;
-
- vp9_state->frame_rate = misc_param_fr->framerate;
- } else {
- /* Assign the default frame rate */
- vp9_state->frame_rate = 30;
- }
-
- if (vp9_state->brc_flag_check & VP9_BRC_RC) {
- VAEncMiscParameterRateControl *misc_param_rc;
-
- misc_param = (VAEncMiscParameterBuffer *)
- encode_state->misc_param[VAEncMiscParameterTypeRateControl]->buffer;
- misc_param_rc = (VAEncMiscParameterRateControl *)misc_param->data;
-
- vp9_state->max_bit_rate = misc_param_rc->bits_per_second;
- vp9_state->vbv_buffer_size_in_bit = (misc_param_rc->bits_per_second / 1000) *
- misc_param_rc->window_size;
- vp9_state->init_vbv_buffer_fullness_in_bit = vp9_state->vbv_buffer_size_in_bit / 2;
- vp9_state->target_bit_rate = (misc_param_rc->bits_per_second / 100) *
- misc_param_rc->target_percentage;
- vp9_state->min_bit_rate = (misc_param_rc->bits_per_second / 100) *
- (2 * misc_param_rc->target_percentage - 100);
- vp9_state->target_percentage = misc_param_rc->target_percentage;
- vp9_state->window_size = misc_param_rc->window_size;
- }
- }
- }
- else if (vp9_state->picture_coding_type == KEY_FRAME){
- VAEncMiscParameterBuffer *misc_param;
- /* update the BRC parameter only when it is key-frame */
- /* If the parameter related with RC is changed. Reset BRC */
- if (vp9_state->brc_flag_check & VP9_BRC_FR) {
- VAEncMiscParameterFrameRate *misc_param_fr;
-
- misc_param = (VAEncMiscParameterBuffer *)
- encode_state->misc_param[VAEncMiscParameterTypeFrameRate]->buffer;
- misc_param_fr = (VAEncMiscParameterFrameRate *)misc_param->data;
-
- if (vp9_state->frame_rate != misc_param_fr->framerate) {
- vp9_state->brc_reset = 1;
- vp9_state->frame_rate = misc_param_fr->framerate;
- }
- }
-
- /* check the GOP size. And bit_per_second in SPS is ignored */
- if (vp9_state->brc_flag_check & VP9_BRC_SEQ) {
- if (vp9_state->gop_size != seq_param->intra_period) {
- vp9_state->brc_reset = 1;
- vp9_state->gop_size = seq_param->intra_period;
- }
+ vp9_state->max_bit_rate = encoder_context->brc.bits_per_second[0];
+ vp9_state->target_bit_rate = vp9_state->max_bit_rate * encoder_context->brc.target_percentage[0] / 100;
+ if (2 * vp9_state->target_bit_rate < vp9_state->max_bit_rate)
+ vp9_state->min_bit_rate = 0;
+ else
+ vp9_state->min_bit_rate = 2 * vp9_state->target_bit_rate - vp9_state->max_bit_rate;
}
- /* update the bit_per_second */
- if (vp9_state->brc_flag_check & VP9_BRC_RC) {
- VAEncMiscParameterRateControl *misc_param_rc;
-
- misc_param = (VAEncMiscParameterBuffer *)
- encode_state->misc_param[VAEncMiscParameterTypeRateControl]->buffer;
- misc_param_rc = (VAEncMiscParameterRateControl *)misc_param->data;
-
- if (encoder_context->rate_control_mode == VA_RC_CBR) {
- if (vp9_state->target_bit_rate != misc_param_rc->bits_per_second ||
- vp9_state->window_size != misc_param_rc->window_size) {
- vp9_state->target_bit_rate = misc_param_rc->bits_per_second;
- vp9_state->vbv_buffer_size_in_bit = (misc_param_rc->bits_per_second / 1000) *
- misc_param_rc->window_size;
- vp9_state->init_vbv_buffer_fullness_in_bit = vp9_state->vbv_buffer_size_in_bit * 2;
- vp9_state->window_size = misc_param_rc->window_size;
- vp9_state->max_bit_rate = vp9_state->target_bit_rate;
- vp9_state->min_bit_rate = vp9_state->target_bit_rate;
- vp9_state->brc_reset = 1;
- }
- } else {
- /* VBR mode */
- if (vp9_state->max_bit_rate != misc_param_rc->bits_per_second ||
- vp9_state->target_percentage != misc_param_rc->target_percentage) {
-
- vp9_state->target_bit_rate = (misc_param_rc->bits_per_second / 100) *
- misc_param_rc->target_percentage;
- vp9_state->min_bit_rate = (misc_param_rc->bits_per_second / 100) *
- (2 * misc_param_rc->target_percentage - 100);
- vp9_state->max_bit_rate = misc_param_rc->bits_per_second;
- vp9_state->vbv_buffer_size_in_bit = (misc_param_rc->bits_per_second / 1000) *
- misc_param_rc->window_size;
- vp9_state->init_vbv_buffer_fullness_in_bit = vp9_state->vbv_buffer_size_in_bit / 2;
- vp9_state->target_percentage = misc_param_rc->target_percentage;
- vp9_state->window_size = misc_param_rc->window_size;
- vp9_state->brc_reset = 1;
- }
- }
- }
+ if (encoder_context->brc.hrd_buffer_size)
+ vp9_state->vbv_buffer_size_in_bit = encoder_context->brc.hrd_buffer_size;
+ else if (encoder_context->brc.window_size)
+ vp9_state->vbv_buffer_size_in_bit = (uint64_t)vp9_state->max_bit_rate * encoder_context->brc.window_size / 1000;
+ else
+ vp9_state->vbv_buffer_size_in_bit = vp9_state->max_bit_rate;
+ if (encoder_context->brc.hrd_initial_buffer_fullness)
+ vp9_state->init_vbv_buffer_fullness_in_bit = encoder_context->brc.hrd_initial_buffer_fullness;
+ else
+ vp9_state->init_vbv_buffer_fullness_in_bit = vp9_state->vbv_buffer_size_in_bit / 2;
}
}
vp9_state->downscaled_height_16x_in_mb = vp9_state->frame_height_16x / 16;
vp9_state->dys_in_use = 0;
- if(pic_param->frame_width_src != pic_param->frame_width_dst ||
- pic_param->frame_height_src != pic_param->frame_height_dst)
+ if (pic_param->frame_width_src != pic_param->frame_width_dst ||
+ pic_param->frame_height_src != pic_param->frame_height_dst)
vp9_state->dys_in_use = 1;
vp9_state->dys_ref_frame_flag = 0;
/* check the dys setting. The dys is supported by default. */
vp9_state->dys_ref_frame_flag = vp9_state->ref_frame_flag;
if ((vp9_state->ref_frame_flag & VP9_LAST_REF) &&
- vp9_state->last_ref_obj) {
+ vp9_state->last_ref_obj) {
obj_surface = vp9_state->last_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
vp9_state->dys_ref_frame_flag &= ~(VP9_LAST_REF);
}
if ((vp9_state->ref_frame_flag & VP9_GOLDEN_REF) &&
- vp9_state->golden_ref_obj) {
+ vp9_state->golden_ref_obj) {
obj_surface = vp9_state->golden_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
vp9_state->dys_ref_frame_flag &= ~(VP9_GOLDEN_REF);
}
if ((vp9_state->ref_frame_flag & VP9_ALT_REF) &&
- vp9_state->alt_ref_obj) {
+ vp9_state->alt_ref_obj) {
obj_surface = vp9_state->alt_ref_obj;
vp9_priv_surface = (struct gen9_surface_vp9 *)(obj_surface->private_data);
static VAStatus
gen9_vme_gpe_kernel_prepare_vp9(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
struct gen9_encoder_context_vp9 *vme_context = encoder_context->vme_context;
struct vp9_surface_param surface_param;
driver_header_flag = 0;
vp9_state->frame_header.bit_offset_first_partition_size =
- pic_param->bit_offset_first_partition_size;
+ pic_param->bit_offset_first_partition_size;
vp9_state->header_length = ALIGN(length_in_bits, 8) >> 3;
vp9_state->alias_insert_data = header_data;
}
va_status = i965_check_alloc_surface_bo(ctx, encode_state->input_yuv_object,
- 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
+ 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
if (va_status != VA_STATUS_SUCCESS)
return va_status;
va_status = i965_check_alloc_surface_bo(ctx, encode_state->reconstructed_object,
- 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
+ 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
if (va_status != VA_STATUS_SUCCESS)
return va_status;
surface_param.frame_width = pic_param->frame_width_dst;
surface_param.frame_height = pic_param->frame_height_dst;
va_status = gen9_vp9_check_dys_surfaces(ctx,
- encode_state->reconstructed_object,
- &surface_param);
+ encode_state->reconstructed_object,
+ &surface_param);
if (va_status)
return va_status;
if (vp9_state->dys_ref_frame_flag) {
if ((vp9_state->dys_ref_frame_flag & VP9_LAST_REF) &&
- vp9_state->last_ref_obj) {
+ vp9_state->last_ref_obj) {
obj_surface = vp9_state->last_ref_obj;
surface_param.frame_width = vp9_state->frame_width;
surface_param.frame_height = vp9_state->frame_height;
va_status = gen9_vp9_check_dys_surfaces(ctx,
- obj_surface,
- &surface_param);
+ obj_surface,
+ &surface_param);
if (va_status)
return va_status;
}
if ((vp9_state->dys_ref_frame_flag & VP9_GOLDEN_REF) &&
- vp9_state->golden_ref_obj) {
+ vp9_state->golden_ref_obj) {
obj_surface = vp9_state->golden_ref_obj;
surface_param.frame_width = vp9_state->frame_width;
surface_param.frame_height = vp9_state->frame_height;
va_status = gen9_vp9_check_dys_surfaces(ctx,
- obj_surface,
- &surface_param);
+ obj_surface,
+ &surface_param);
if (va_status)
return va_status;
}
if ((vp9_state->dys_ref_frame_flag & VP9_ALT_REF) &&
- vp9_state->alt_ref_obj) {
+ vp9_state->alt_ref_obj) {
obj_surface = vp9_state->alt_ref_obj;
surface_param.frame_width = vp9_state->frame_width;
surface_param.frame_height = vp9_state->frame_height;
va_status = gen9_vp9_check_dys_surfaces(ctx,
- obj_surface,
- &surface_param);
+ obj_surface,
+ &surface_param);
if (va_status)
return va_status;
static VAStatus
gen9_vme_gpe_kernel_init_vp9(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct gen9_encoder_context_vp9 *vme_context = encoder_context->vme_context;
* curbe_buffer.
*/
ds_param.bo_size = ALIGN(sizeof(vp9_mbenc_curbe_data), 64) + 128 +
- ALIGN(sizeof(struct gen8_interface_descriptor_data), 64) * NUM_VP9_MBENC;
+ ALIGN(sizeof(struct gen8_interface_descriptor_data), 64) * NUM_VP9_MBENC;
mbenc_context->mbenc_bo_dys = dri_bo_alloc(i965->intel.bufmgr,
"mbenc_dys",
ds_param.bo_size,
ds_param.sampler_offset = ALIGN(sizeof(vp9_mbenc_curbe_data), 64);
for (i = 0; i < NUM_VP9_MBENC; i++) {
ds_param.idrt_offset = ds_param.sampler_offset + 128 +
- ALIGN(sizeof(struct gen8_interface_descriptor_data), 64) * i;
+ ALIGN(sizeof(struct gen8_interface_descriptor_data), 64) * i;
gen8_gpe_context_set_dynamic_buffer(ctx,
&mbenc_context->gpe_contexts[i],
static VAStatus
gen9_vme_gpe_kernel_run_vp9(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
struct gen9_encoder_context_vp9 *vme_context = encoder_context->vme_context;
struct gen9_vp9_state *vp9_state;
static VAStatus
gen9_vme_pipeline_vp9(VADriverContextP ctx,
- VAProfile profile,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ VAProfile profile,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
VAStatus va_status;
struct gen9_vp9_state *vp9_state;
return va_status;
va_status = gen9_vp9_allocate_resources(ctx, encode_state,
- encoder_context,
- !vp9_state->brc_allocated);
+ encoder_context,
+ !vp9_state->brc_allocated);
if (va_status != VA_STATUS_SUCCESS)
return va_status;
static void
gen9_vme_scaling_context_init_vp9(VADriverContextP ctx,
- struct gen9_encoder_context_vp9 *vme_context,
- struct vp9_scaling_context *scaling_context)
+ struct gen9_encoder_context_vp9 *vme_context,
+ struct vp9_scaling_context *scaling_context)
{
struct i965_gpe_context *gpe_context = NULL;
struct vp9_encoder_kernel_parameter kernel_param;
scoreboard_param.walkpat_flag = 0;
gpe_context = &scaling_context->gpe_contexts[0];
- gen9_init_gpe_context_vp9(gpe_context, &kernel_param);
+ gen9_init_gpe_context_vp9(ctx, gpe_context, &kernel_param);
gen9_init_vfe_scoreboard_vp9(gpe_context, &scoreboard_param);
scaling_context->scaling_4x_bti.scaling_frame_src_y = VP9_BTI_SCALING_FRAME_SRC_Y;
scaling_context->scaling_4x_bti.scaling_frame_dst_y = VP9_BTI_SCALING_FRAME_DST_Y;
scaling_context->scaling_4x_bti.scaling_frame_mbv_proc_stat_dst =
- VP9_BTI_SCALING_FRAME_MBVPROCSTATS_DST_CM;
+ VP9_BTI_SCALING_FRAME_MBVPROCSTATS_DST_CM;
memset(&scale_kernel, 0, sizeof(scale_kernel));
kernel_param.sampler_size = 0;
gpe_context = &scaling_context->gpe_contexts[1];
- gen9_init_gpe_context_vp9(gpe_context, &kernel_param);
+ gen9_init_gpe_context_vp9(ctx, gpe_context, &kernel_param);
gen9_init_vfe_scoreboard_vp9(gpe_context, &scoreboard_param);
memset(&scale_kernel, 0, sizeof(scale_kernel));
scoreboard_param.walkpat_flag = 0;
gpe_context = &me_context->gpe_context;
- gen9_init_gpe_context_vp9(gpe_context, &kernel_param);
+ gen9_init_gpe_context_vp9(ctx, gpe_context, &kernel_param);
gen9_init_vfe_scoreboard_vp9(gpe_context, &scoreboard_param);
memset(&scale_kernel, 0, sizeof(scale_kernel));
static void
gen9_vme_mbenc_context_init_vp9(VADriverContextP ctx,
- struct gen9_encoder_context_vp9 *vme_context,
- struct vp9_mbenc_context *mbenc_context)
+ struct gen9_encoder_context_vp9 *vme_context,
+ struct vp9_mbenc_context *mbenc_context)
{
struct i965_gpe_context *gpe_context = NULL;
struct vp9_encoder_kernel_parameter kernel_param;
} else
scoreboard_param.walkpat_flag = 0;
- gen9_init_gpe_context_vp9(gpe_context, &kernel_param);
+ gen9_init_gpe_context_vp9(ctx, gpe_context, &kernel_param);
gen9_init_vfe_scoreboard_vp9(gpe_context, &scoreboard_param);
memset(&scale_kernel, 0, sizeof(scale_kernel));
intel_vp9_get_kernel_header_and_size((void *)media_vp9_kernels,
- sizeof(media_vp9_kernels),
- INTEL_VP9_ENC_MBENC,
- i,
- &scale_kernel);
+ sizeof(media_vp9_kernels),
+ INTEL_VP9_ENC_MBENC,
+ i,
+ &scale_kernel);
gen8_gpe_load_kernels(ctx,
gpe_context,
for (i = 0; i < NUM_VP9_BRC; i++) {
gpe_context = &brc_context->gpe_contexts[i];
- gen9_init_gpe_context_vp9(gpe_context, &kernel_param);
+ gen9_init_gpe_context_vp9(ctx, gpe_context, &kernel_param);
gen9_init_vfe_scoreboard_vp9(gpe_context, &scoreboard_param);
memset(&scale_kernel, 0, sizeof(scale_kernel));
intel_vp9_get_kernel_header_and_size((void *)media_vp9_kernels,
- sizeof(media_vp9_kernels),
- INTEL_VP9_ENC_BRC,
- i,
- &scale_kernel);
+ sizeof(media_vp9_kernels),
+ INTEL_VP9_ENC_BRC,
+ i,
+ &scale_kernel);
gen8_gpe_load_kernels(ctx,
gpe_context,
static void
gen9_vme_dys_context_init_vp9(VADriverContextP ctx,
- struct gen9_encoder_context_vp9 *vme_context,
- struct vp9_dys_context *dys_context)
+ struct gen9_encoder_context_vp9 *vme_context,
+ struct vp9_dys_context *dys_context)
{
struct i965_gpe_context *gpe_context = NULL;
struct vp9_encoder_kernel_parameter kernel_param;
scoreboard_param.walkpat_flag = 0;
gpe_context = &dys_context->gpe_context;
- gen9_init_gpe_context_vp9(gpe_context, &kernel_param);
+ gen9_init_gpe_context_vp9(ctx, gpe_context, &kernel_param);
gen9_init_vfe_scoreboard_vp9(gpe_context, &scoreboard_param);
memset(&scale_kernel, 0, sizeof(scale_kernel));
static Bool
gen9_vme_kernels_context_init_vp9(VADriverContextP ctx,
- struct intel_encoder_context *encoder_context,
- struct gen9_encoder_context_vp9 *vme_context)
+ struct intel_encoder_context *encoder_context,
+ struct gen9_encoder_context_vp9 *vme_context)
{
gen9_vme_scaling_context_init_vp9(ctx, vme_context, &vme_context->scaling_context);
gen9_vme_me_context_init_vp9(ctx, vme_context, &vme_context->me_context);
if (index % 2) {
vp9_element->b_valid = 1;
vp9_element->b_probdiff_select = 1;
- vp9_element->b_prob_select = (prob == 252) ? 1: 0;
+ vp9_element->b_prob_select = (prob == 252) ? 1 : 0;
vp9_element->b_bin = value;
} else {
vp9_element->a_valid = 1;
vp9_element->a_probdiff_select = 1;
- vp9_element->a_prob_select = (prob == 252) ? 1: 0;
+ vp9_element->a_prob_select = (prob == 252) ? 1 : 0;
vp9_element->a_bin = value;
}
}
pic_param = vp9_state->pic_param;
if ((pic_param->pic_flags.bits.frame_type == HCP_VP9_KEY_FRAME) ||
(pic_param->pic_flags.bits.intra_only) ||
- pic_param->pic_flags.bits.error_resilient_mode) {
+ pic_param->pic_flags.bits.error_resilient_mode) {
/* reset current frame_context */
intel_init_default_vp9_probs(&vp9_state->vp9_current_fc);
if ((pic_param->pic_flags.bits.frame_type == HCP_VP9_KEY_FRAME) ||
(pic_param->chroma_dc_qindex_delta == 0)) {
/* lossless flag */
/* nothing is needed */
- gen9_vp9_write_compressed_element(buffer,
+ gen9_vp9_write_compressed_element(buffer,
0, 128, 0);
- gen9_vp9_write_compressed_element(buffer,
+ gen9_vp9_write_compressed_element(buffer,
1, 128, 0);
- gen9_vp9_write_compressed_element(buffer,
+ gen9_vp9_write_compressed_element(buffer,
2, 128, 0);
} else {
if (vp9_state->tx_mode == TX_MODE_SELECT) {
gen9_vp9_write_compressed_element(buffer,
- 0, 128, 1);
+ 0, 128, 1);
gen9_vp9_write_compressed_element(buffer,
- 1, 128, 1);
+ 1, 128, 1);
gen9_vp9_write_compressed_element(buffer,
- 2, 128, 1);
+ 2, 128, 1);
} else if (vp9_state->tx_mode == ALLOW_32X32) {
gen9_vp9_write_compressed_element(buffer,
- 0, 128, 1);
+ 0, 128, 1);
gen9_vp9_write_compressed_element(buffer,
- 1, 128, 1);
+ 1, 128, 1);
gen9_vp9_write_compressed_element(buffer,
- 2, 128, 0);
+ 2, 128, 0);
} else {
unsigned int tx_mode;
tx_mode = vp9_state->tx_mode;
gen9_vp9_write_compressed_element(buffer,
- 0, 128, ((tx_mode) & 2));
+ 0, 128, ((tx_mode) & 2));
gen9_vp9_write_compressed_element(buffer,
- 1, 128, ((tx_mode) & 1));
+ 1, 128, ((tx_mode) & 1));
gen9_vp9_write_compressed_element(buffer,
- 2, 128, 0);
+ 2, 128, 0);
}
if (vp9_state->tx_mode == TX_MODE_SELECT) {
gen9_vp9_write_compressed_element(buffer,
- 3, 128, 0);
+ 3, 128, 0);
gen9_vp9_write_compressed_element(buffer,
- 7, 128, 0);
+ 7, 128, 0);
gen9_vp9_write_compressed_element(buffer,
- 15, 128, 0);
+ 15, 128, 0);
}
}
- /*Setup all the input&output object*/
+ /*Setup all the input&output object*/
{
/* update the coeff_update flag */
gen9_vp9_write_compressed_element(buffer,
- 27, 128, 0);
+ 27, 128, 0);
gen9_vp9_write_compressed_element(buffer,
- 820, 128, 0);
+ 820, 128, 0);
gen9_vp9_write_compressed_element(buffer,
- 1613, 128, 0);
+ 1613, 128, 0);
gen9_vp9_write_compressed_element(buffer,
- 2406, 128, 0);
+ 2406, 128, 0);
}
- if (pic_param->pic_flags.bits.frame_type && !pic_param->pic_flags.bits.intra_only)
- {
+ if (pic_param->pic_flags.bits.frame_type && !pic_param->pic_flags.bits.intra_only) {
bool allow_comp = !(
- (pic_param->ref_flags.bits.ref_last_sign_bias && pic_param->ref_flags.bits.ref_gf_sign_bias && pic_param->ref_flags.bits.ref_arf_sign_bias) ||
- (!pic_param->ref_flags.bits.ref_last_sign_bias && !pic_param->ref_flags.bits.ref_gf_sign_bias && !pic_param->ref_flags.bits.ref_arf_sign_bias)
- );
+ (pic_param->ref_flags.bits.ref_last_sign_bias && pic_param->ref_flags.bits.ref_gf_sign_bias && pic_param->ref_flags.bits.ref_arf_sign_bias) ||
+ (!pic_param->ref_flags.bits.ref_last_sign_bias && !pic_param->ref_flags.bits.ref_gf_sign_bias && !pic_param->ref_flags.bits.ref_arf_sign_bias)
+ );
- if (allow_comp)
- {
+ if (allow_comp) {
if (pic_param->pic_flags.bits.comp_prediction_mode == REFERENCE_MODE_SELECT) {
gen9_vp9_write_compressed_element(buffer,
- 3271, 128, 1);
+ 3271, 128, 1);
gen9_vp9_write_compressed_element(buffer,
- 3272, 128, 1);
- }
- else if (pic_param->pic_flags.bits.comp_prediction_mode == COMPOUND_REFERENCE) {
+ 3272, 128, 1);
+ } else if (pic_param->pic_flags.bits.comp_prediction_mode == COMPOUND_REFERENCE) {
gen9_vp9_write_compressed_element(buffer,
- 3271, 128, 1);
+ 3271, 128, 1);
gen9_vp9_write_compressed_element(buffer,
- 3272, 128, 0);
- }
- else {
+ 3272, 128, 0);
+ } else {
gen9_vp9_write_compressed_element(buffer,
- 3271, 128, 0);
+ 3271, 128, 0);
gen9_vp9_write_compressed_element(buffer,
- 3272, 128, 0);
+ 3272, 128, 0);
}
}
}
static void
gen9_pak_vp9_pipe_mode_select(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context,
- struct gen9_hcpe_pipe_mode_select_param *pipe_mode_param)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context,
+ struct gen9_hcpe_pipe_mode_select_param *pipe_mode_param)
{
struct intel_batchbuffer *batch = encoder_context->base.batch;
static void
gen9_vp9_add_surface_state(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context,
- hcp_surface_state *hcp_state)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context,
+ hcp_surface_state *hcp_state)
{
struct intel_batchbuffer *batch = encoder_context->base.batch;
if (!hcp_state)
struct encode_state *encode_state,
struct intel_encoder_context *encoder_context)
{
+ struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = encoder_context->base.batch;
struct gen9_encoder_context_vp9 *pak_context = encoder_context->mfc_context;
struct gen9_vp9_state *vp9_state;
vp9_state = (struct gen9_vp9_state *)(encoder_context->enc_priv_state);
if (!vp9_state || !vp9_state->pic_param)
- return;
+ return;
BEGIN_BCS_BATCH(batch, 104);
obj_surface->bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 4..6 deblocking line */
OUT_RELOC64(batch,
pak_context->res_deblocking_filter_line_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 7..9 deblocking tile line */
OUT_RELOC64(batch,
pak_context->res_deblocking_filter_tile_line_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 10..12 deblocking tile col */
OUT_RELOC64(batch,
pak_context->res_deblocking_filter_tile_col_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 13..15 metadata line */
OUT_RELOC64(batch,
pak_context->res_metadata_line_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 16..18 metadata tile line */
OUT_RELOC64(batch,
pak_context->res_metadata_tile_line_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 19..21 metadata tile col */
OUT_RELOC64(batch,
pak_context->res_metadata_tile_col_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 22..30 SAO is not used for VP9 */
OUT_BCS_BATCH(batch, 0);
pak_context->res_mv_temporal_buffer[vp9_state->curr_mv_temporal_index].bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 34..36 Not used */
OUT_BCS_BATCH(batch, 0);
OUT_BCS_BATCH(batch, 0);
}
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 54..56 for source input */
OUT_RELOC64(batch,
pak_context->uncompressed_picture_source.bo,
I915_GEM_DOMAIN_INSTRUCTION, 0,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 57..59 StreamOut is not used */
OUT_BCS_BATCH(batch, 0);
if (vp9_state->picture_coding_type) {
int prev_index = vp9_state->curr_mv_temporal_index ^ 0x01;
OUT_RELOC64(batch,
- pak_context->res_mv_temporal_buffer[prev_index].bo,
- I915_GEM_DOMAIN_INSTRUCTION, 0,
- 0);
+ pak_context->res_mv_temporal_buffer[prev_index].bo,
+ I915_GEM_DOMAIN_INSTRUCTION, 0,
+ 0);
} else {
OUT_BCS_BATCH(batch, 0);
OUT_BCS_BATCH(batch, 0);
OUT_BCS_BATCH(batch, 0);
OUT_BCS_BATCH(batch, 0);
}
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 83..85 VP9 prob buffer */
OUT_RELOC64(batch,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 86..88 Segment id buffer */
if (pak_context->res_segmentid_buffer.bo) {
OUT_BCS_BATCH(batch, 0);
OUT_BCS_BATCH(batch, 0);
}
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 89..91 HVD line rowstore buffer */
OUT_RELOC64(batch,
pak_context->res_hvd_line_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 92..94 HVD tile line rowstore buffer */
OUT_RELOC64(batch,
pak_context->res_hvd_tile_line_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 95..97 SAO streamout. Not used for VP9 */
OUT_BCS_BATCH(batch, 0);
struct encode_state *encode_state,
struct intel_encoder_context *encoder_context)
{
+ struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = encoder_context->base.batch;
struct gen9_encoder_context_vp9 *pak_context = encoder_context->mfc_context;
struct gen9_vp9_state *vp9_state;
I915_GEM_DOMAIN_INSTRUCTION, 0, /* No write domain */
vp9_state->mb_data_offset);
/* default attribute */
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 9..11, PAK-BSE */
OUT_RELOC64(batch,
- pak_context->indirect_pak_bse_object.bo,
- I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
- pak_context->indirect_pak_bse_object.offset);
- OUT_BCS_BATCH(batch, 0);
+ pak_context->indirect_pak_bse_object.bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ pak_context->indirect_pak_bse_object.offset);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 12..13 upper bound */
OUT_RELOC64(batch,
- pak_context->indirect_pak_bse_object.bo,
- I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
- pak_context->indirect_pak_bse_object.end_offset);
+ pak_context->indirect_pak_bse_object.bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ pak_context->indirect_pak_bse_object.end_offset);
/* DW 14..16 compressed header buffer */
OUT_RELOC64(batch,
pak_context->res_compressed_input_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, 0,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 17..19 prob counter streamout */
OUT_RELOC64(batch,
pak_context->res_prob_counter_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 20..22 prob delta streamin */
OUT_RELOC64(batch,
pak_context->res_prob_delta_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 23..25 Tile record streamout */
OUT_RELOC64(batch,
pak_context->res_tile_record_streamout_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
/* DW 26..28 CU record streamout */
OUT_RELOC64(batch,
pak_context->res_cu_stat_streamout_buffer.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
0);
- OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, i965->intel.mocs_state);
ADVANCE_BCS_BATCH(batch);
}
OUT_BCS_BATCH(batch,
(seg_param->seg_flags.bits.segment_reference_enabled << 3) |
(batch_value << 1) |
- (seg_param->seg_flags.bits.segment_reference_skipped <<0)
+ (seg_param->seg_flags.bits.segment_reference_skipped << 0)
);
/* DW 3..6 is not used for encoder */
static void
intel_vp9enc_construct_pak_insertobj_batchbuffer(VADriverContextP ctx,
- struct intel_encoder_context *encoder_context,
- struct i965_gpe_resource *obj_batch_buffer)
+ struct intel_encoder_context *encoder_context,
+ struct i965_gpe_resource *obj_batch_buffer)
{
struct gen9_encoder_context_vp9 *pak_context = encoder_context->mfc_context;
struct gen9_vp9_state *vp9_state;
pic_param = vp9_state->pic_param;
seg_param = vp9_state->segment_param;
- if (vp9_state->curr_pak_pass == 0)
- {
+ if (vp9_state->curr_pak_pass == 0) {
intel_vp9enc_construct_pak_insertobj_batchbuffer(ctx, encoder_context,
- &pak_context->res_pak_uncompressed_input_buffer);
+ &pak_context->res_pak_uncompressed_input_buffer);
// Check if driver already programmed pic state as part of BRC update kernel programming.
- if (!vp9_state->brc_enabled)
- {
+ if (!vp9_state->brc_enabled) {
intel_vp9enc_construct_picstate_batchbuf(ctx, encode_state,
- encoder_context, &pak_context->res_pic_state_brc_write_hfw_read_buffer);
+ encoder_context, &pak_context->res_pic_state_brc_write_hfw_read_buffer);
}
}
- if (vp9_state->curr_pak_pass == 0)
- {
+ if (vp9_state->curr_pak_pass == 0) {
intel_vp9enc_refresh_frame_internal_buffers(ctx, encoder_context);
}
/* copy the current fc to vp9_prob buffer */
memcpy(prob_ptr, &vp9_state->vp9_current_fc, sizeof(FRAME_CONTEXT));
if ((pic_param->pic_flags.bits.frame_type == HCP_VP9_KEY_FRAME) ||
- pic_param->pic_flags.bits.intra_only) {
- FRAME_CONTEXT *frame_ptr = (FRAME_CONTEXT *)prob_ptr;
+ pic_param->pic_flags.bits.intra_only) {
+ FRAME_CONTEXT *frame_ptr = (FRAME_CONTEXT *)prob_ptr;
- memcpy(frame_ptr->partition_prob, vp9_kf_partition_probs,
- sizeof(vp9_kf_partition_probs));
- memcpy(frame_ptr->uv_mode_prob, vp9_kf_uv_mode_prob,
- sizeof(vp9_kf_uv_mode_prob));
+ memcpy(frame_ptr->partition_prob, vp9_kf_partition_probs,
+ sizeof(vp9_kf_partition_probs));
+ memcpy(frame_ptr->uv_mode_prob, vp9_kf_uv_mode_prob,
+ sizeof(vp9_kf_uv_mode_prob));
}
i965_unmap_gpe_resource(&pak_context->res_prob_buffer);
}
hcp_surface.dw2.surface_format = SURFACE_FORMAT_PLANAR_420_8;
hcp_surface.dw2.y_cb_offset = obj_surface->y_cb_offset;
gen9_vp9_add_surface_state(ctx, encode_state, encoder_context,
- &hcp_surface);
+ &hcp_surface);
}
if (vp9_state->golden_ref_obj) {
obj_surface = vp9_state->golden_ref_obj;
hcp_surface.dw2.surface_format = SURFACE_FORMAT_PLANAR_420_8;
hcp_surface.dw2.y_cb_offset = obj_surface->y_cb_offset;
gen9_vp9_add_surface_state(ctx, encode_state, encoder_context,
- &hcp_surface);
+ &hcp_surface);
}
if (vp9_state->alt_ref_obj) {
obj_surface = vp9_state->alt_ref_obj;
hcp_surface.dw2.surface_format = SURFACE_FORMAT_PLANAR_420_8;
hcp_surface.dw2.y_cb_offset = obj_surface->y_cb_offset;
gen9_vp9_add_surface_state(ctx, encode_state, encoder_context,
- &hcp_surface);
+ &hcp_surface);
}
}
second_level_batch.is_second_level = 1;
second_level_batch.bo = pak_context->res_pic_state_brc_write_hfw_read_buffer.bo;
- gen9_gpe_mi_batch_buffer_start(ctx, batch, &second_level_batch);
+ gen8_gpe_mi_batch_buffer_start(ctx, batch, &second_level_batch);
if (pic_param->pic_flags.bits.segmentation_enabled &&
seg_param)
memset(&tmp_seg_param, 0, sizeof(tmp_seg_param));
seg_param = &tmp_seg_param;
}
- for (i = 0; i < segment_count; i++)
- {
+ for (i = 0; i < segment_count; i++) {
gen9_pak_vp9_segment_state(ctx, encode_state,
encoder_context,
&seg_param->seg_data[i], i);
second_level_batch.offset = 0;
second_level_batch.bo = pak_context->res_pak_uncompressed_input_buffer.bo;
- gen9_gpe_mi_batch_buffer_start(ctx, batch, &second_level_batch);
+ gen8_gpe_mi_batch_buffer_start(ctx, batch, &second_level_batch);
/* PAK_OBJECT */
second_level_batch.is_second_level = 1;
second_level_batch.offset = 0;
second_level_batch.bo = pak_context->res_mb_code_surface.bo;
- gen9_gpe_mi_batch_buffer_start(ctx, batch, &second_level_batch);
+ gen8_gpe_mi_batch_buffer_start(ctx, batch, &second_level_batch);
return;
}
status_buffer = &(vp9_state->status_buffer);
memset(&mi_flush_dw_param, 0, sizeof(mi_flush_dw_param));
- gen9_gpe_mi_flush_dw(ctx, batch, &mi_flush_dw_param);
+ gen8_gpe_mi_flush_dw(ctx, batch, &mi_flush_dw_param);
memset(&mi_store_reg_mem_param, 0, sizeof(mi_store_reg_mem_param));
mi_store_reg_mem_param.bo = status_buffer->bo;
mi_store_reg_mem_param.offset = status_buffer->bs_byte_count_offset;
mi_store_reg_mem_param.mmio_offset = status_buffer->vp9_bs_frame_reg_offset;
- gen9_gpe_mi_store_register_mem(ctx, batch, &mi_store_reg_mem_param);
+ gen8_gpe_mi_store_register_mem(ctx, batch, &mi_store_reg_mem_param);
mi_store_reg_mem_param.bo = pak_context->res_brc_bitstream_size_buffer.bo;
mi_store_reg_mem_param.offset = 0;
mi_store_reg_mem_param.mmio_offset = status_buffer->vp9_bs_frame_reg_offset;
- gen9_gpe_mi_store_register_mem(ctx, batch, &mi_store_reg_mem_param);
+ gen8_gpe_mi_store_register_mem(ctx, batch, &mi_store_reg_mem_param);
/* Read HCP Image status */
mi_store_reg_mem_param.bo = status_buffer->bo;
mi_store_reg_mem_param.offset = status_buffer->image_status_mask_offset;
mi_store_reg_mem_param.mmio_offset =
- status_buffer->vp9_image_mask_reg_offset;
- gen9_gpe_mi_store_register_mem(ctx, batch, &mi_store_reg_mem_param);
+ status_buffer->vp9_image_mask_reg_offset;
+ gen8_gpe_mi_store_register_mem(ctx, batch, &mi_store_reg_mem_param);
mi_store_reg_mem_param.bo = status_buffer->bo;
mi_store_reg_mem_param.offset = status_buffer->image_status_ctrl_offset;
mi_store_reg_mem_param.mmio_offset =
- status_buffer->vp9_image_ctrl_reg_offset;
- gen9_gpe_mi_store_register_mem(ctx, batch, &mi_store_reg_mem_param);
+ status_buffer->vp9_image_ctrl_reg_offset;
+ gen8_gpe_mi_store_register_mem(ctx, batch, &mi_store_reg_mem_param);
mi_store_reg_mem_param.bo = pak_context->res_brc_bitstream_size_buffer.bo;
mi_store_reg_mem_param.offset = 4;
mi_store_reg_mem_param.mmio_offset =
- status_buffer->vp9_image_ctrl_reg_offset;
- gen9_gpe_mi_store_register_mem(ctx, batch, &mi_store_reg_mem_param);
+ status_buffer->vp9_image_ctrl_reg_offset;
+ gen8_gpe_mi_store_register_mem(ctx, batch, &mi_store_reg_mem_param);
- gen9_gpe_mi_flush_dw(ctx, batch, &mi_flush_dw_param);
+ gen8_gpe_mi_flush_dw(ctx, batch, &mi_flush_dw_param);
return;
}
obj_surface = encode_state->reconstructed_object;
i965_check_alloc_surface_bo(ctx, obj_surface, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
+ dri_bo_unreference(pak_context->reconstructed_object.bo);
+
pak_context->reconstructed_object.bo = obj_surface->bo;
dri_bo_reference(pak_context->reconstructed_object.bo);
coded_buffer_segment = (struct i965_coded_buffer_segment *)bo->virtual;
coded_buffer_segment->mapped = 0;
coded_buffer_segment->codec = encoder_context->codec;
+ coded_buffer_segment->status_support = 1;
dri_bo_unmap(bo);
return VA_STATUS_SUCCESS;
static void
gen9_vp9_pak_brc_prepare(struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ struct intel_encoder_context *encoder_context)
{
- struct gen9_encoder_context_vp9 *pak_context = encoder_context->mfc_context;
- struct gen9_vp9_state *vp9_state;
-
- vp9_state = (struct gen9_vp9_state *)(encoder_context->enc_priv_state);
-
- if (!vp9_state || !pak_context)
- return;
-
- if (vp9_state->brc_enabled) {
- /* check the buffer related with BRC */
- vp9_state->brc_flag_check = 0;
- if (encode_state->seq_param_ext && encode_state->seq_param_ext->buffer) {
- vp9_state->brc_flag_check |= VP9_BRC_SEQ;
- }
-
- /* Frame_rate */
- if (encode_state->misc_param[VAEncMiscParameterTypeFrameRate] &&
- encode_state->misc_param[VAEncMiscParameterTypeFrameRate]->buffer) {
- vp9_state->brc_flag_check |= VP9_BRC_FR;
- }
-
- /* HRD */
- if (encode_state->misc_param[VAEncMiscParameterTypeRateControl] &&
- encode_state->misc_param[VAEncMiscParameterTypeRateControl]->buffer) {
- vp9_state->brc_flag_check |= VP9_BRC_RC;
- }
-
- if (encode_state->misc_param[VAEncMiscParameterTypeHRD] &&
- encode_state->misc_param[VAEncMiscParameterTypeHRD]->buffer) {
- vp9_state->brc_flag_check |= VP9_BRC_HRD;
- }
-
- /*
- * If user-app doesn't pass the buffer related with BRC for the first
- * frame, the error flag is returned.
- */
- if (vp9_state->brc_flag_check == 0 && vp9_state->first_frame) {
- vp9_state->brc_flag_check |= VP9_BRC_FAILURE;
- }
- }
- return;
}
static void
dri_bo_unreference(pak_context->indirect_pak_bse_object.bo);
pak_context->indirect_pak_bse_object.bo = NULL;
- for (i = 0; i < 8; i++){
+ for (i = 0; i < 8; i++) {
dri_bo_unreference(pak_context->reference_surfaces[i].bo);
pak_context->reference_surfaces[i].bo = NULL;
}
static VAStatus
gen9_vp9_pak_pipeline(VADriverContextP ctx,
- VAProfile profile,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context)
+ VAProfile profile,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = encoder_context->base.batch;
memset(&mi_load_reg_imm, 0, sizeof(mi_load_reg_imm));
mi_load_reg_imm.mmio_offset = status_buffer->vp9_image_ctrl_reg_offset;
mi_load_reg_imm.data = 0;
- gen9_gpe_mi_load_register_imm(ctx, batch, &mi_load_reg_imm);
+ gen8_gpe_mi_load_register_imm(ctx, batch, &mi_load_reg_imm);
}
gen9_vp9_pak_picture_level(ctx, encode_state, encoder_context);
gen9_vp9_read_mfc_status(ctx, encoder_context);
/* the definition of status buffer offset for VP9 */
{
struct vp9_encode_status_buffer_internal *status_buffer;
+ uint32_t base_offset = offsetof(struct i965_coded_buffer_segment, codec_private_data);
status_buffer = &vp9_state->status_buffer;
memset(status_buffer, 0,
sizeof(struct vp9_encode_status_buffer_internal));
- status_buffer->bs_byte_count_offset = 2048;
- status_buffer->image_status_mask_offset = 2052;
- status_buffer->image_status_ctrl_offset = 2056;
- status_buffer->media_index_offset = 2060;
+ status_buffer->bs_byte_count_offset = base_offset + offsetof(struct vp9_encode_status, bs_byte_count);
+ status_buffer->image_status_mask_offset = base_offset + offsetof(struct vp9_encode_status, image_status_mask);
+ status_buffer->image_status_ctrl_offset = base_offset + offsetof(struct vp9_encode_status, image_status_ctrl);
+ status_buffer->media_index_offset = base_offset + offsetof(struct vp9_encode_status, media_index);
status_buffer->vp9_bs_frame_reg_offset = 0x1E9E0;
status_buffer->vp9_image_mask_reg_offset = 0x1E9F0;
return true;
}
+static VAStatus
+gen9_vp9_get_coded_status(VADriverContextP ctx,
+ struct intel_encoder_context *encoder_context,
+ struct i965_coded_buffer_segment *coded_buf_seg)
+{
+ struct vp9_encode_status *vp9_encode_status;
+
+ if (!encoder_context || !coded_buf_seg)
+ return VA_STATUS_ERROR_INVALID_BUFFER;
+
+ vp9_encode_status = (struct vp9_encode_status *)coded_buf_seg->codec_private_data;
+ coded_buf_seg->base.size = vp9_encode_status->bs_byte_count;
+
+ /* One VACodedBufferSegment for VP9 will be added later.
+ * It will be linked to the next element of coded_buf_seg->base.next
+ */
+
+ return VA_STATUS_SUCCESS;
+}
+
Bool
gen9_vp9_pak_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
{
encoder_context->mfc_context_destroy = gen9_vp9_pak_context_destroy;
encoder_context->mfc_pipeline = gen9_vp9_pak_pipeline;
encoder_context->mfc_brc_prepare = gen9_vp9_pak_brc_prepare;
-
+ encoder_context->get_status = gen9_vp9_get_coded_status;
return true;
}
-
-VAStatus
-gen9_vp9_get_coded_status(VADriverContextP ctx,
- char *buffer,
- struct hw_context *hw_context)
-{
- struct intel_encoder_context *encoder_context =
- (struct intel_encoder_context *)hw_context;
- struct gen9_vp9_state *vp9_state = NULL;
- struct vp9_encode_status_buffer_internal *status_buffer;
- struct i965_coded_buffer_segment *coded_buf_seg;
- unsigned int *buf_ptr;
-
- if (!encoder_context || !buffer)
- return VA_STATUS_ERROR_INVALID_BUFFER;
-
- vp9_state = (struct gen9_vp9_state *)(encoder_context->enc_priv_state);
- coded_buf_seg = (struct i965_coded_buffer_segment *) buffer;
-
- if (!vp9_state)
- return VA_STATUS_ERROR_INVALID_BUFFER;
-
- status_buffer = &vp9_state->status_buffer;
-
- buf_ptr = (unsigned int *)(buffer + status_buffer->bs_byte_count_offset);
- coded_buf_seg->base.buf = buffer + I965_CODEDBUFFER_HEADER_SIZE;
- coded_buf_seg->base.next = NULL;
-
- /* the stream size is writen into the bs_byte_count_offset address of buffer */
- coded_buf_seg->base.size = *buf_ptr;
-
- coded_buf_seg->mapped = 1;
-
- /* One VACodedBufferSegment for VP9 will be added later.
- * It will be linked to the next element of coded_buf_seg->base.next
- */
-
- return VA_STATUS_SUCCESS;
-}