anv/batch_chain: Fall back to growing batches when chaining isn't available
author     Jason Ekstrand <jason.ekstrand@intel.com>
           Fri, 18 Mar 2016 23:32:46 +0000 (16:32 -0700)
committer  Jason Ekstrand <jason.ekstrand@intel.com>
           Mon, 21 Mar 2016 22:29:30 +0000 (15:29 -0700)
src/intel/vulkan/anv_batch_chain.c
src/intel/vulkan/anv_device.c
src/intel/vulkan/anv_private.h

diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index eab050f..034f3fd 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -340,6 +340,37 @@ anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
    VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
 }
 
+static VkResult
+anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
+                  struct anv_batch *batch, size_t additional,
+                  size_t batch_padding)
+{
+   assert(batch->start == bbo->bo.map);
+   bbo->length = batch->next - batch->start;
+
+   size_t new_size = bbo->bo.size;
+   while (new_size <= bbo->length + additional + batch_padding)
+      new_size *= 2;
+
+   if (new_size == bbo->bo.size)
+      return VK_SUCCESS;
+
+   struct anv_bo new_bo;
+   VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
+                                       &new_bo, new_size);
+   if (result != VK_SUCCESS)
+      return result;
+
+   memcpy(new_bo.map, bbo->bo.map, bbo->length);
+
+   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+
+   bbo->bo = new_bo;
+   anv_batch_bo_continue(bbo, batch, batch_padding);
+
+   return VK_SUCCESS;
+}
+
 static void
 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                      struct anv_cmd_buffer *cmd_buffer)
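
The doubling loop in anv_batch_bo_grow sizes the replacement BO so that it holds everything already written, the additional bytes requested, and the padding reserved at the end of the batch. A minimal, stand-alone sketch of the same size computation (plain C outside the driver; grow_size and the sample sizes are illustrative, not driver code):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Mirrors the sizing rule in anv_batch_bo_grow: keep doubling until the
 * BO can hold the current contents, the additional bytes requested, and
 * the padding reserved for the final batch-terminating command. */
static size_t
grow_size(size_t cur_size, size_t length, size_t additional, size_t padding)
{
   size_t new_size = cur_size;
   while (new_size <= length + additional + padding)
      new_size *= 2;
   return new_size;
}

int
main(void)
{
   /* Example: a 16 KiB batch that is nearly full and needs 4 KiB more. */
   size_t new_size = grow_size(16384, 15000, 4096, 16);
   assert(new_size >= 15000 + 4096 + 16);
   printf("16384 -> %zu\n", new_size); /* 32768 */
   return 0;
}

Doubling keeps the number of reallocate-and-copy cycles logarithmic in the final batch size, which matters because each grow memcpys the entire batch into the new BO.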
@@ -478,6 +509,18 @@ anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
    return VK_SUCCESS;
 }
 
+static VkResult
+anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
+{
+   struct anv_cmd_buffer *cmd_buffer = _data;
+   struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
+
+   /* Unlike chaining, growing can fail (it allocates a bigger BO from the
+    * pool), so propagate the result rather than assuming success. */
+   return anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
+                            GEN8_MI_BATCH_BUFFER_START_length * 4);
+}
+
 struct anv_state
 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                    uint32_t entries, uint32_t *state_offset)
@@ -548,9 +591,14 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
    list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
 
    cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
-   cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
    cmd_buffer->batch.user_data = cmd_buffer;
 
+   if (cmd_buffer->device->can_chain_batches) {
+      cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
+   } else {
+      cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
+   }
+
    anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                       GEN8_MI_BATCH_BUFFER_START_length * 4);
 
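
Both anv_cmd_buffer_chain_batch and anv_cmd_buffer_grow_batch sit behind the same extend_cb hook, which the emit path invokes whenever the next packet would run past batch->end. A simplified, self-contained sketch of that contract (struct batch and emit_dwords are stand-ins for the driver's anv_batch and anv_batch_emit_dwords; the body is a sketch of the mechanism, not the driver's exact code):

#include <stddef.h>

typedef int VkResult;   /* stand-in; the driver uses Vulkan's enum */
#define VK_SUCCESS 0

struct batch {
   char *start, *next, *end;                      /* current BO's window */
   VkResult (*extend_cb)(struct batch *, void *); /* chain or grow       */
   void *user_data;                               /* the command buffer  */
};

/* Shape of the driver's emit path: when an emit would cross batch->end,
 * extend_cb either chains to a fresh BO (gen8+) or grows and replaces
 * the current one (the fallback added by this patch).  Either way, the
 * callback leaves next/end with room for the write below. */
static void *
emit_dwords(struct batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end) {
      if (batch->extend_cb(batch, batch->user_data) != VK_SUCCESS)
         return NULL;
   }
   void *p = batch->next;
   batch->next += num_dwords * 4;
   return p;
}

The key difference on the grow path is that batch->start moves as well: the contents are copied into a larger BO with a new map, so raw pointers into the old map must not be held across an emit.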
@@ -680,7 +728,9 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
        * determine this statically here so that this stays in sync with the
        * actual ExecuteCommands implementation.
        */
-      if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
+      if (!cmd_buffer->device->can_chain_batches) {
+         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
+      } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
           (batch_bo->length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
          /* If the secondary has exactly one batch buffer in its list *and*
           * that batch buffer is less than half of the maximum size, we're
@@ -728,6 +778,15 @@ anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
       anv_batch_emit_batch(&primary->batch, &secondary->batch);
       anv_cmd_buffer_emit_state_base_address(primary);
       break;
+   case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
+      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
+      unsigned length = secondary->batch.end - secondary->batch.start;
+      anv_batch_bo_grow(primary, bbo, &primary->batch, length,
+                        GEN8_MI_BATCH_BUFFER_START_length * 4);
+      anv_batch_emit_batch(&primary->batch, &secondary->batch);
+      anv_cmd_buffer_emit_state_base_address(primary);
+      break;
+   }
    case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
       struct anv_batch_bo *first_bbo =
          list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
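
Taken together, anv_cmd_buffer_end_batch_buffer now picks a secondary's execution mode along these lines. This is a hypothetical, stand-alone restatement of the logic above (pick_exec_mode is not a driver function, and max_size stands in for ANV_CMD_BUFFER_BATCH_SIZE):

#include <stdbool.h>
#include <stddef.h>

enum exec_mode { EXEC_EMIT, EXEC_GROW_AND_EMIT, EXEC_CHAIN };

/* Decision shape from anv_cmd_buffer_end_batch_buffer: without batch
 * chaining every secondary must be copied into the primary, and the
 * grow fallback guarantees the copy always fits. */
static enum exec_mode
pick_exec_mode(bool can_chain_batches, bool single_bbo,
               size_t length, size_t max_size)
{
   if (!can_chain_batches)
      return EXEC_GROW_AND_EMIT;  /* copy; grow the primary first */
   if (single_bbo && length < max_size / 2)
      return EXEC_EMIT;           /* small enough to copy inline  */
   return EXEC_CHAIN;             /* chain (or copy-and-chain)    */
}

In the driver, the chaining branch further distinguishes CHAIN from COPY_AND_CHAIN based on how the secondary may be reused, since chaining patches the secondary's final instruction in place.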
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index 068626d..ce2045e 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -826,6 +826,13 @@ VkResult anv_CreateDevice(
    device->info = *physical_device->info;
    device->isl_dev = physical_device->isl_dev;
 
+   /* On Broadwell and later, we can use batch chaining to more efficiently
+    * implement growing command buffers.  On Haswell and earlier, the
+    * kernel command parser gets in the way and we have to fall back to
+    * growing the batch.
+    */
+   device->can_chain_batches = device->info.gen >= 8;
+
    pthread_mutex_init(&device->mutex, NULL);
 
    anv_bo_pool_init(&device->batch_bo_pool, device);
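
The whole fallback hinges on this one flag. A trivial restatement of the predicate as a free function (the driver stores the result on anv_device rather than recomputing it):

#include <stdbool.h>

/* gen8 (Broadwell) is the first generation where the driver chains
 * batch BOs; on Haswell and earlier the kernel command parser inspects
 * batches and chaining is not workable, so the driver grows a single
 * BO instead. */
static bool
can_chain_batches(int gen)
{
   return gen >= 8;   /* matches device->info.gen >= 8 in the patch */
}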
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 6d98e02..03e8767 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -672,6 +672,7 @@ struct anv_device {
     struct isl_device                           isl_dev;
     int                                         context_id;
     int                                         fd;
+    bool                                        can_chain_batches;
 
     struct anv_bo_pool                          batch_bo_pool;
 
@@ -1192,6 +1193,7 @@ struct anv_cmd_pool {
 enum anv_cmd_buffer_exec_mode {
    ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
    ANV_CMD_BUFFER_EXEC_MODE_EMIT,
+   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
    ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
    ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
 };