#include <linux/dma_remapping.h>
#include <linux/reservation.h>
+#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <drm/drmP.h>
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (unlikely(IS_ERR(vma))) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
exec_list);
list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
+ vma->exec_entry = NULL;
i915_vma_put(vma);
}
kfree(eb);
memset(&cache->node, 0, sizeof(cache->node));
ret = drm_mm_insert_node_in_range_generic
(&ggtt->base.mm, &cache->node,
- 4096, 0, 0,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
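A note on the two changed arguments: the node size is now spelled PAGE_SIZE rather than a bare 4096, and the colour changes from 0 to I915_COLOR_UNEVICTABLE, an i915 convention marking a node that is not backed by a vma so the eviction scan must pass over it. A minimal sketch of the node lifecycle this relies on, assuming a plain drm_mm and a mappable_end bound taken from context:

	struct drm_mm_node node = {};
	int err;

	/* reserve one page anywhere in [0, mappable_end), tagged unevictable */
	err = drm_mm_insert_node_in_range_generic(&mm, &node,
						  PAGE_SIZE, 0,
						  I915_COLOR_UNEVICTABLE,
						  0, mappable_end,
						  DRM_MM_SEARCH_DEFAULT,
						  DRM_MM_CREATE_DEFAULT);
	if (err == 0)
		drm_mm_remove_node(&node);	/* release the reservation when done */
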
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_vma_is_ggtt(vma));
- if (entry->alignment &&
- vma->node.start & (entry->alignment - 1))
+ if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
return true;
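For reference, IS_ALIGNED() from <linux/kernel.h> is exactly the open-coded test it replaces here (alignment is validated elsewhere to be a power of two):

	#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)

so !IS_ALIGNED(vma->node.start, entry->alignment) computes the same mask test as the removed lines, with the intent spelled out and the mask cast to the type of x.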
if (vma->node.size < entry->pad_to_size)
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
+ if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
+ continue;
+
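+ /* EXEC_OBJECT_ASYNC lets userspace opt out of the implicit fence
+  * wait below; ordering against other users of the object becomes
+  * userspace's responsibility, e.g. via an explicit in-fence.
+  */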
ret = i915_gem_request_await_object
(req, obj, obj->base.pending_write_domain);
if (ret)
if (exec[i].offset !=
gen8_canonical_addr(exec[i].offset & PAGE_MASK))
return -EINVAL;
-
- /* From drm_mm perspective address space is continuous,
- * so from this point we're always using non-canonical
- * form internally.
- */
- exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
}
+ /* From drm_mm perspective address space is continuous,
+ * so from this point we're always using non-canonical
+ * form internally.
+ */
+ exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
+
if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
return -EINVAL;
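For context, the canonical/non-canonical conversion this block depends on is a 48-bit sign extension; the gen8 helpers in this file are (approximately) defined as:

	#define GEN8_HIGH_ADDRESS_BIT 47

	static inline u64 gen8_canonical_addr(u64 address)
	{
		return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
	}

	static inline u64 gen8_noncanonical_addr(u64 address)
	{
		return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
	}

For example, 0x0000800000000000 (bit 47 set) canonicalises to 0xffff800000000000, and masking back to bits 47:0 recovers the form drm_mm works with.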
struct intel_engine_cs *engine, const u32 ctx_id)
{
struct i915_gem_context *ctx;
- struct i915_ctx_hang_stats *hs;
ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
if (IS_ERR(ctx))
return ctx;
- hs = &ctx->hang_stats;
- if (hs->banned) {
+ if (i915_gem_context_is_banned(ctx)) {
DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
return ERR_PTR(-EIO);
}
struct drm_i915_gem_object *obj = vma->obj;
const unsigned int idx = req->engine->id;
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
/* Add a reference if we're newly entering the active list.
struct i915_execbuffer_params *params = &params_master;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 dispatch_flags;
+ struct dma_fence *in_fence = NULL;
+ struct sync_file *out_fence = NULL;
+ int out_fence_fd = -1;
int ret;
bool need_relocs;
dispatch_flags |= I915_DISPATCH_RS;
}
+ if (args->flags & I915_EXEC_FENCE_IN) {
+ in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
+ if (!in_fence)
+ return -EINVAL;
+ }
+
+ if (args->flags & I915_EXEC_FENCE_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto err_in_fence;
+ }
+ }
+
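From the userspace side the contract is: the in-fence fd is passed in the low 32 bits of rsvd2, and on success the out-fence fd is returned in the high 32 bits. A hedged sketch, assuming the write-read ioctl variant (DRM_IOCTL_I915_GEM_EXECBUFFER2_WR) that allows rsvd2 to be copied back, with drm_fd, ring and in_fence_fd as placeholders:

	struct drm_i915_gem_execbuffer2 eb2 = {
		/* buffers_ptr, buffer_count, batch_len, ... elided */
		.flags = ring | I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT,
		.rsvd2 = (__u32)in_fence_fd,	/* in-fence: low 32 bits */
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &eb2) == 0) {
		int out_fence_fd = eb2.rsvd2 >> 32;	/* out-fence: high 32 bits */
		/* hand out_fence_fd to the consumer, then close() it */
	}
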
/* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
* process. Upon first dispatch, we acquire another prolonged
}
params->args_batch_start_offset = args->batch_start_offset;
- if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
+ if (engine->needs_cmd_parser && args->batch_len) {
struct i915_vma *vma;
vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
goto err_batch_unpin;
}
+ if (in_fence) {
+ ret = i915_gem_request_await_dma_fence(params->request,
+ in_fence);
+ if (ret < 0)
+ goto err_request;
+ }
+
+ if (out_fence_fd != -1) {
+ out_fence = sync_file_create(&params->request->fence);
+ if (!out_fence) {
+ ret = -ENOMEM;
+ goto err_request;
+ }
+ }
+
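Note that sync_file_create() takes its own reference on the request's fence, so the sync_file's lifetime is decoupled from the request; if submission fails before the fd is installed, the fput() in the error path below drops that reference again.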
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
* request is retired will the batch_obj be moved onto the
ret = execbuf_submit(params, args, &eb->vmas);
err_request:
__i915_add_request(params->request, ret == 0);
+ if (out_fence) {
+ if (ret == 0) {
+ fd_install(out_fence_fd, out_fence->file);
+ args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
+ args->rsvd2 |= (u64)out_fence_fd << 32;
+ out_fence_fd = -1;
+ } else {
+ fput(out_fence->file);
+ }
+ }
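The resulting rsvd2 layout is: bits 31:0 keep the in-fence fd exactly as passed in, bits 63:32 carry the freshly installed out-fence fd. GENMASK_ULL takes (high bit, low bit), so GENMASK_ULL(31, 0) == 0x00000000ffffffffULL, which is precisely the "keep in-fence" mask the comment describes.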
err_batch_unpin:
/*
/* intel_gpu_busy should also get a ref, so it will free when the device
* is really idle. */
intel_runtime_pm_put(dev_priv);
+ if (out_fence_fd != -1)
+ put_unused_fd(out_fence_fd);
+err_in_fence:
+ dma_fence_put(in_fence);
return ret;
}
return -EINVAL;
}
- if (args->rsvd2 != 0) {
- DRM_DEBUG("dirty rvsd2 field\n");
- return -EINVAL;
- }
-
exec2_list = drm_malloc_gfp(args->buffer_count,
sizeof(*exec2_list),
GFP_TEMPORARY);