vma->vm = vm;
vma->obj = obj;
vma->size = obj->base.size;
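+ /* Page alignment by default; pinning for scanout may raise this later. */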
+ vma->display_alignment = 4096;
if (view) {
vma->ggtt_view = *view;
if (view->type == I915_GGTT_VIEW_PARTIAL) {
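+ /*
+ * range_overflows_t() is true when offset + size (both in pages)
+ * exceeds the object's page count, including integer wraparound,
+ * so a partial view can never reach outside its object.
+ */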
+ GEM_BUG_ON(range_overflows_t(u64,
+ view->params.partial.offset,
+ view->params.partial.size,
+ obj->base.size >> PAGE_SHIFT));
vma->size = view->params.partial.size;
vma->size <<= PAGE_SHIFT;
+ GEM_BUG_ON(vma->size >= obj->base.size);
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
vma->size =
intel_rotation_info_size(&view->params.rotated);
}
if (i915_is_ggtt(vm)) {
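+ /*
+ * Precompute the fence footprint and alignment for this vma:
+ * both depend only on the size, tiling and stride, so they can
+ * be derived once at creation instead of on every pin.
+ */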
+ GEM_BUG_ON(overflows_type(vma->size, u32));
+ vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
+ i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj));
+ GEM_BUG_ON(vma->fence_size & 4095);
+
+ vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
+ i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj));
+ GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
+
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
if (bind_flags == 0)
return 0;
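+ /* The node must lie wholly inside the address space; check before binding. */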
+ if (GEM_WARN_ON(range_overflows(vma->node.start,
+ vma->node.size,
+ vma->vm->total)))
+ return -ENODEV;
+
if (vma_flags == 0 && vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma);
ret = vma->vm->allocate_va_range(vma->vm,
void __iomem *ptr;
/* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+ assert_rpm_wakelock_held(vma->vm->i915);
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
return IO_ERR_PTR(-ENODEV);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
- struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
bool mappable, fenceable;
- u32 fence_size, fence_alignment;
-
- fence_size = i915_gem_get_ggtt_size(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj));
- fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj),
- true);
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
- mappable = (vma->node.start + fence_size <=
- dev_priv->ggtt.mappable_end);
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(!vma->fence_size);
/*
* Explicitly disable for rotated VMA since the display does not
* need the fence and the VMA is not accessible to other users.
*/
- if (mappable && fenceable &&
- vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
+ if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
+ return;
+
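+ /*
+ * The node is fenceable if it can hold the padded fence footprint
+ * at the required alignment, and mappable if that footprint ends
+ * inside the CPU-visible aperture.
+ */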
+ fenceable = (vma->node.size >= vma->fence_size &&
+ (vma->node.start & (vma->fence_alignment - 1)) == 0);
+
+ mappable = vma->node.start + vma->fence_size <=
+ i915_vm_to_ggtt(vma->vm)->mappable_end;
+
+ if (mappable && fenceable)
vma->flags |= I915_VMA_CAN_FENCE;
else
vma->flags &= ~I915_VMA_CAN_FENCE;
}
-bool i915_gem_valid_gtt_space(struct i915_vma *vma,
- unsigned long cache_level)
+static bool color_differs(struct drm_mm_node *node, unsigned long color)
+{
+ return node->allocated && node->color != color;
+}
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
- struct drm_mm_node *gtt_space = &vma->node;
+ struct drm_mm_node *node = &vma->node;
struct drm_mm_node *other;
/*
* Colouring keeps nodes of different cache levels apart; without a
* colour-adjust callback there is nothing to enforce.
*/
if (vma->vm->mm.color_adjust == NULL)
return true;
- if (!drm_mm_node_allocated(gtt_space))
- return true;
+ /* Only valid to be called on an already inserted vma */
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
+ GEM_BUG_ON(list_empty(&node->node_list));
- if (list_empty(&gtt_space->node_list))
- return true;
-
- other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
- if (other->allocated && !other->hole_follows && other->color != cache_level)
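+ /*
+ * A neighbour of a different colour must be separated from this
+ * node by a hole (e.g. a guard page), in both directions.
+ */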
+ other = list_prev_entry(node, node_list);
+ if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
return false;
- other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
- if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ other = list_next_entry(node, node_list);
+ if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
return false;
return true;
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+ struct drm_i915_private *dev_priv = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
u64 start, end;
int ret;
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size);
- if (flags & PIN_MAPPABLE)
- size = i915_gem_get_ggtt_size(dev_priv, size,
- i915_gem_object_get_tiling(obj));
-
- alignment = max(max(alignment, vma->display_alignment),
- i915_gem_get_ggtt_alignment(dev_priv, size,
- i915_gem_object_get_tiling(obj),
- flags & PIN_MAPPABLE));
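+ /*
+ * A mappable pin must also satisfy the fence constraints, so take
+ * the footprint and alignment computed at vma creation rather than
+ * recomputing them from the tiling parameters here.
+ */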
+ alignment = max(alignment, vma->display_alignment);
+ if (flags & PIN_MAPPABLE) {
+ size = max_t(typeof(size), size, vma->fence_size);
+ alignment = max_t(typeof(alignment),
+ alignment, vma->fence_alignment);
+ }
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
- if (offset & (alignment - 1) || offset > end - size) {
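+ /* range_overflows() also rejects offset + size wrapping past end */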
+ if (offset & (alignment - 1) ||
+ range_overflows(offset, size, end)) {
ret = -EINVAL;
goto err_unpin;
}
vma->node.color = obj->cache_level;
ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
if (ret) {
- ret = i915_gem_evict_for_vma(vma);
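+ /* Pass the pin flags so eviction can honour e.g. PIN_NONBLOCK */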
+ ret = i915_gem_evict_for_vma(vma, flags);
if (ret == 0)
ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
if (ret)
unsigned int bound = vma->flags;
int ret;
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
for_each_active(active, idx) {
ret = i915_gem_active_retire(&vma->last_read[idx],
- &vma->vm->dev->struct_mutex);
+ &vma->vm->i915->drm.struct_mutex);
if (ret)
break;
}
* reaped by the shrinker.
*/
i915_gem_object_unpin_pages(obj);
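+ /* Every bound vma still holds its own pin on the object's pages */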
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
destroy:
if (unlikely(i915_vma_is_closed(vma)))