
drm/i915: Move dev_priv->mm.[un]bound_list to its own lock
[uclinux-h8/linux.git] drivers/gpu/drm/i915/i915_gem.c
index 1f66a61..60e7fec 100644
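
In short, this patch stops relying on struct_mutex to guard dev_priv->mm.bound_list and dev_priv->mm.unbound_list: objects are now linked through obj->mm.link instead of obj->global_link, and every list manipulation is bracketed by a new spinlock, dev_priv->mm.obj_lock. The hunks below all follow the same idiom, sketched here for orientation (the helper name is invented for illustration and is not part of the patch):

	/* Illustrative sketch only, not kernel source. */
	static void example_track_object(struct drm_i915_private *i915,
					 struct drm_i915_gem_object *obj)
	{
		struct list_head *list;

		/* mm.obj_lock, not struct_mutex, now guards list membership. */
		spin_lock(&i915->mm.obj_lock);
		list = obj->bind_count ? &i915->mm.bound_list
				       : &i915->mm.unbound_list;
		list_move_tail(&obj->mm.link, list);
		spin_unlock(&i915->mm.obj_lock);
	}
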
@@ -1537,6 +1537,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
        struct list_head *list;
        struct i915_vma *vma;
 
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!i915_vma_is_ggtt(vma))
                        break;
@@ -1551,8 +1553,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
        }
 
        i915 = to_i915(obj->base.dev);
+       spin_lock(&i915->mm.obj_lock);
        list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
-       list_move_tail(&obj->global_link, list);
+       list_move_tail(&obj->mm.link, list);
+       spin_unlock(&i915->mm.obj_lock);
 }
 
 /**
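
The new GEM_BUG_ON above presumably documents the precondition that makes the locked list_move_tail() safe: while the pages are pinned, __i915_gem_object_put_pages() (see the hunk below) bails out early and therefore cannot unlink obj->mm.link from under the caller.
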
@@ -2253,6 +2257,7 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
                                 enum i915_mm_subclass subclass)
 {
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *pages;
 
        if (i915_gem_object_has_pinned_pages(obj))
@@ -2273,6 +2278,10 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
        pages = fetch_and_zero(&obj->mm.pages);
        GEM_BUG_ON(!pages);
 
+       spin_lock(&i915->mm.obj_lock);
+       list_del(&obj->mm.link);
+       spin_unlock(&i915->mm.obj_lock);
+
        if (obj->mm.mapping) {
                void *ptr;
 
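
fetch_and_zero(), used here and again in the attach_phys hunk at the end of this diff, is an i915 utility macro that returns the old value and clears the variable, so obj->mm.pages is already NULL by the time the object is unlinked under obj_lock. It is roughly the following (paraphrased from i915_utils.h for context, not part of this patch):

	#define fetch_and_zero(ptr) ({			\
		typeof(*ptr) __T = *(ptr);		\
		*(ptr) = (typeof(*ptr))0;		\
		__T;					\
	})
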
@@ -2507,7 +2516,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
        obj->mm.pages = pages;
 
        if (i915_gem_object_is_tiled(obj) &&
-           to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+           i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                GEM_BUG_ON(obj->mm.quirked);
                __i915_gem_object_pin_pages(obj);
                obj->mm.quirked = true;
@@ -2529,8 +2538,11 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                if (obj->mm.page_sizes.phys & ~0u << i)
                        obj->mm.page_sizes.sg |= BIT(i);
        }
-
        GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+
+       spin_lock(&i915->mm.obj_lock);
+       list_add(&obj->mm.link, &i915->mm.unbound_list);
+       spin_unlock(&i915->mm.obj_lock);
 }
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
@@ -4324,7 +4336,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
        mutex_init(&obj->mm.lock);
 
-       INIT_LIST_HEAD(&obj->global_link);
        INIT_LIST_HEAD(&obj->vma_list);
        INIT_LIST_HEAD(&obj->lut_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4496,7 +4507,18 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                GEM_BUG_ON(!list_empty(&obj->vma_list));
                GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
 
-               list_del(&obj->global_link);
+               /* This serializes freeing with the shrinker. Since the free
+                * is delayed, first by RCU then by the workqueue, we want the
+                * shrinker to be able to free pages of unreferenced objects,
+                * or else we may oom whilst there are plenty of deferred
+                * freed objects.
+                */
+               if (i915_gem_object_has_pages(obj)) {
+                       spin_lock(&i915->mm.obj_lock);
+                       list_del_init(&obj->mm.link);
+                       spin_unlock(&i915->mm.obj_lock);
+               }
+
        }
        intel_runtime_pm_put(i915);
        mutex_unlock(&i915->drm.struct_mutex);
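
Two details worth noting in the hunk above: the unlink is conditional on i915_gem_object_has_pages() because, as the comment explains, the shrinker may already have freed the pages of a deferred-free object, and with them (per the __i915_gem_object_put_pages() hunk earlier) its place on the list; and list_del() becomes list_del_init(), which leaves obj->mm.link pointing at itself rather than poisoned, so any later emptiness check or removal on the link is harmless.
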
@@ -5035,11 +5057,14 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
                goto err_priorities;
 
        INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+
+       spin_lock_init(&dev_priv->mm.obj_lock);
        init_llist_head(&dev_priv->mm.free_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
+
        INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
                          i915_gem_retire_work_handler);
        INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -5133,12 +5158,12 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
        i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
        i915_gem_drain_freed_objects(dev_priv);
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
+       spin_lock(&dev_priv->mm.obj_lock);
        for (p = phases; *p; p++) {
-               list_for_each_entry(obj, *p, global_link)
+               list_for_each_entry(obj, *p, mm.link)
                        __start_cpu_write(obj);
        }
-       mutex_unlock(&dev_priv->drm.struct_mutex);
+       spin_unlock(&dev_priv->mm.obj_lock);
 
        return 0;
 }
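
For context, the loop above walks a NULL-terminated array of list heads covering both object lists; the surrounding code is presumably along the lines of this sketch (reconstructed, only the lines marked in the hunk are actually part of the patch):

	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&dev_priv->mm.unbound_list,
		&dev_priv->mm.bound_list,
		NULL
	}, **p;

	spin_lock(&dev_priv->mm.obj_lock);
	for (p = phases; *p; p++) {
		list_for_each_entry(obj, *p, mm.link)
			__start_cpu_write(obj);
	}
	spin_unlock(&dev_priv->mm.obj_lock);
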
@@ -5457,7 +5482,17 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
                goto err_unlock;
        }
 
-       pages = obj->mm.pages;
+       pages = fetch_and_zero(&obj->mm.pages);
+       if (pages) {
+               struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+               __i915_gem_object_reset_page_iter(obj);
+
+               spin_lock(&i915->mm.obj_lock);
+               list_del(&obj->mm.link);
+               spin_unlock(&i915->mm.obj_lock);
+       }
+
        obj->ops = &i915_gem_phys_ops;
 
        err = ____i915_gem_object_get_pages(obj);
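
This final hunk mirrors the put-pages path: the old backing store is dropped, the cached page iterator is reset, and the object leaves the bound/unbound lists before obj->ops is switched to i915_gem_phys_ops. The subsequent ____i915_gem_object_get_pages() call is then expected to re-acquire pages and, via the __i915_gem_object_set_pages() hunk above, put obj->mm.link back on unbound_list, so removing it first avoids adding the link to a list twice.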