/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10
struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		res->hw_destroy(res);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_context_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}
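
/*
 * Example (illustrative sketch only, not driver code): the usual pairing of
 * vmw_resource_reference() and vmw_resource_unreference(). The consumer
 * do_something() is hypothetical.
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *	do_something(tmp);
 *	vmw_resource_unreference(&tmp);	(tmp is set to NULL here)
 *
 * vmw_resource_reference_unless_doomed() is the variant for when the
 * resource may already be on its way down; it returns NULL rather than
 * resurrecting a zero-refcount object.
 */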
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @obj_type: Resource object type.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * be able to find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
						struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
		res = NULL;

	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:  Pointer to a device private struct
 * @tfile:     Pointer to a struct ttm_object_file identifying the caller
 * @handle:    The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res:     On successful return the location pointed to will contain
 *             a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
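
/*
 * Example (sketch, assuming a surface handle obtained from user space):
 * looking up and type-checking a resource, then dropping the reference
 * when done. The handle value and the surrounding error path are
 * hypothetical.
 *
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... use res ...
 *	vmw_resource_unreference(&res);
 */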
/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}
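
/*
 * Example (sketch): dispatching on what a handle refers to. Exactly one of
 * the two out-pointers is non-NULL on success, and dropping the resulting
 * reference is the caller's responsibility:
 *
 *	struct vmw_surface *surf = NULL;
 *	struct vmw_dma_buffer *buf = NULL;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
 *	if (ret == 0) {
 *		if (surf)
 *			vmw_surface_unreference(&surf);
 *		else
 *			vmw_dmabuf_unreference(&buf);
 *	}
 */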
/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
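
/*
 * Worked example (assuming 4 KiB pages and 64-bit pointers/dma_addr_t):
 * for a 64 KiB ordinary buffer, num_pages = 16, so the page array needs
 * ttm_round_pot(16 * sizeof(void *)) = 128 bytes. With the
 * vmw_dma_alloc_coherent map mode, another ttm_round_pot(16 *
 * sizeof(dma_addr_t)) = 128 bytes is added for the DMA address array,
 * on top of the fixed struct_size (or user_struct_size) term.
 */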
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, NULL, bo_free);
	return ret;
}
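
/*
 * Example (sketch, assuming a freshly allocated buffer object): how callers
 * in this file use vmw_dmabuf_init(). Note that ttm_bo_init() destroys the
 * object through @bo_free on failure, so no explicit kfree is needed on
 * that path.
 *
 *	struct vmw_dma_buffer *vmw_bo = kzalloc(sizeof(*vmw_bo), GFP_KERNEL);
 *
 *	if (!vmw_bo)
 *		return -ENOMEM;
 *	ret = vmw_dmabuf_init(dev_priv, vmw_bo, size,
 *			      &vmw_vram_sys_placement, true,
 *			      &vmw_dmabuf_bo_free);
 */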
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;

	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}
/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}
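
/*
 * Example (sketch): allocating a user buffer and publishing its handle,
 * much like the alloc and dumb-create ioctl paths below do. Copying the
 * reply to user space is left out.
 *
 *	uint32_t handle;
 *	struct vmw_dma_buffer *dma_buf;
 *
 *	ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
 *				    &handle, &dma_buf);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... hand "handle" back to user space ...
 *	vmw_dmabuf_unreference(&dma_buf);
 */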
/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}
/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		if (nonblock)
			return reservation_object_test_signaled_rcu(bo->resv,
								    true) ?
				0 : -EBUSY;

		lret = reservation_object_wait_timeout_rcu(bo->resv, true,
							   true,
							   MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}
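
/*
 * Example (sketch): the grab/release pairing as driven from user space
 * through the synccpu ioctl below. A blocking (non-allow_cs) grab takes a
 * TTM_REF_SYNCCPU_WRITE reference that a later drm_vmw_synccpu_release op,
 * or closing @tfile, drops again:
 *
 *	ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile,
 *					   drm_vmw_synccpu_read |
 *					   drm_vmw_synccpu_write);
 *	... CPU accesses the buffer ...
 *	ret = vmw_user_dmabuf_synccpu_release(handle, tfile,
 *					      drm_vmw_synccpu_read |
 *					      drm_vmw_synccpu_write);
 */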
/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}
/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}
/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */
	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_ret;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_ret;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
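
/*
 * Worked example of the pitch/size computation above: a 1920x1080 buffer
 * at 32 bpp gives pitch = 1920 * ((32 + 7) / 8) = 7680 bytes and
 * size = 7680 * 1080 = 8294400 bytes. The "+ 7" rounds sub-byte bpp
 * values up to whole bytes.
 */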
/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}
/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}
/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}
/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res:     The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 *           containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}
/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}
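
/*
 * Example (simplified sketch): the reserve/validate/unreserve sequence
 * around command submission, here with no backup switch (new_backup ==
 * NULL). The real submission path in vmwgfx_execbuf.c also reserves and
 * validates the backup buffer before validating the resource.
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = vmw_resource_validate(res);
 *	... submit commands referencing res ...
 *	vmw_resource_unreserve(res, NULL, 0UL);
 */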
/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}
/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}
/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res:           The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}
/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:    Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 *         insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		reservation_object_add_excl_fence(bo->resv, &fence->base);
		fence_put(&fence->base);
	} else
		reservation_object_add_excl_fence(bo->resv, &fence->base);
}
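
/*
 * Example (sketch): fencing a reserved buffer after submitting commands
 * that touch it, letting this function create the fence itself:
 *
 *	ttm_bo_reserve(bo, false, false, false, NULL);
 *	... submit commands referencing bo ...
 *	vmw_fence_single_bo(bo, NULL);
 *	ttm_bo_unreserve(bo);
 */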
/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:  The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo:res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;
		val_buf.shared = false;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		(void) ttm_bo_wait(bo, false, false, false);
	}
}
/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}
/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type:     The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_dma_buffer *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, false,
				       NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 interruptible, false);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
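
/*
 * Example (sketch): pin/unpin pairing, e.g. around scanout setup. Each
 * successful vmw_resource_pin() must eventually be balanced by a
 * vmw_resource_unpin():
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	... the id is now stable and the resource cannot be evicted ...
 *	vmw_resource_unpin(res);
 */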
/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_dma_buffer *vbo = res->backup;

		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}