/**************************************************************************
 *
 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

/*
 * If we set up the screen target otable, screen objects stop working.
 */

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
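
/*
 * Note: assuming SVGA_OTABLE_SCREEN_TARGET is the last otable type below
 * SVGA_OTABLE_DX9_MAX (as in the SVGA headers this driver ships), the
 * subtraction simply drops the screen-target table from the setup and
 * takedown loops when that otable is disabled.
 */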

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif
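
/*
 * Note: with 4 KiB pages this gives PAGE_SIZE / VMW_PPN_SIZE page table
 * entries per page table page: 512 eight-byte PPN entries on 64-bit
 * configurations, 1024 four-byte entries on 32-bit ones.
 */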

/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @num_pages:      Number of pages that make up the page table.
 * @pt_level:       The indirection level of the page table. 0-2.
 * @pt_root_page:   DMA address of the level 0 page of the page table.
 */
struct vmw_mob {
        struct ttm_buffer_object *pt_bo;
        unsigned long num_pages;
        unsigned pt_level;
        dma_addr_t pt_root_page;
        uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size:           Size of the table (page-aligned).
 * @page_table:     Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
        unsigned long size;
        struct vmw_mob *page_table;
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
                               struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
                             struct vmw_piter data_iter,
                             unsigned long num_data_pages);

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @offset:         Start of table offset into dev_priv::otable_bo
 * @otable:         Pointer to otable metadata.
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
                                 SVGAOTableType type,
                                 unsigned long offset,
                                 struct vmw_otable *otable)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetOTableBase64 body;
        } *cmd;
        struct vmw_mob *mob;
        const struct vmw_sg_table *vsgt;
        struct vmw_piter iter;
        int ret;

        BUG_ON(otable->page_table != NULL);

        vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
        vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
        WARN_ON(!vmw_piter_next(&iter));

        mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
        if (unlikely(mob == NULL)) {
                DRM_ERROR("Failed creating OTable page table.\n");
                return -ENOMEM;
        }
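
        /*
         * Pick the cheapest page table format the backing allows: a
         * single-page table, a flat range when the pages are physically
         * contiguous, or a multilevel table built by vmw_mob_pt_setup().
         */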
        if (otable->size <= PAGE_SIZE) {
                mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
                mob->pt_root_page = vmw_piter_dma_addr(&iter);
        } else if (vsgt->num_regions == 1) {
                mob->pt_level = SVGA3D_MOBFMT_RANGE;
                mob->pt_root_page = vmw_piter_dma_addr(&iter);
        } else {
                ret = vmw_mob_pt_populate(dev_priv, mob);
                if (unlikely(ret != 0))
                        goto out_no_populate;

                vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
                mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        memset(cmd, 0, sizeof(*cmd));
        cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.type = type;
        cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
        cmd->body.sizeInBytes = otable->size;
        cmd->body.validSizeInBytes = 0;
        cmd->body.ptDepth = mob->pt_level;

        /*
         * The device doesn't support this, but the otable size is
         * determined at compile-time, so this BUG shouldn't trigger
         * randomly.
         */
        BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        otable->page_table = mob;

        return 0;

out_no_fifo:
out_no_populate:
        vmw_mob_destroy(mob);
        return ret;
}

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
                                     SVGAOTableType type,
                                     struct vmw_otable *otable)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetOTableBase body;
        } *cmd;
        struct ttm_buffer_object *bo;

        if (otable->page_table == NULL)
                return;

        bo = otable->page_table->pt_bo;
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for OTable "
                          "takedown.\n");
                return;
        }

        memset(cmd, 0, sizeof(*cmd));
        cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.type = type;
        cmd->body.baseAddress = 0;
        cmd->body.sizeInBytes = 0;
        cmd->body.validSizeInBytes = 0;
        cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
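
        /*
         * A zero base with an invalid page table depth tells the device
         * to drop its references to the table's backing pages, after
         * which the buffer can safely be fenced and released below.
         */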
        if (bo) {
                int ret;

                ret = ttm_bo_reserve(bo, false, true, false, NULL);
                BUG_ON(ret != 0);

                vmw_fence_single_bo(bo, NULL);
                ttm_bo_unreserve(bo);
        }

        vmw_mob_destroy(otable->page_table);
        otable->page_table = NULL;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Takes care of the device guest backed surface initialization
 * by setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful return
 * means the object tables can be taken down using the vmw_otables_takedown
 * function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
        unsigned long offset;
        unsigned long bo_size;
        struct vmw_otable *otables;
        SVGAOTableType i;
        int ret;

        otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
                          GFP_KERNEL);
        if (unlikely(otables == NULL)) {
                DRM_ERROR("Failed to allocate space for otable "
                          "metadata.\n");
                return -ENOMEM;
        }

        otables[SVGA_OTABLE_MOB].size =
                VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
        otables[SVGA_OTABLE_SURFACE].size =
                VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
        otables[SVGA_OTABLE_CONTEXT].size =
                VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
        otables[SVGA_OTABLE_SHADER].size =
                VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
        otables[SVGA_OTABLE_SCREEN_TARGET].size =
                VMWGFX_NUM_GB_SCREEN_TARGET *
                SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;

        bo_size = 0;
        for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
                otables[i].size =
                        (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
                bo_size += otables[i].size;
        }
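
        /*
         * All otables live back to back, page aligned, in one
         * system-memory buffer object, so a single allocation and a
         * single DMA mapping below cover every table.
         */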
        ret = ttm_bo_create(&dev_priv->bdev, bo_size,
                            ttm_bo_type_device,
                            &vmw_sys_ne_placement,
                            0, false, NULL,
                            &dev_priv->otable_bo);
        if (unlikely(ret != 0))
                goto out_no_bo;

        ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
        BUG_ON(ret != 0);
        ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
        if (unlikely(ret != 0))
                goto out_unreserve;
        ret = vmw_bo_map_dma(dev_priv->otable_bo);
        if (unlikely(ret != 0))
                goto out_unreserve;

        ttm_bo_unreserve(dev_priv->otable_bo);

        offset = 0;
        for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
                ret = vmw_setup_otable_base(dev_priv, i, offset,
                                            &otables[i]);
                if (unlikely(ret != 0))
                        goto out_no_setup;
                offset += otables[i].size;
        }

        dev_priv->otables = otables;
        return 0;

out_unreserve:
        ttm_bo_unreserve(dev_priv->otable_bo);
out_no_setup:
        for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
                vmw_takedown_otable_base(dev_priv, i, &otables[i]);

        ttm_bo_unref(&dev_priv->otable_bo);
out_no_bo:
        kfree(otables);
        return ret;
}

/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
        SVGAOTableType i;
        struct ttm_buffer_object *bo = dev_priv->otable_bo;
        int ret;

        for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
                vmw_takedown_otable_base(dev_priv, i,
                                         &dev_priv->otables[i]);

        ret = ttm_bo_reserve(bo, false, true, false, NULL);
        BUG_ON(ret != 0);

        vmw_fence_single_bo(bo, NULL);
        ttm_bo_unreserve(bo);

        ttm_bo_unref(&dev_priv->otable_bo);
        kfree(dev_priv->otables);
        dev_priv->otables = NULL;
}

/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages:  Number of data pages in the memory object buffer.
 */
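/*
 * Worked example, assuming 4 KiB pages and VMW_PPN_SIZE == 8: a 16 MiB
 * buffer spans 4096 data pages, whose PPNs need 4096 * 8 bytes = 8 pages
 * of level-1 entries; those 8 pages in turn need 64 bytes (one page,
 * after alignment) of level-2 entries, so the loop below returns 9.
 */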
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
        unsigned long data_size = data_pages * PAGE_SIZE;
        unsigned long tot_size = 0;

        while (likely(data_size > PAGE_SIZE)) {
                data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
                data_size *= VMW_PPN_SIZE;
                tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
        }

        return tot_size >> PAGE_SHIFT;
}

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages:  Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
        struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

        if (unlikely(mob == NULL))
                return NULL;

        mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

        return mob;
}

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @mob:         Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable, and
 * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
 * memory resources aren't sufficient and may cause TTM buffer objects
 * to be swapped out by using the TTM memory accounting function.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
                               struct vmw_mob *mob)
{
        int ret;

        BUG_ON(mob->pt_bo != NULL);

        ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
                            ttm_bo_type_device,
                            &vmw_sys_ne_placement,
                            0, false, NULL, &mob->pt_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);
        BUG_ON(ret != 0);
        ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
        if (unlikely(ret != 0))
                goto out_unreserve;
        ret = vmw_bo_map_dma(mob->pt_bo);
        if (unlikely(ret != 0))
                goto out_unreserve;

        ttm_bo_unreserve(mob->pt_bo);

        return 0;

out_unreserve:
        ttm_bo_unreserve(mob->pt_bo);
        ttm_bo_unref(&mob->pt_bo);

        return ret;
}

/*
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The page table entry
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
        *((u64 *) *addr) = val >> PAGE_SHIFT;
        *addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
        *(*addr)++ = val >> PAGE_SHIFT;
}
#endif
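
/*
 * Note: in the eight-byte case a single entry occupies two u32 slots,
 * hence the *addr += 2 above; the four-byte case advances one slot at
 * a time.
 */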

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter:      Page iterator over the underlying buffer
 *                  object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter:        Page iterator over the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
                                      unsigned long num_data_pages,
                                      struct vmw_piter *pt_iter)
{
        unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
        unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
        unsigned long pt_page;
        u32 *addr, *save_addr;
        unsigned long i;
        struct page *page;
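
        /*
         * Map each page table page with kmap_atomic() (it may be a
         * highmem page on 32-bit kernels) and fill it with PPN entries
         * for the data pages before moving on to the next one.
         */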
        for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
                page = vmw_piter_page(pt_iter);

                save_addr = addr = kmap_atomic(page);

                for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
                        vmw_mob_assign_ppn(&addr,
                                           vmw_piter_dma_addr(data_iter));
                        if (unlikely(--num_data_pages == 0))
                                break;
                        WARN_ON(!vmw_piter_next(data_iter));
                }
                kunmap_atomic(save_addr);
                vmw_piter_next(pt_iter);
        }

        return num_pt_pages;
}

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob:            Pointer to a mob whose page table needs setting up.
 * @data_iter:      Page iterator over the buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Sets up a multilevel mob page table, one level per loop iteration.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
                             struct vmw_piter data_iter,
                             unsigned long num_data_pages)
{
        unsigned long num_pt_pages = 0;
        struct ttm_buffer_object *bo = mob->pt_bo;
        struct vmw_piter save_pt_iter;
        struct vmw_piter pt_iter;
        const struct vmw_sg_table *vsgt;
        int ret;

        ret = ttm_bo_reserve(bo, false, true, false, NULL);
        BUG_ON(ret != 0);

        vsgt = vmw_bo_sg_table(bo);
        vmw_piter_start(&pt_iter, vsgt, 0);
        BUG_ON(!vmw_piter_next(&pt_iter));
        mob->pt_level = 0;
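
        /*
         * Each pass writes one page table level: first entries pointing
         * at the data pages, then entries pointing at the previous
         * level, until a level fits in a single page. That final page
         * becomes the page table root handed to the device.
         */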
        while (likely(num_data_pages > 1)) {
                ++mob->pt_level;
                BUG_ON(mob->pt_level > 2);
                save_pt_iter = pt_iter;
                num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
                                                &pt_iter);
                data_iter = save_pt_iter;
                num_data_pages = num_pt_pages;
        }

        mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
        ttm_bo_unreserve(bo);
}

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob:            Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
        if (mob->pt_bo)
                ttm_bo_unref(&mob->pt_bo);
        kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
                    struct vmw_mob *mob)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBMob body;
        } *cmd;
        int ret;
        struct ttm_buffer_object *bo = mob->pt_bo;

        if (bo) {
                ret = ttm_bo_reserve(bo, false, true, false, NULL);
                /*
                 * No one else should be using this buffer.
                 */
                BUG_ON(ret != 0);
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for Memory "
                          "Object unbinding.\n");
        } else {
                cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
                cmd->header.size = sizeof(cmd->body);
                cmd->body.mobid = mob->id;
                vmw_fifo_commit(dev_priv, sizeof(*cmd));
        }

        if (bo) {
                vmw_fence_single_bo(bo, NULL);
                ttm_bo_unreserve(bo);
        }
        vmw_fifo_resource_dec(dev_priv);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 *                populating it if necessary.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob we're making visible.
 * @vsgt:           Pointer to a struct vmw_sg_table describing the
 *                  data pages of the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 *                  object.
 * @mob_id:         Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
                 struct vmw_mob *mob,
                 const struct vmw_sg_table *vsgt,
                 unsigned long num_data_pages,
                 int32_t mob_id)
{
        int ret;
        bool pt_set_up = false;
        struct vmw_piter data_iter;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBMob64 body;
        } *cmd;

        mob->id = mob_id;
        vmw_piter_start(&data_iter, vsgt, 0);
        if (unlikely(!vmw_piter_next(&data_iter)))
                return 0;
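
        /*
         * As in vmw_setup_otable_base(), pick the cheapest format the
         * backing pages allow; a page table is built only for
         * multi-region backing, and only if one wasn't set up earlier.
         */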
        if (likely(num_data_pages == 1)) {
                mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
                mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
        } else if (vsgt->num_regions == 1) {
                mob->pt_level = SVGA3D_MOBFMT_RANGE;
                mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
        } else if (unlikely(mob->pt_bo == NULL)) {
                ret = vmw_mob_pt_populate(dev_priv, mob);
                if (unlikely(ret != 0))
                        return ret;

                vmw_mob_pt_setup(mob, data_iter, num_data_pages);
                pt_set_up = true;
                mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
        }
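
        /*
         * Account for the new device resource before reserving FIFO
         * space; the error path below undoes this if the reservation
         * fails.
         */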
        vmw_fifo_resource_inc(dev_priv);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for Memory "
                          "Object binding.\n");
                goto out_no_cmd_space;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.mobid = mob_id;
        cmd->body.ptDepth = mob->pt_level;
        cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
        cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;

out_no_cmd_space:
        vmw_fifo_resource_dec(dev_priv);
        if (pt_set_up)
                ttm_bo_unref(&mob->pt_bo);

        return -ENOMEM;
}