OSDN Git Service

drm/ttm: cleanup BO size handling v3
author: Christian König <christian.koenig@amd.com>
Wed, 9 Dec 2020 14:07:50 +0000 (15:07 +0100)
committer: Christian König <christian.koenig@amd.com>
Mon, 14 Dec 2020 13:20:46 +0000 (14:20 +0100)
Based on an idea from Dave, but cleaned up a bit.

We had multiple fields for essentially the same thing.

Now bo->base.size is the original size of the BO in
arbitrary units, usually bytes.

bo->mem.num_pages is the size in number of pages in the
resource domain of bo->mem.mem_type.

v2: use the GEM object size instead of the BO size
v3: fix printks in some places

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com> (v1)
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/406831/
36 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/nouveau/nv17_fence.c
drivers/gpu/drm/nouveau/nv50_fence.c
drivers/gpu/drm/qxl/qxl_object.h
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_prime.c
drivers/gpu/drm/radeon/radeon_trace.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_resource.h

index e5919ef..c4c93f1 100644 (file)
@@ -269,7 +269,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
        case TTM_PL_TT:
                sgt = drm_prime_pages_to_sg(obj->dev,
                                            bo->tbo.ttm->pages,
-                                           bo->tbo.num_pages);
+                                           bo->tbo.ttm->num_pages);
                if (IS_ERR(sgt))
                        return sgt;
 
index 056cb87..52bcd1b 100644 (file)
@@ -121,7 +121,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 
-       if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
+       if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
                return AMDGPU_BO_INVALID_OFFSET;
 
        if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
index c6c9723..381ecc4 100644 (file)
@@ -787,7 +787,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
        if (r < 0)
                return r;
 
-       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
        if (r)
                return r;
 
index ed47cba..a99a5cd 100644 (file)
@@ -174,12 +174,12 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
 
 static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
 {
-       return bo->tbo.num_pages << PAGE_SHIFT;
+       return bo->tbo.base.size;
 }
 
 static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
 {
-       return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+       return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
 }
 
 static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
index ee9480d..20715dd 100644 (file)
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,
 
            TP_fast_assign(
                           __entry->bo = bo;
-                          __entry->pages = bo->tbo.num_pages;
+                          __entry->pages = bo->tbo.mem.num_pages;
                           __entry->type = bo->tbo.mem.mem_type;
                           __entry->prefer = bo->preferred_domains;
                           __entry->allow = bo->allowed_domains;
index ec93d4f..5346891 100644 (file)
@@ -636,7 +636,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 out:
        /* update statistics */
-       atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
+       atomic64_add(bo->base.size, &adev->num_bytes_moved);
        amdgpu_bo_move_notify(bo, evict, new_mem);
        return 0;
 }
@@ -2131,7 +2131,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        return r;
        }
 
-       num_pages = bo->tbo.num_pages;
+       num_pages = bo->tbo.mem.num_pages;
        mm_node = bo->tbo.mem.mm_node;
        num_loops = 0;
        while (num_pages) {
@@ -2161,7 +2161,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                }
        }
 
-       num_pages = bo->tbo.num_pages;
+       num_pages = bo->tbo.mem.num_pages;
        mm_node = bo->tbo.mem.mm_node;
 
        while (num_pages) {
index 985e454..7f30629 100644 (file)
@@ -554,7 +554,7 @@ static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev)
                return r;
        }
 
-       memset(eop, 0, adev->mes.eop_gpu_obj->tbo.mem.size);
+       memset(eop, 0, adev->mes.eop_gpu_obj->tbo.base.size);
 
        amdgpu_bo_kunmap(adev->mes.eop_gpu_obj);
        amdgpu_bo_unreserve(adev->mes.eop_gpu_obj);
index bc542ac..c1a675b 100644 (file)
@@ -473,10 +473,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
-               drm->gem.vram_available -= bo->mem.size;
+               drm->gem.vram_available -= bo->base.size;
                break;
        case TTM_PL_TT:
-               drm->gem.gart_available -= bo->mem.size;
+               drm->gem.gart_available -= bo->base.size;
                break;
        default:
                break;
@@ -504,10 +504,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
        if (!nvbo->bo.pin_count) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
-                       drm->gem.vram_available += bo->mem.size;
+                       drm->gem.vram_available += bo->base.size;
                        break;
                case TTM_PL_TT:
-                       drm->gem.gart_available += bo->mem.size;
+                       drm->gem.gart_available += bo->base.size;
                        break;
                default:
                        break;
@@ -913,7 +913,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
                return 0;
 
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
-               *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
+               *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
                                               nvbo->mode, nvbo->zeta);
        }
 
index bceb48a..17831ee 100644 (file)
@@ -286,11 +286,11 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
 
        bl_size = bw * bh * (1 << tile_mode) * gob_size;
 
-       DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+       DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
                      offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
-                     nvbo->bo.mem.size);
+                     nvbo->bo.base.size);
 
-       if (bl_size + offset > nvbo->bo.mem.size)
+       if (bl_size + offset > nvbo->bo.base.size)
                return -ERANGE;
 
        return 0;
@@ -363,7 +363,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
                } else {
                        uint32_t size = mode_cmd->pitches[i] * height;
 
-                       if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+                       if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
                                return -ERANGE;
                }
        }
index 2f16b52..3474886 100644 (file)
@@ -30,9 +30,9 @@
 struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-       int npages = nvbo->bo.num_pages;
 
-       return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
+       return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages,
+                                    nvbo->bo.ttm->num_pages);
 }
 
 struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
index 1253fde..b1cd8d7 100644 (file)
@@ -80,7 +80,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
        struct nv10_fence_chan *fctx;
        struct ttm_resource *reg = &priv->bo->bo.mem;
        u32 start = reg->start * PAGE_SIZE;
-       u32 limit = start + reg->size - 1;
+       u32 limit = start + priv->bo->bo.base.size - 1;
        int ret = 0;
 
        fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
index 447238e..1625826 100644 (file)
@@ -39,7 +39,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
        struct nv10_fence_chan *fctx;
        struct ttm_resource *reg = &priv->bo->bo.mem;
        u32 start = reg->start * PAGE_SIZE;
-       u32 limit = start + reg->size - 1;
+       u32 limit = start + priv->bo->bo.base.size - 1;
        int ret;
 
        fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
index ebf24c9..e60a8f8 100644 (file)
@@ -50,7 +50,7 @@ static inline void qxl_bo_unreserve(struct qxl_bo *bo)
 
 static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 {
-       return bo->tbo.num_pages << PAGE_SHIFT;
+       return bo->tbo.base.size;
 }
 
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
index 21ce2f9..4f35c32 100644 (file)
@@ -401,7 +401,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
        struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 
        /* Sort A before B if A is smaller. */
-       return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+       return (int)la->robj->tbo.mem.num_pages -
+               (int)lb->robj->tbo.mem.num_pages;
 }
 
 /**
index ab81e35..9a99993 100644 (file)
@@ -54,20 +54,19 @@ static void radeon_update_memory_usage(struct radeon_bo *bo,
                                       unsigned mem_type, int sign)
 {
        struct radeon_device *rdev = bo->rdev;
-       u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
 
        switch (mem_type) {
        case TTM_PL_TT:
                if (sign > 0)
-                       atomic64_add(size, &rdev->gtt_usage);
+                       atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
                else
-                       atomic64_sub(size, &rdev->gtt_usage);
+                       atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
                break;
        case TTM_PL_VRAM:
                if (sign > 0)
-                       atomic64_add(size, &rdev->vram_usage);
+                       atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
                else
-                       atomic64_sub(size, &rdev->vram_usage);
+                       atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
                break;
        }
 }
@@ -256,7 +255,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
                }
                return 0;
        }
-       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
@@ -610,7 +609,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
-                              bo->tbo.num_pages << PAGE_SHIFT);
+                              bo->tbo.base.size);
        return 0;
 }
 
index d606e9a..9896d82 100644 (file)
@@ -109,12 +109,12 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
 
 static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
 {
-       return bo->tbo.num_pages << PAGE_SHIFT;
+       return bo->tbo.base.size;
 }
 
 static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
 {
-       return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+       return bo->tbo.base.size / RADEON_GPU_PAGE_SIZE;
 }
 
 static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
index 088d39a..0a09dba 100644 (file)
@@ -34,9 +34,9 @@
 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct radeon_bo *bo = gem_to_radeon_bo(obj);
-       int npages = bo->tbo.num_pages;
 
-       return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
+       return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,
+                                    bo->tbo.ttm->num_pages);
 }
 
 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
index c93f3ab..1729cb9 100644 (file)
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
 
            TP_fast_assign(
                           __entry->bo = bo;
-                          __entry->pages = bo->tbo.num_pages;
+                          __entry->pages = bo->tbo.mem.num_pages;
                           ),
            TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );
index a3432c6..ea365ac 100644 (file)
@@ -274,7 +274,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 out:
        /* update statistics */
-       atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
+       atomic64_add(bo->base.size, &rdev->num_bytes_moved);
        radeon_bo_move_notify(bo, evict, new_mem);
        return 0;
 }
index 02cc5d2..2196e3d 100644 (file)
@@ -72,9 +72,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
        struct ttm_resource_manager *man;
        int i, mem_type;
 
-       drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
-                  bo, bo->mem.num_pages, bo->mem.size >> 10,
-                  bo->mem.size >> 20);
+       drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
+                  bo, bo->mem.num_pages, bo->base.size >> 10,
+                  bo->base.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                mem_type = placement->placement[i].mem_type;
                drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
@@ -268,7 +268,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                goto out_err;
        }
 
-       ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
+       ctx->bytes_moved += bo->base.size;
        return 0;
 
 out_err:
@@ -985,8 +985,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
        memset(&hop, 0, sizeof(hop));
 
-       mem.num_pages = bo->num_pages;
-       mem.size = mem.num_pages << PAGE_SHIFT;
+       mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        mem.bus.offset = 0;
        mem.bus.addr = NULL;
@@ -1102,7 +1101,7 @@ EXPORT_SYMBOL(ttm_bo_validate);
 
 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                         struct ttm_buffer_object *bo,
-                        unsigned long size,
+                        size_t size,
                         enum ttm_bo_type type,
                         struct ttm_placement *placement,
                         uint32_t page_alignment,
@@ -1113,9 +1112,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                         void (*destroy) (struct ttm_buffer_object *))
 {
        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-       int ret = 0;
-       unsigned long num_pages;
        bool locked;
+       int ret = 0;
 
        ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
        if (ret) {
@@ -1127,16 +1125,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                return -ENOMEM;
        }
 
-       num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       if (num_pages == 0) {
-               pr_err("Illegal buffer object size\n");
-               if (destroy)
-                       (*destroy)(bo);
-               else
-                       kfree(bo);
-               ttm_mem_global_free(mem_glob, acc_size);
-               return -EINVAL;
-       }
        bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
 
        kref_init(&bo->kref);
@@ -1145,10 +1133,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
        bo->type = type;
-       bo->num_pages = num_pages;
-       bo->mem.size = num_pages << PAGE_SHIFT;
        bo->mem.mem_type = TTM_PL_SYSTEM;
-       bo->mem.num_pages = bo->num_pages;
+       bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.offset = 0;
@@ -1166,9 +1152,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
        }
        if (!ttm_bo_uses_embedded_gem_object(bo)) {
                /*
-                * bo.gem is not initialized, so we have to setup the
+                * bo.base is not initialized, so we have to setup the
                 * struct elements we want use regardless.
                 */
+               bo->base.size = size;
                dma_resv_init(&bo->base._resv);
                drm_vma_node_reset(&bo->base.vma_node);
        }
@@ -1210,7 +1197,7 @@ EXPORT_SYMBOL(ttm_bo_init_reserved);
 
 int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
-               unsigned long size,
+               size_t size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
index 5bbc133..398d501 100644 (file)
@@ -431,9 +431,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 
        map->virtual = NULL;
        map->bo = bo;
-       if (num_pages > bo->num_pages)
+       if (num_pages > bo->mem.num_pages)
                return -EINVAL;
-       if (start_page > bo->num_pages)
+       if ((start_page + num_pages) > bo->mem.num_pages)
                return -EINVAL;
 
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
@@ -485,14 +485,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 
        if (mem->bus.is_iomem) {
                void __iomem *vaddr_iomem;
-               size_t size = bo->num_pages << PAGE_SHIFT;
 
                if (mem->bus.addr)
                        vaddr_iomem = (void __iomem *)mem->bus.addr;
                else if (mem->bus.caching == ttm_write_combined)
-                       vaddr_iomem = ioremap_wc(mem->bus.offset, size);
+                       vaddr_iomem = ioremap_wc(mem->bus.offset,
+                                                bo->base.size);
                else
-                       vaddr_iomem = ioremap(mem->bus.offset, size);
+                       vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
 
                if (!vaddr_iomem)
                        return -ENOMEM;
@@ -517,7 +517,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
-               vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
+               vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
                if (!vaddr)
                        return -ENOMEM;
 
index 144a494..6dc96cf 100644 (file)
@@ -198,7 +198,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
 
        /* Fault should not cross bo boundary. */
        page_offset &= ~(fault_page_size - 1);
-       if (page_offset + fault_page_size > bo->num_pages)
+       if (page_offset + fault_page_size > bo->mem.num_pages)
                goto out_fallback;
 
        if (bo->mem.bus.is_iomem)
@@ -306,7 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
        page_last = vma_pages(vma) + vma->vm_pgoff -
                drm_vma_node_start(&bo->base.vma_node);
 
-       if (unlikely(page_offset >= bo->num_pages))
+       if (unlikely(page_offset >= bo->mem.num_pages))
                return VM_FAULT_SIGBUS;
 
        prot = ttm_io_prot(bo, &bo->mem, prot);
@@ -469,7 +469,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
                 << PAGE_SHIFT);
        int ret;
 
-       if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+       if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
                return -EIO;
 
        ret = ttm_bo_reserve(bo, true, false, NULL);
index 77ba784..7f75a13 100644 (file)
@@ -129,7 +129,7 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
                               uint32_t page_flags,
                               enum ttm_caching caching)
 {
-       ttm->num_pages = bo->num_pages;
+       ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
        ttm->caching = ttm_cached;
        ttm->page_flags = page_flags;
        ttm->dma_address = NULL;
index f21881e..9f2779d 100644 (file)
@@ -482,8 +482,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
        d.src_addr = NULL;
        d.dst_pages = dst->ttm->pages;
        d.src_pages = src->ttm->pages;
-       d.dst_num_pages = dst->num_pages;
-       d.src_num_pages = src->num_pages;
+       d.dst_num_pages = dst->mem.num_pages;
+       d.src_num_pages = src->mem.num_pages;
        d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
        d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
        d.diff = diff;
index 263d76a..63dbc44 100644 (file)
@@ -223,7 +223,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
        uint32_t new_flags;
 
        place = vmw_vram_placement.placement[0];
-       place.lpfn = bo->num_pages;
+       place.lpfn = bo->mem.num_pages;
        placement.num_placement = 1;
        placement.placement = &place;
        placement.num_busy_placement = 1;
@@ -244,7 +244,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
         * that situation.
         */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
-           bo->mem.start < bo->num_pages &&
+           bo->mem.start < bo->mem.num_pages &&
            bo->mem.start > 0 &&
            buf->base.pin_count == 0) {
                ctx.interruptible = false;
@@ -391,7 +391,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
        if (virtual)
                return virtual;
 
-       ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+       ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
        if (ret)
                DRM_ERROR("Buffer object map failed: %d.\n", ret);
 
index 984d888..a077e42 100644 (file)
@@ -430,7 +430,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
         * Do a page by page copy of COTables. This eliminates slow vmap()s.
         * This should really be a TTM utility.
         */
-       for (i = 0; i < old_bo->num_pages; ++i) {
+       for (i = 0; i < old_bo->mem.num_pages; ++i) {
                bool dummy;
 
                ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
index e67e2e8..6c01600 100644 (file)
@@ -1042,7 +1042,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-               if (unlikely(new_query_bo->base.num_pages > 4)) {
+               if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
                        VMW_DEBUG_USER("Query buffer too large.\n");
                        return -EINVAL;
                }
@@ -1541,7 +1541,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                return ret;
 
        /* Make sure DMA doesn't cross BO boundaries. */
-       bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+       bo_size = vmw_bo->base.base.size;
        if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
                VMW_DEBUG_USER("Invalid DMA offset.\n");
                return -EINVAL;
index 8fe26e3..1774960 100644 (file)
@@ -64,20 +64,19 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
        spin_lock(&gman->lock);
 
        if (gman->max_gmr_pages > 0) {
-               gman->used_gmr_pages += bo->num_pages;
+               gman->used_gmr_pages += mem->num_pages;
                if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
                        goto nospace;
        }
 
        mem->mm_node = gman;
        mem->start = id;
-       mem->num_pages = bo->num_pages;
 
        spin_unlock(&gman->lock);
        return 0;
 
 nospace:
-       gman->used_gmr_pages -= bo->num_pages;
+       gman->used_gmr_pages -= mem->num_pages;
        spin_unlock(&gman->lock);
        ida_free(&gman->gmr_ida, id);
        return -ENOSPC;
index bc67f2b..7dc9612 100644 (file)
@@ -1220,7 +1220,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
        int ret;
 
        requested_size = mode_cmd->height * mode_cmd->pitches[0];
-       if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
+       if (unlikely(requested_size > bo->base.base.size)) {
                DRM_ERROR("Screen buffer object size is too small "
                          "for requested mode.\n");
                return -EINVAL;
index 0b76b3d..0a900af 100644 (file)
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
 int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 {
        struct vmw_bo_dirty *dirty = vbo->dirty;
-       pgoff_t num_pages = vbo->base.num_pages;
+       pgoff_t num_pages = vbo->base.mem.num_pages;
        size_t size, acc_size;
        int ret;
        static struct ttm_operation_ctx ctx = {
@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
                return ret;
 
        page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
-       if (unlikely(page_offset >= bo->num_pages)) {
+       if (unlikely(page_offset >= bo->mem.num_pages)) {
                ret = VM_FAULT_SIGBUS;
                goto out_unlock;
        }
@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
 
                page_offset = vmf->pgoff -
                        drm_vma_node_start(&bo->base.vma_node);
-               if (page_offset >= bo->num_pages ||
+               if (page_offset >= bo->mem.num_pages ||
                    vmw_resources_clean(vbo, page_offset,
                                        page_offset + PAGE_SIZE,
                                        &allowed_prefault)) {
@@ -531,7 +531,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
 
                page_offset = vmf->pgoff -
                        drm_vma_node_start(&bo->base.vma_node);
-               if (page_offset >= bo->num_pages ||
+               if (page_offset >= bo->mem.num_pages ||
                    vmw_resources_clean(vbo, page_offset,
                                        page_offset + PAGE_SIZE,
                                        &allowed_prefault)) {
index 00b5358..f6e8fdf 100644 (file)
@@ -360,7 +360,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
        int ret;
 
        if (likely(res->backup)) {
-               BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+               BUG_ON(res->backup->base.base.size < size);
                return 0;
        }
 
index f328aa5..e76a720 100644 (file)
@@ -856,8 +856,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
                        return ret;
                }
 
-               if ((u64)buffer->base.num_pages * PAGE_SIZE <
-                   (u64)size + (u64)offset) {
+               if ((u64)buffer->base.base.size < (u64)size + (u64)offset) {
                        VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
                        ret = -EINVAL;
                        goto out_bad_arg;
index 5b04ec0..27ab2c5 100644 (file)
@@ -512,7 +512,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
        cmd->body.host.mipmap = 0;
        cmd->body.transfer = ddirty->transfer;
        suffix->suffixSize = sizeof(*suffix);
-       suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+       suffix->maximumOffset = ddirty->buf->base.base.size;
 
        if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
                blit_size += sizeof(struct vmw_stdu_update);
@@ -1238,7 +1238,7 @@ static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane  *update,
        vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
 
        suffix->suffixSize = sizeof(*suffix);
-       suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+       suffix->maximumOffset = vfbbo->buffer->base.base.size;
 
        vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
                                 bb->y1, bb->y2);
index 3914bfe..fa9be30 100644 (file)
@@ -1550,8 +1550,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                                         &res->backup,
                                         &user_srf->backup_base);
                if (ret == 0) {
-                       if (res->backup->base.num_pages * PAGE_SIZE <
-                           res->backup_size) {
+                       if (res->backup->base.base.size < res->backup_size) {
                                VMW_DEBUG_USER("Surface backup buffer too small.\n");
                                vmw_bo_unreference(&res->backup);
                                ret = -EINVAL;
@@ -1614,7 +1613,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
        if (res->backup) {
                rep->buffer_map_handle =
                        drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
-               rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+               rep->buffer_size = res->backup->base.base.size;
                rep->buffer_handle = backup_handle;
        } else {
                rep->buffer_map_handle = 0;
@@ -1692,7 +1691,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
        rep->crep.buffer_handle = backup_handle;
        rep->crep.buffer_map_handle =
                drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
-       rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+       rep->crep.buffer_size = srf->res.backup->base.base.size;
 
        rep->creq.version = drm_vmw_gb_surface_v1;
        rep->creq.svga3d_flags_upper_32_bits =
index 79b9367..0d4e3fc 100644 (file)
@@ -125,7 +125,6 @@ struct ttm_buffer_object {
        struct ttm_bo_device *bdev;
        enum ttm_bo_type type;
        void (*destroy) (struct ttm_buffer_object *);
-       unsigned long num_pages;
        size_t acc_size;
 
        /**
@@ -397,13 +396,11 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
 
 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                         struct ttm_buffer_object *bo,
-                        unsigned long size,
-                        enum ttm_bo_type type,
+                        size_t size, enum ttm_bo_type type,
                         struct ttm_placement *placement,
                         uint32_t page_alignment,
                         struct ttm_operation_ctx *ctx,
-                        size_t acc_size,
-                        struct sg_table *sg,
+                        size_t acc_size, struct sg_table *sg,
                         struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *));
 
@@ -445,7 +442,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
  * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
  */
 int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
-               unsigned long size, enum ttm_bo_type type,
+               size_t size, enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment, bool interrubtible, size_t acc_size,
                struct sg_table *sg, struct dma_resv *resv,
index f48a70d..ad6da99 100644 (file)
@@ -171,7 +171,6 @@ struct ttm_bus_placement {
 struct ttm_resource {
        void *mm_node;
        unsigned long start;
-       unsigned long size;
        unsigned long num_pages;
        uint32_t page_alignment;
        uint32_t mem_type;