From 7760a2e38a8324688e83b91f91ff7be710e70db1 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Wed, 1 Nov 2017 03:56:19 +1000
Subject: [PATCH] drm/nouveau: split various bo flags out into their own members

It's far more convenient to deal with like this.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_bo.c   | 71 +++++++++++++++++++---------------
 drivers/gpu/drm/nouveau/nouveau_bo.h   | 10 +++--
 drivers/gpu/drm/nouveau/nouveau_gem.c  | 11 +++++-
 drivers/gpu/drm/nouveau/nouveau_gem.h  |  3 --
 drivers/gpu/drm/nouveau/nouveau_ttm.c  | 14 +++----
 drivers/gpu/drm/nouveau/nv50_display.c |  8 ++--
 6 files changed, 66 insertions(+), 51 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e1eb8ba781fe..40a8c6589f3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -97,7 +97,7 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
 
 static struct nouveau_drm_tile *
 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
-		   u32 size, u32 pitch, u32 flags)
+		   u32 size, u32 pitch, u32 zeta)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
@@ -120,8 +120,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 	}
 
 	if (found)
-		nv10_bo_update_tile_region(dev, found, addr, size,
-					   pitch, flags);
+		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
 	return found;
 }
 
@@ -155,27 +154,27 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 	struct nvif_device *device = &drm->client.device;
 
 	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
-		if (nvbo->tile_mode) {
+		if (nvbo->mode) {
 			if (device->info.chipset >= 0x40) {
 				*align = 65536;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x30) {
 				*align = 32768;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x20) {
 				*align = 16384;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x10) {
 				*align = 16384;
-				*size = roundup_64(*size, 32 * nvbo->tile_mode);
+				*size = roundup_64(*size, 32 * nvbo->mode);
 			}
 		}
 	} else {
-		*size = roundup_64(*size, (1 << nvbo->page_shift));
-		*align = max((1 << nvbo->page_shift), *align);
+		*size = roundup_64(*size, (1 << nvbo->page));
+		*align = max((1 << nvbo->page), *align);
 	}
 
 	*size = roundup_64(*size, PAGE_SIZE);
@@ -207,18 +206,34 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
 	INIT_LIST_HEAD(&nvbo->vma_list);
-	nvbo->tile_mode = tile_mode;
-	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
 	nvbo->cli = cli;
 
 	if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
 		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
-	nvbo->page_shift = 12;
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+		nvbo->comp = gf100_pte_storage_type_map[nvbo->kind] != nvbo->kind;
+	} else
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+		nvbo->comp = (tile_flags & 0x00030000) >> 16;
+	} else {
+		nvbo->zeta = (tile_flags & 0x00000007);
+	}
+	nvbo->mode = tile_mode;
+	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+
+	nvbo->page = 12;
 	if (drm->client.vm) {
 		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-			nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
+			nvbo->page = drm->client.vm->mmu->lpg_shift;
+		else {
+			if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+				nvbo->kind = gf100_pte_storage_type_map[nvbo->kind];
+			nvbo->comp = 0;
+		}
 	}
 
 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -262,7 +277,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 	unsigned i, fpfn, lpfn;
 
 	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
-	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
 		 * Make sure that the color and depth buffers are handled
@@ -270,7 +285,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 		 * speed up when alpha-blending and depth-test are enabled
 		 * at the same time.
 		 */
-		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+		if (nvbo->zeta) {
 			fpfn = vram_pages / 2;
 			lpfn = ~0;
 		} else {
@@ -321,14 +336,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
 	    memtype == TTM_PL_FLAG_VRAM && contig) {
-		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
-			if (bo->mem.mem_type == TTM_PL_VRAM) {
-				struct nvkm_mem *mem = bo->mem.mm_node;
-				if (!nvkm_mm_contiguous(mem->mem))
-					evict = true;
-			}
-			nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
+		if (!nvbo->contig) {
+			nvbo->contig = true;
 			force = true;
+			evict = true;
 		}
 	}
 
@@ -376,7 +387,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 
 out:
 	if (force && ret)
-		nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
+		nvbo->contig = false;
 	ttm_bo_unreserve(bo);
 	return ret;
 }
@@ -1210,7 +1221,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
 	list_for_each_entry(vma, &nvbo->vma_list, head) {
 		if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
 		    (new_reg->mem_type == TTM_PL_VRAM ||
-		     nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
+		     nvbo->page != vma->vm->mmu->lpg_shift)) {
 			nvkm_vm_map(vma, new_reg->mm_node);
 		} else {
 			WARN_ON(ttm_bo_wait(bo, false, false));
@@ -1234,8 +1245,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
 		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
-					       nvbo->tile_mode,
-					       nvbo->tile_flags);
+					       nvbo->mode, nvbo->zeta);
 	}
 
 	return 0;
@@ -1408,7 +1418,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	 */
 	if (bo->mem.mem_type != TTM_PL_VRAM) {
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
-		    !nouveau_bo_tile_layout(nvbo))
+		    !nvbo->kind)
 			return 0;
 
 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
@@ -1596,14 +1606,13 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
 	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
 	int ret;
 
-	ret = nvkm_vm_get(vm, size, nvbo->page_shift,
-			  NV_MEM_ACCESS_RW, vma);
+	ret = nvkm_vm_get(vm, size, nvbo->page, NV_MEM_ACCESS_RW, vma);
 	if (ret)
 		return ret;
 
 	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
 	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
-	     nvbo->page_shift != vma->vm->mmu->lpg_shift))
+	     nvbo->page != vma->vm->mmu->lpg_shift))
 		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
 
 	list_add_tail(&vma->head, &nvbo->vma_list);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index b06a5385d6dd..a179bbf25dce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -24,12 +24,16 @@ struct nouveau_bo {
 	bool validate_mapped;
 
 	struct list_head vma_list;
-	unsigned page_shift;
 
 	struct nouveau_cli *cli;
 
-	u32 tile_mode;
-	u32 tile_flags;
+	unsigned contig:1;
+	unsigned page:5;
+	unsigned kind:8;
+	unsigned comp:3;
+	unsigned zeta:3;
+	unsigned mode;
+
 	struct nouveau_drm_tile *tile;
 
 	/* Only valid if allocated via nouveau_gem_new() and iff you hold a
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 23140c4f3ac5..2d924739997a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -246,8 +246,15 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 
 	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
 	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
-	rep->tile_mode = nvbo->tile_mode;
-	rep->tile_flags = nvbo->tile_flags;
+	rep->tile_mode = nvbo->mode;
+	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+		rep->tile_flags |= nvbo->kind << 8;
+	else
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
+	else
+		rep->tile_flags |= nvbo->zeta;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 8fa6ed9ddd3a..d39f845dda87 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -6,9 +6,6 @@
 #include "nouveau_drv.h"
 #include "nouveau_bo.h"
 
-#define nouveau_bo_tile_layout(nvbo) \
-	((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
-
 static inline struct nouveau_bo *
 nouveau_gem_object(struct drm_gem_object *gem)
 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 65511f320a3a..b8727413db12 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -88,18 +88,18 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
-	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
-		size_nc = 1 << nvbo->page_shift;
+	if (!nvbo->contig)
+		size_nc = 1 << nvbo->page;
 
 	ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
 			     reg->page_alignment << PAGE_SHIFT, size_nc,
-			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
+			     nvbo->comp << 8 | nvbo->kind, &node);
 	if (ret) {
 		reg->mm_node = NULL;
 		return (ret == -ENOSPC) ? 0 : ret;
 	}
 
-	node->page_shift = nvbo->page_shift;
+	node->page_shift = nvbo->page;
 
 	reg->mm_node = node;
 	reg->start   = node->offset >> PAGE_SHIFT;
@@ -158,14 +158,12 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 	case NV_DEVICE_INFO_V0_CURIE:
 		break;
 	case NV_DEVICE_INFO_V0_TESLA:
-		if (drm->client.device.info.chipset != 0x50)
-			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
-		break;
 	case NV_DEVICE_INFO_V0_FERMI:
 	case NV_DEVICE_INFO_V0_KEPLER:
 	case NV_DEVICE_INFO_V0_MAXWELL:
 	case NV_DEVICE_INFO_V0_PASCAL:
-		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
+		if (drm->client.device.info.chipset != 0x50)
+			node->memtype = nvbo->kind;
 		break;
 	default:
 		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 1f3872f438c9..92d46222c79d 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -424,7 +424,7 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
 {
 	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
 	struct nv50_dmac_ctxdma *ctxdma;
-	const u8 kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+	const u8 kind = fb->nvbo->kind;
 	const u32 handle = 0xfb000000 | kind;
 	struct {
 		struct nv_dma_v0 base;
@@ -847,7 +847,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 
 	asyw->image.w = fb->base.width;
 	asyw->image.h = fb->base.height;
-	asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+	asyw->image.kind = fb->nvbo->kind;
 
 	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
 		asyw->interval = 0;
@@ -857,9 +857,9 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 	if (asyw->image.kind) {
 		asyw->image.layout = 0;
 		if (drm->client.device.info.chipset >= 0xc0)
-			asyw->image.block = fb->nvbo->tile_mode >> 4;
+			asyw->image.block = fb->nvbo->mode >> 4;
 		else
-			asyw->image.block = fb->nvbo->tile_mode;
+			asyw->image.block = fb->nvbo->mode;
 		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
 	} else {
 		asyw->image.layout = 1;
-- 
2.11.0
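
For reference, the bit layout that nouveau_bo_new() decodes out of the userspace tile_flags word, and that nouveau_gem_info() packs back, can be summarised in a standalone sketch. This is illustrative only and not part of the patch: the struct bo_bits type, the decode_tile_flags()/encode_tile_flags() helpers, the is_fermi/is_tesla flags (stand-ins for the cli->device.info.family checks), and the main() are all hypothetical, the NOUVEAU_GEM_TILE_NONCONTIG value is assumed to match the nouveau uapi header, and the Fermi compression detection via gf100_pte_storage_type_map is omitted.

```c
/* Hypothetical sketch of the tile_flags bit layout used by this patch.
 * Masks and shifts mirror nouveau_bo_new() / nouveau_gem_info() above;
 * everything else here is invented for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008 /* assumed uapi value */

struct bo_bits {
	unsigned contig:1; /* !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) */
	unsigned kind:8;   /* Tesla and later: storage type, bits 15:8 */
	unsigned comp:3;   /* Tesla: compression mode, bits 17:16 */
	unsigned zeta:3;   /* pre-Tesla: zeta flags, bits 2:0 */
};

/* Split the packed word into the new members (Fermi comp handling omitted). */
static void decode_tile_flags(struct bo_bits *bo, uint32_t tile_flags,
			      bool is_fermi, bool is_tesla)
{
	bo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
	if (is_fermi) {
		bo->kind = (tile_flags & 0x0000ff00) >> 8;
	} else if (is_tesla) {
		bo->kind = (tile_flags & 0x00007f00) >> 8;
		bo->comp = (tile_flags & 0x00030000) >> 16;
	} else {
		bo->zeta = tile_flags & 0x00000007;
	}
}

/* Re-pack the members into a tile_flags word, as nouveau_gem_info() does. */
static uint32_t encode_tile_flags(const struct bo_bits *bo,
				  bool is_fermi, bool is_tesla)
{
	uint32_t tile_flags = bo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;

	if (is_fermi)
		tile_flags |= bo->kind << 8;
	else if (is_tesla)
		tile_flags |= bo->kind << 8 | bo->comp << 16;
	else
		tile_flags |= bo->zeta;
	return tile_flags;
}

int main(void)
{
	struct bo_bits bo = {0};

	/* Example: Tesla-style flags with kind 0x70, comp 1, non-contiguous. */
	decode_tile_flags(&bo, 0x00017008, false, true);
	printf("contig=%u kind=0x%02x comp=%u zeta=%u repacked=0x%08x\n",
	       bo.contig, bo.kind, bo.comp, bo.zeta,
	       encode_tile_flags(&bo, false, true));
	return 0;
}
```

The round trip is lossy by design: bits the kernel no longer cares about for a given family are simply dropped, which is why the patch can store the remaining state in a few small bitfields instead of two full u32 words.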