5 * \author Gareth Hughes <gareth@valinux.com>
9 * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12 * All Rights Reserved.
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
29 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
30 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
31 * DEALINGS IN THE SOFTWARE.
36 # define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
37 # define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1))
39 #define ATI_PCIE_WRITE 0x4
40 #define ATI_PCIE_READ 0x8
42 static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *gart_info, dma_addr_t addr, u32 *pci_gart)
46 page_base = (u32)addr & ATI_PCIGART_PAGE_MASK;
47 switch(gart_info->gart_reg_if) {
48 case DRM_ATI_GART_IGP:
49 page_base |= (upper_32_bits(addr) & 0xff) << 4;
52 case DRM_ATI_GART_PCIE:
54 page_base |= (upper_32_bits(addr) & 0xff) << 24;
55 page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;
58 case DRM_ATI_GART_PCI:
61 *pci_gart = cpu_to_le32(page_base);
64 static __inline__ dma_addr_t gart_get_page_from_table(struct drm_ati_pcigart_info *gart_info, u32 *pci_gart)
67 switch(gart_info->gart_reg_if) {
68 case DRM_ATI_GART_IGP:
69 retval = (*pci_gart & ATI_PCIGART_PAGE_MASK);
70 retval += (((*pci_gart & 0xf0) >> 4) << 16) << 16;
72 case DRM_ATI_GART_PCIE:
73 retval = (*pci_gart & ~0xc);
76 case DRM_ATI_GART_PCI:
84 int drm_ati_alloc_pcigart_table(struct drm_device *dev,
85 struct drm_ati_pcigart_info *gart_info)
87 gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
89 gart_info->table_mask);
90 if (gart_info->table_handle == NULL)
94 /* IGPs only exist on x86 in any case */
95 if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
96 set_memory_uc(gart_info->table_handle->vaddr, gart_info->table_size >> PAGE_SHIFT);
99 memset(gart_info->table_handle->vaddr, 0, gart_info->table_size);
102 EXPORT_SYMBOL(drm_ati_alloc_pcigart_table);
104 static void drm_ati_free_pcigart_table(struct drm_device *dev,
105 struct drm_ati_pcigart_info *gart_info)
108 /* IGPs only exist on x86 in any case */
109 if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
110 set_memory_wb(gart_info->table_handle->vaddr, gart_info->table_size >> PAGE_SHIFT);
112 drm_pci_free(dev, gart_info->table_handle);
113 gart_info->table_handle = NULL;
116 int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
118 struct drm_sg_mem *entry = dev->sg;
123 /* we need to support large memory configurations */
128 if (gart_info->bus_addr) {
130 max_pages = (gart_info->table_size / sizeof(u32));
131 pages = (entry->pages <= max_pages)
132 ? entry->pages : max_pages;
134 for (i = 0; i < pages; i++) {
135 if (!entry->busaddr[i])
137 pci_unmap_page(dev->pdev, entry->busaddr[i],
138 PAGE_SIZE, PCI_DMA_TODEVICE);
141 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
142 gart_info->bus_addr = 0;
146 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
147 && gart_info->table_handle) {
149 drm_ati_free_pcigart_table(dev, gart_info);
154 EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
156 int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
158 struct drm_sg_mem *entry = dev->sg;
159 void *address = NULL;
162 dma_addr_t bus_address = 0;
165 dma_addr_t entry_addr;
168 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN && gart_info->table_handle == NULL) {
169 DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
171 ret = drm_ati_alloc_pcigart_table(dev, gart_info);
173 DRM_ERROR("cannot allocate PCI GART page!\n");
178 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
179 address = gart_info->table_handle->vaddr;
180 bus_address = gart_info->table_handle->busaddr;
182 address = gart_info->addr;
183 bus_address = gart_info->bus_addr;
187 DRM_ERROR("no scatter/gather memory!\n");
191 pci_gart = (u32 *) address;
193 max_pages = (gart_info->table_size / sizeof(u32));
194 pages = (entry->pages <= max_pages)
195 ? entry->pages : max_pages;
197 for (i = 0; i < pages; i++) {
198 /* we need to support large memory configurations */
199 entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
200 0, PAGE_SIZE, PCI_DMA_TODEVICE);
201 if (entry->busaddr[i] == 0) {
202 DRM_ERROR("unable to map PCIGART pages!\n");
203 drm_ati_pcigart_cleanup(dev, gart_info);
209 entry_addr = entry->busaddr[i];
210 for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
211 gart_insert_page_into_table(gart_info, entry_addr, pci_gart);
213 entry_addr += ATI_PCIGART_PAGE_SIZE;
222 gart_info->addr = address;
223 gart_info->bus_addr = bus_address;
226 EXPORT_SYMBOL(drm_ati_pcigart_init);
228 static int ati_pcigart_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
230 return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
233 static int ati_pcigart_populate(struct drm_ttm_backend *backend,
234 unsigned long num_pages,
236 struct page *dummy_read_page)
238 struct ati_pcigart_ttm_backend *atipci_be =
239 container_of(backend, struct ati_pcigart_ttm_backend, backend);
241 atipci_be->pages = pages;
242 atipci_be->num_pages = num_pages;
243 atipci_be->populated = 1;
247 static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
248 struct drm_bo_mem_reg *bo_mem)
250 struct ati_pcigart_ttm_backend *atipci_be =
251 container_of(backend, struct ati_pcigart_ttm_backend, backend);
254 struct drm_ati_pcigart_info *info = atipci_be->gart_info;
256 dma_addr_t offset = bo_mem->mm_node->start;
257 dma_addr_t page_base;
259 pci_gart = info->addr;
262 while (j < (offset + atipci_be->num_pages)) {
263 if (gart_get_page_from_table(info, pci_gart+j))
268 for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
269 struct page *cur_page = atipci_be->pages[i];
271 page_base = page_to_phys(cur_page);
272 gart_insert_page_into_table(info, page_base, pci_gart + j);
277 atipci_be->gart_flush_fn(atipci_be->dev);
279 atipci_be->bound = 1;
280 atipci_be->offset = offset;
281 /* need to traverse table and add entries */
286 static int ati_pcigart_unbind_ttm(struct drm_ttm_backend *backend)
288 struct ati_pcigart_ttm_backend *atipci_be =
289 container_of(backend, struct ati_pcigart_ttm_backend, backend);
290 struct drm_ati_pcigart_info *info = atipci_be->gart_info;
291 unsigned long offset = atipci_be->offset;
294 u32 *pci_gart = info->addr;
296 if (atipci_be->bound != 1)
299 for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
302 atipci_be->gart_flush_fn(atipci_be->dev);
303 atipci_be->bound = 0;
304 atipci_be->offset = 0;
308 static void ati_pcigart_clear_ttm(struct drm_ttm_backend *backend)
310 struct ati_pcigart_ttm_backend *atipci_be =
311 container_of(backend, struct ati_pcigart_ttm_backend, backend);
314 if (atipci_be->pages) {
315 backend->func->unbind(backend);
316 atipci_be->pages = NULL;
319 atipci_be->num_pages = 0;
322 static void ati_pcigart_destroy_ttm(struct drm_ttm_backend *backend)
324 struct ati_pcigart_ttm_backend *atipci_be;
327 atipci_be = container_of(backend, struct ati_pcigart_ttm_backend, backend);
329 if (atipci_be->pages) {
330 backend->func->clear(backend);
332 drm_ctl_free(atipci_be, sizeof(*atipci_be), DRM_MEM_TTM);
337 static struct drm_ttm_backend_func ati_pcigart_ttm_backend =
339 .needs_ub_cache_adjust = ati_pcigart_needs_unbind_cache_adjust,
340 .populate = ati_pcigart_populate,
341 .clear = ati_pcigart_clear_ttm,
342 .bind = ati_pcigart_bind_ttm,
343 .unbind = ati_pcigart_unbind_ttm,
344 .destroy = ati_pcigart_destroy_ttm,
347 struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev))
349 struct ati_pcigart_ttm_backend *atipci_be;
351 atipci_be = drm_ctl_calloc(1, sizeof (*atipci_be), DRM_MEM_TTM);
355 atipci_be->populated = 0;
356 atipci_be->backend.func = &ati_pcigart_ttm_backend;
357 // atipci_be->backend.mem_type = DRM_BO_MEM_TT;
358 atipci_be->gart_info = info;
359 atipci_be->gart_flush_fn = gart_flush_fn;
360 atipci_be->dev = dev;
362 return &atipci_be->backend;
364 EXPORT_SYMBOL(ati_pcigart_init_ttm);