if (cache == KGSL_CACHEMODE_WRITEBACK
|| cache == KGSL_CACHEMODE_WRITETHROUGH) {
- struct scatterlist *s;
int i;
unsigned long addr = vma->vm_start;
+ struct kgsl_memdesc *m = &entry->memdesc;
- for_each_sg(entry->memdesc.sgt->sgl, s,
- entry->memdesc.sgt->nents, i) {
- int j;
- for (j = 0; j < (s->length >> PAGE_SHIFT); j++) {
- struct page *page = sg_page(s);
- page = nth_page(page, j);
- vm_insert_page(vma, addr, page);
- addr += PAGE_SIZE;
- }
+ for (i = 0; i < m->page_count; i++) {
+ struct page *page = m->pages[i];
+
+ vm_insert_page(vma, addr, page);
+ addr += PAGE_SIZE;
}
}
* @ops: Function hooks for the memdesc memory type
* @flags: Flags set from userspace
* @dev: Pointer to the struct device that owns this memory
- * @memmap: bitmap of pages for mmapsize
- * @memmap_len: Number of bits for memmap
+ * @attrs: dma attributes for this memory
+ * @pages: An array of pointers to allocated pages
+ * @page_count: Total number of pages allocated
*/
struct kgsl_memdesc {
struct kgsl_pagetable *pagetable;
uint64_t flags;
struct device *dev;
struct dma_attrs attrs;
+ struct page **pages;
+ unsigned int page_count;
};
/*
(unsigned long *) m->useraddr,
m->size, entry->id, flags,
memtype_str(kgsl_memdesc_usermem_type(m)),
- usage, m->sgt->nents, m->mapsize);
+ usage, (m->sgt ? m->sgt->nents : 0), m->mapsize);
if (entry->metadata[0] != 0)
seq_printf(s, " %s", entry->metadata);
uint64_t addr = memdesc->gpuaddr;
uint64_t size = memdesc->size;
unsigned int flags = _get_protection_flags(memdesc);
+ struct sg_table *sgt = NULL;
- ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, memdesc->sgt->sgl,
- memdesc->sgt->nents, flags);
+	/*
+	 * Paged memory allocated through kgsl has a non-NULL pages array;
+	 * build a temporary sgt just for this map operation. Contiguous
+	 * memory already carries its own sgt, so use that one directly.
+	 */
+ if (memdesc->pages != NULL)
+ sgt = kgsl_alloc_sgt_from_pages(memdesc);
+ else
+ sgt = memdesc->sgt;
+
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt->sgl,
+ sgt->nents, flags);
if (ret)
- return ret;
+ goto done;
ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
if (ret)
_iommu_unmap_sync_pc(pt, memdesc, addr, size);
+done:
+ if (memdesc->pages != NULL)
+ kgsl_free_sgt(sgt);
+
return ret;
}
{
int pg_sz;
unsigned int protflags = _get_protection_flags(memdesc);
+ int ret;
+ struct sg_table *sgt = NULL;
pg_sz = (1 << kgsl_memdesc_get_align(memdesc));
if (!IS_ALIGNED(virtaddr | virtoffset | physoffset | size, pg_sz))
if (size == 0)
return -EINVAL;
- return _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
- memdesc, memdesc->sgt->sgl, memdesc->sgt->nents,
- physoffset, size, protflags);
+	/*
+	 * Paged memory allocated through kgsl has a non-NULL pages array;
+	 * build a temporary sgt just for this map operation. Contiguous
+	 * memory already carries its own sgt, so use that one directly.
+	 */
+ if (memdesc->pages != NULL)
+ sgt = kgsl_alloc_sgt_from_pages(memdesc);
+ else
+ sgt = memdesc->sgt;
+
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
+ memdesc, sgt->sgl, sgt->nents,
+ physoffset, size, protflags);
+
+ if (memdesc->pages != NULL)
+ kgsl_free_sgt(sgt);
+
+ return ret;
}
/* This function must be called with context bank attached */
}
}
+/**
+ * kgsl_pool_free_pages() - Free an array of allocated pages
+ * @pages: pointer to the array of pages
+ * @pcount: number of page pointers held in @pages
+ *
+ * Walk the array and free every entry, stepping over the tail pages
+ * of compound allocations so that each group is freed through its
+ * head page. Pages are added back to the pool if it has sufficient
+ * space, otherwise they are given back to the system.
+ */
+void kgsl_pool_free_pages(struct page **pages, unsigned int pcount)
+{
+	unsigned int i;
+
+	if (pages == NULL || pcount == 0)
+		return;
+
+	for (i = 0; i < pcount;) {
+		/*
+		 * The head page's compound order tells us how many
+		 * array slots this allocation occupies.
+		 */
+		struct page *p = pages[i];
+
+		i += 1U << compound_order(p);
+		kgsl_pool_free_page(p);
+	}
+}
static int kgsl_pool_idx_lookup(unsigned int order)
{
int i;
}
void kgsl_pool_free_sgt(struct sg_table *sgt);
+void kgsl_pool_free_pages(struct page **pages, unsigned int page_count);
void kgsl_init_page_pools(void);
void kgsl_exit_page_pools(void);
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- int i, pgoff;
- struct scatterlist *s = memdesc->sgt->sgl;
+ int pgoff;
unsigned int offset;
offset = ((unsigned long) vmf->virtual_address - vma->vm_start);
pgoff = offset >> PAGE_SHIFT;
- /*
- * The sglist might be comprised of mixed blocks of memory depending
- * on how many 64K pages were allocated. This means we have to do math
- * to find the actual 4K page to map in user space
- */
-
- for (i = 0; i < memdesc->sgt->nents; i++) {
- int npages = s->length >> PAGE_SHIFT;
+ if (pgoff < memdesc->page_count) {
+ struct page *page = memdesc->pages[pgoff];
- if (pgoff < npages) {
- struct page *page = sg_page(s);
+ get_page(page);
+ vmf->page = page;
- page = nth_page(page, pgoff);
+ memdesc->mapsize += PAGE_SIZE;
- get_page(page);
- vmf->page = page;
-
- memdesc->mapsize += PAGE_SIZE;
-
- return 0;
- }
-
- pgoff -= npages;
- s = sg_next(s);
+ return 0;
}
return VM_FAULT_SIGBUS;
for_each_sg_page(memdesc->sgt->sgl, &sg_iter,
memdesc->sgt->nents, 0)
ClearPagePrivate(sg_page_iter_page(&sg_iter));
+
}
- kgsl_pool_free_sgt(memdesc->sgt);
+ /* Free pages using the pages array for non secure paged memory */
+ if (memdesc->pages != NULL)
+ kgsl_pool_free_pages(memdesc->pages, memdesc->page_count);
+ else
+ kgsl_pool_free_sgt(memdesc->sgt);
+
}
/*
return -ENOMEM;
mutex_lock(&kernel_map_global_lock);
- if (!memdesc->hostptr) {
+ if ((!memdesc->hostptr) && (memdesc->pages != NULL)) {
pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
- struct page **pages = NULL;
- struct scatterlist *sg;
- int npages = PAGE_ALIGN(memdesc->size) >> PAGE_SHIFT;
- int sglen = memdesc->sgt->nents;
- int i, count = 0;
-
- /* create a list of pages to call vmap */
- pages = kgsl_malloc(npages * sizeof(struct page *));
- if (pages == NULL) {
- ret = -ENOMEM;
- goto done;
- }
-
- for_each_sg(memdesc->sgt->sgl, sg, sglen, i) {
- struct page *page = sg_page(sg);
- int j;
-
- for (j = 0; j < sg->length >> PAGE_SHIFT; j++)
- pages[count++] = page++;
- }
-
- memdesc->hostptr = vmap(pages, count,
+ memdesc->hostptr = vmap(memdesc->pages, memdesc->page_count,
VM_IOREMAP, page_prot);
if (memdesc->hostptr)
KGSL_STATS_ADD(memdesc->size,
&kgsl_driver.stats.vmalloc_max);
else
ret = -ENOMEM;
- kgsl_free(pages);
}
if (memdesc->hostptr)
memdesc->hostptr_count++;
-done:
+
mutex_unlock(&kernel_map_global_lock);
return ret;
unsigned int j, page_size, len_alloc;
unsigned int pcount = 0;
size_t len;
- struct page **pages = NULL;
unsigned int align;
size = PAGE_ALIGN(size);
memdesc->pagetable = pagetable;
memdesc->ops = &kgsl_page_alloc_ops;
- memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (memdesc->sgt == NULL)
- return -ENOMEM;
-
/*
- * Allocate space to store the list of pages to send to vmap. This is an
- * array of pointers so we can track 1024 pages per page of allocation
+	 * Allocate space to store the list of pages. This is an array of
+	 * pointers so we can track 1024 pages per page of allocation.
+	 * Keep this array around for non-global, non-secure buffers that
+	 * kgsl allocates: it lets the vm fault routine find the faulted
+	 * page in constant time.
*/
- pages = kgsl_malloc(len_alloc * sizeof(struct page *));
+ memdesc->pages = kgsl_malloc(len_alloc * sizeof(struct page *));
- if (pages == NULL) {
+ if (memdesc->pages == NULL) {
ret = -ENOMEM;
goto done;
}
int page_count;
page_count = kgsl_pool_alloc_page(&page_size,
- pages + pcount, len_alloc - pcount,
+ memdesc->pages + pcount,
+ len_alloc - pcount,
&align);
-
if (page_count <= 0) {
if (page_count == -EAGAIN)
continue;
pcount += page_count;
len -= page_size;
memdesc->size += page_size;
+ memdesc->page_count += page_count;
/* Get the needed page size for the next iteration */
page_size = get_page_size(len, align);
}
- ret = sg_alloc_table_from_pages(memdesc->sgt, pages, pcount, 0,
- memdesc->size, GFP_KERNEL);
- if (ret)
- goto done;
-
/* Call to the hypervisor to lock any secure buffer allocations */
if (memdesc->flags & KGSL_MEMFLAGS_SECURE) {
unsigned int i;
int source_vm = VMID_HLOS;
int dest_vm = VMID_CP_PIXEL;
+ memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (memdesc->sgt == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = sg_alloc_table_from_pages(memdesc->sgt, memdesc->pages,
+ memdesc->page_count, 0, memdesc->size, GFP_KERNEL);
+ if (ret) {
+ kfree(memdesc->sgt);
+ goto done;
+ }
+
ret = hyp_assign_table(memdesc->sgt, &source_vm, 1,
&dest_vm, &dest_perms, 1);
- if (ret)
+ if (ret) {
+ sg_free_table(memdesc->sgt);
+ kfree(memdesc->sgt);
+ memdesc->sgt = NULL;
goto done;
+ }
/* Set private bit for each sg to indicate that its secured */
for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i)
KGSL_STATS_ADD(memdesc->size, &kgsl_driver.stats.secure,
&kgsl_driver.stats.secure_max);
+		/*
+		 * The pages array is not needed for secure buffers
+		 * because they are never mapped into the CPU's
+		 * address space.
+		 */
+ kgsl_free(memdesc->pages);
+ memdesc->pages = NULL;
+ memdesc->page_count = 0;
+
/* Don't map and zero the locked secure buffer */
goto done;
}
done:
if (ret) {
- if (pages) {
+ if (memdesc->pages) {
unsigned int count = 1;
for (j = 0; j < pcount; j += count) {
- count = 1 << compound_order(pages[j]);
- kgsl_pool_free_page(pages[j]);
+ count = 1 << compound_order(memdesc->pages[j]);
+ kgsl_pool_free_page(memdesc->pages[j]);
}
}
- kfree(memdesc->sgt);
+ kgsl_free(memdesc->pages);
memset(memdesc, 0, sizeof(*memdesc));
}
- kgsl_free(pages);
return ret;
}
kfree(memdesc->sgt);
}
+ if (memdesc->pages)
+ kgsl_free(memdesc->pages);
+
memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);
-/* Copyright (c) 2002,2007-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
void kgsl_sharedmem_set_noretry(bool val);
bool kgsl_sharedmem_get_noretry(void);
+/**
+ * kgsl_alloc_sgt_from_pages() - Allocate a sg table for a memdesc
+ *
+ * @m: memory descriptor whose pages array describes the allocation
+ *
+ * Build a scatter/gather table covering @m->pages. The caller owns
+ * the returned table and must release it with kgsl_free_sgt().
+ *
+ * Return: pointer to the new sg table on success, or an ERR_PTR()
+ * encoded error on failure.
+ */
+static inline struct sg_table *kgsl_alloc_sgt_from_pages(
+		struct kgsl_memdesc *m)
+{
+	int ret;
+	struct sg_table *sgt;
+
+	/* Nothing to describe without a populated pages array */
+	if (m->pages == NULL || m->page_count == 0)
+		return ERR_PTR(-EINVAL);
+
+	sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (sgt == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table_from_pages(sgt, m->pages, m->page_count, 0,
+			m->size, GFP_KERNEL);
+	if (ret) {
+		kfree(sgt);
+		return ERR_PTR(ret);
+	}
+
+	return sgt;
+}
+
+/**
+ * kgsl_free_sgt() - Free a sg table structure
+ *
+ * @sgt: sg table pointer to be freed (may be NULL)
+ *
+ * Release the scatterlist owned by @sgt and then free the table
+ * structure itself. A NULL @sgt is a no-op.
+ */
+static inline void kgsl_free_sgt(struct sg_table *sgt)
+{
+	if (sgt == NULL)
+		return;
+
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
#endif /* __KGSL_SHAREDMEM_H */