}
/* Frees older cached buffers. Called under table_lock */
-drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time)
+drm_private void fd_cleanup_bo_cache(struct fd_bo_cache *cache, time_t time)
{
int i;
- if (dev->time == time)
+ if (cache->time == time)
return;
- for (i = 0; i < dev->num_buckets; i++) {
- struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
+ for (i = 0; i < cache->num_buckets; i++) {
+ struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
struct fd_bo *bo;
while (!LIST_IS_EMPTY(&bucket->list)) {
}
}
- dev->time = time;
+ cache->time = time;
}
-static struct fd_bo_bucket * get_bucket(struct fd_device *dev, uint32_t size)
+static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
{
int i;
/* hmm, this is what intel does, but I suppose we could calculate our
* way to the correct bucket size rather than looping..
*/
- for (i = 0; i < dev->num_buckets; i++) {
- struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
+ for (i = 0; i < cache->num_buckets; i++) {
+ struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
if (bucket->size >= size) {
return bucket;
}
DRM_FREEDRENO_PREP_NOSYNC) == 0;
}
-static struct fd_bo *find_in_bucket(struct fd_device *dev,
- struct fd_bo_bucket *bucket, uint32_t flags)
+static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
{
struct fd_bo *bo = NULL;
int ret;
size = ALIGN(size, 4096);
- bucket = get_bucket(dev, size);
+ bucket = get_bucket(&dev->bo_cache, size);
/* see if we can be green and recycle: */
if (bucket) {
size = bucket->size;
- bo = find_in_bucket(dev, bucket, flags);
+ bo = find_in_bucket(bucket, flags);
if (bo) {
atomic_set(&bo->refcnt, 1);
fd_device_ref(bo->dev);
pthread_mutex_lock(&table_lock);
if (bo->bo_reuse) {
- struct fd_bo_bucket *bucket = get_bucket(dev, bo->size);
+ struct fd_bo_bucket *bucket = get_bucket(&dev->bo_cache, bo->size);
/* see if we can be green and recycle: */
if (bucket) {
bo->free_time = time.tv_sec;
list_addtail(&bo->list, &bucket->list);
- fd_cleanup_bo_cache(dev, time.tv_sec);
+ fd_cleanup_bo_cache(&dev->bo_cache, time.tv_sec);
/* bo's in the bucket cache don't have a ref and
* don't hold a ref to the dev:
struct fd_device * msm_device_new(int fd);
static void
-add_bucket(struct fd_device *dev, int size)
+add_bucket(struct fd_bo_cache *cache, int size)
{
- unsigned int i = dev->num_buckets;
+ unsigned int i = cache->num_buckets;
- assert(i < ARRAY_SIZE(dev->cache_bucket));
+ assert(i < ARRAY_SIZE(cache->cache_bucket));
- list_inithead(&dev->cache_bucket[i].list);
- dev->cache_bucket[i].size = size;
- dev->num_buckets++;
+ list_inithead(&cache->cache_bucket[i].list);
+ cache->cache_bucket[i].size = size;
+ cache->num_buckets++;
}
/* Populate @cache with its fixed set of bucket sizes, from one page up to
 * cache_max_size (64 MiB).
 *
 * Pure power-of-two bucketing wastes memory, so three intermediate sizes
 * (+25%, +50%, +75%) are inserted between each power of two; together with
 * page-rounding of requests this should still give useful hit rates.
 * NOTE(review): part of the original rationale comment was truncated in
 * this hunk — confirm wording against upstream before relying on it.
 */
static void
fd_bo_cache_init(struct fd_bo_cache *cache)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* smallest sizes below the 4-page threshold get exact buckets */
	add_bucket(cache, 4096);
	add_bucket(cache, 4096 * 2);
	add_bucket(cache, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(cache, size);
		add_bucket(cache, size + size * 1 / 4);
		add_bucket(cache, size + size * 2 / 4);
		add_bucket(cache, size + size * 3 / 4);
	}
}
dev->fd = fd;
dev->handle_table = drmHashCreate();
dev->name_table = drmHashCreate();
- init_cache_buckets(dev);
+ fd_bo_cache_init(&dev->bo_cache);
return dev;
}
static void fd_device_del_impl(struct fd_device *dev)
{
- fd_cleanup_bo_cache(dev, 0);
+ fd_cleanup_bo_cache(&dev->bo_cache, 0);
drmHashDestroy(dev->handle_table);
drmHashDestroy(dev->name_table);
if (dev->closefd)
struct list_head list;
};
+struct fd_bo_cache {
+ struct fd_bo_bucket cache_bucket[14 * 4];
+ int num_buckets;
+ time_t time;
+};
+
struct fd_device {
int fd;
atomic_t refcnt;
const struct fd_device_funcs *funcs;
- struct fd_bo_bucket cache_bucket[14 * 4];
- int num_buckets;
- time_t time;
+ struct fd_bo_cache bo_cache;
int closefd; /* call close(fd) upon destruction */
};
-drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time);
+drm_private void fd_cleanup_bo_cache(struct fd_bo_cache *cache, time_t time);
/* for where @table_lock is already held: */
drm_private void fd_device_del_locked(struct fd_device *dev);