freedreno: move bo-cache to its own file
author:    Rob Clark <robclark@freedesktop.org>
           Tue, 31 May 2016 14:46:59 +0000 (10:46 -0400)
committer: Rob Clark <robclark@freedesktop.org>
           Wed, 20 Jul 2016 23:42:21 +0000 (19:42 -0400)
Signed-off-by: Rob Clark <robclark@freedesktop.org>
freedreno/Makefile.sources
freedreno/freedreno_bo.c
freedreno/freedreno_bo_cache.c [new file with mode: 0644]
freedreno/freedreno_device.c
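
For context, the functions this commit relocates form libdrm_freedreno's internal
buffer-object cache API: fd_bo_cache_init() builds the bucket table,
fd_bo_cache_alloc() tries to recycle an idle cached BO before the backend
allocates a fresh one, and fd_bo_cache_free() parks a BO in a bucket instead of
destroying it. A minimal sketch of the intended call pattern follows; it is
illustrative only, with bo_new_handle() as a hypothetical stand-in for the real
ioctl allocation path and dev->bo_cache as the cache instance assumed to live
inside struct fd_device:

/* Illustrative sketch only -- not part of this commit. */
#include "freedreno_drmif.h"
#include "freedreno_priv.h"

/* hypothetical stand-in for the backend allocation path: */
static struct fd_bo * bo_new_handle(struct fd_device *dev,
		uint32_t size, uint32_t flags);

static struct fd_bo *
alloc_bo(struct fd_device *dev, uint32_t size, uint32_t flags)
{
	/* on a cache hit, size is rounded up to the bucket size;
	 * fd_bo_cache_alloc() takes table_lock internally:
	 */
	struct fd_bo *bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
	if (bo)
		return bo;                    /* recycled an idle BO */
	return bo_new_handle(dev, size, flags);   /* fresh allocation */
}

static void
free_bo(struct fd_device *dev, struct fd_bo *bo)
{
	pthread_mutex_lock(&table_lock);      /* callees expect it held */
	if (fd_bo_cache_free(&dev->bo_cache, bo) != 0) {
		/* no suitable bucket: really delete, drop the dev ref */
		bo_del(bo);
		fd_device_del_locked(dev);
	}
	pthread_mutex_unlock(&table_lock);
}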

diff --git a/freedreno/Makefile.sources b/freedreno/Makefile.sources
index 57a8bf1..68a679b 100644
--- a/freedreno/Makefile.sources
+++ b/freedreno/Makefile.sources
@@ -4,6 +4,7 @@ LIBDRM_FREEDRENO_FILES := \
        freedreno_priv.h \
        freedreno_ringbuffer.c \
        freedreno_bo.c \
+       freedreno_bo_cache.c \
        msm/msm_bo.c \
        msm/msm_device.c \
        msm/msm_drm.h \
diff --git a/freedreno/freedreno_bo.c b/freedreno/freedreno_bo.c
index da56398..cf2d7cb 100644
--- a/freedreno/freedreno_bo.c
+++ b/freedreno/freedreno_bo.c
@@ -33,9 +33,8 @@
 #include "freedreno_drmif.h"
 #include "freedreno_priv.h"
 
-static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
-
-static void bo_del(struct fd_bo *bo);
+drm_private pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
+drm_private void bo_del(struct fd_bo *bo);
 
 /* set buffer name, and add to table, call w/ table_lock held: */
 static void set_name(struct fd_bo *bo, uint32_t name)
@@ -83,116 +82,6 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
        return bo;
 }
 
-/* Frees older cached buffers.  Called under table_lock */
-drm_private void
-fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
-{
-       int i;
-
-       if (cache->time == time)
-               return;
-
-       for (i = 0; i < cache->num_buckets; i++) {
-               struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
-               struct fd_bo *bo;
-
-               while (!LIST_IS_EMPTY(&bucket->list)) {
-                       bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
-
-                       /* keep things in cache for at least 1 second: */
-                       if (time && ((time - bo->free_time) <= 1))
-                               break;
-
-                       list_del(&bo->list);
-                       bo_del(bo);
-               }
-       }
-
-       cache->time = time;
-}
-
-static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
-{
-       int i;
-
-       /* hmm, this is what intel does, but I suppose we could calculate our
-        * way to the correct bucket size rather than looping..
-        */
-       for (i = 0; i < cache->num_buckets; i++) {
-               struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
-               if (bucket->size >= size) {
-                       return bucket;
-               }
-       }
-
-       return NULL;
-}
-
-static int is_idle(struct fd_bo *bo)
-{
-       return fd_bo_cpu_prep(bo, NULL,
-                       DRM_FREEDRENO_PREP_READ |
-                       DRM_FREEDRENO_PREP_WRITE |
-                       DRM_FREEDRENO_PREP_NOSYNC) == 0;
-}
-
-static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
-{
-       struct fd_bo *bo = NULL;
-
-       /* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
-        * skip the busy check.. if it is only going to be a render target
-        * then we probably don't need to stall..
-        *
-        * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
-        * (MRU, since likely to be in GPU cache), rather than head (LRU)..
-        */
-       pthread_mutex_lock(&table_lock);
-       while (!LIST_IS_EMPTY(&bucket->list)) {
-               bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
-               if (0 /* TODO: if madvise tells us bo is gone... */) {
-                       list_del(&bo->list);
-                       bo_del(bo);
-                       bo = NULL;
-                       continue;
-               }
-               /* TODO check for compatible flags? */
-               if (is_idle(bo)) {
-                       list_del(&bo->list);
-                       break;
-               }
-               bo = NULL;
-               break;
-       }
-       pthread_mutex_unlock(&table_lock);
-
-       return bo;
-}
-
-/* NOTE: size is potentially rounded up to bucket size: */
-drm_private struct fd_bo *
-fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
-{
-       struct fd_bo *bo = NULL;
-       struct fd_bo_bucket *bucket;
-
-       *size = ALIGN(*size, 4096);
-       bucket = get_bucket(cache, *size);
-
-       /* see if we can be green and recycle: */
-       if (bucket) {
-               *size = bucket->size;
-               bo = find_in_bucket(bucket, flags);
-               if (bo) {
-                       atomic_set(&bo->refcnt, 1);
-                       fd_device_ref(bo->dev);
-                       return bo;
-               }
-       }
-
-       return NULL;
-}
-
 struct fd_bo *
 fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
 {
@@ -303,32 +192,6 @@ struct fd_bo * fd_bo_ref(struct fd_bo *bo)
        return bo;
 }
 
-drm_private int
-fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
-{
-       struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);
-
-       /* see if we can be green and recycle: */
-       if (bucket) {
-               struct timespec time;
-
-               clock_gettime(CLOCK_MONOTONIC, &time);
-
-               bo->free_time = time.tv_sec;
-               list_addtail(&bo->list, &bucket->list);
-               fd_bo_cache_cleanup(cache, time.tv_sec);
-
-               /* bo's in the bucket cache don't have a ref and
-                * don't hold a ref to the dev:
-                */
-               fd_device_del_locked(bo->dev);
-
-               return 0;
-       }
-
-       return -1;
-}
-
 void fd_bo_del(struct fd_bo *bo)
 {
        struct fd_device *dev = bo->dev;
@@ -348,7 +211,7 @@ out:
 }
 
 /* Called under table_lock */
-static void bo_del(struct fd_bo *bo)
+drm_private void bo_del(struct fd_bo *bo)
 {
        if (bo->map)
                drm_munmap(bo->map, bo->size);
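
The visibility changes above are the glue for the split: bo_del() and
table_lock drop their static qualifiers so that freedreno_bo_cache.c can
reference them, while drm_private keeps them out of the library's exported
ABI. Assuming the usual libdrm_macros.h definition (not shown in this diff),
drm_private reduces ELF symbol visibility rather than changing linkage:

/* Assumed definition from libdrm_macros.h -- not part of this diff.
 * With HAVE_VISIBILITY, the symbol is shared between the library's
 * own translation units but never exported from libdrm_freedreno.so.
 */
#if defined(HAVE_VISIBILITY)
#  define drm_private __attribute__((visibility("hidden")))
#else
#  define drm_private
#endif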
diff --git a/freedreno/freedreno_bo_cache.c b/freedreno/freedreno_bo_cache.c
new file mode 100644
index 0000000..17199d2
--- /dev/null
+++ b/freedreno/freedreno_bo_cache.c
@@ -0,0 +1,205 @@
+/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
+
+/*
+ * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Rob Clark <robclark@freedesktop.org>
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "freedreno_drmif.h"
+#include "freedreno_priv.h"
+
+
+drm_private void bo_del(struct fd_bo *bo);
+drm_private extern pthread_mutex_t table_lock;
+
+static void
+add_bucket(struct fd_bo_cache *cache, int size)
+{
+       unsigned int i = cache->num_buckets;
+
+       assert(i < ARRAY_SIZE(cache->cache_bucket));
+
+       list_inithead(&cache->cache_bucket[i].list);
+       cache->cache_bucket[i].size = size;
+       cache->num_buckets++;
+}
+
+drm_private void
+fd_bo_cache_init(struct fd_bo_cache *cache)
+{
+       unsigned long size, cache_max_size = 64 * 1024 * 1024;
+
+       /* OK, so power of two buckets was too wasteful of memory.
+        * Give 3 other sizes between each power of two, to hopefully
+        * cover things accurately enough.  (The alternative is
+        * probably to just go for exact matching of sizes, and assume
+        * that for things like composited window resize the tiled
+        * width/height alignment and rounding of sizes to pages will
+        * get us useful cache hit rates anyway)
+        */
+       add_bucket(cache, 4096);
+       add_bucket(cache, 4096 * 2);
+       add_bucket(cache, 4096 * 3);
+
+       /* Initialize the linked lists for BO reuse cache. */
+       for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
+               add_bucket(cache, size);
+               add_bucket(cache, size + size * 1 / 4);
+               add_bucket(cache, size + size * 2 / 4);
+               add_bucket(cache, size + size * 3 / 4);
+       }
+}
+
+/* Frees older cached buffers.  Called under table_lock */
+drm_private void
+fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
+{
+       int i;
+
+       if (cache->time == time)
+               return;
+
+       for (i = 0; i < cache->num_buckets; i++) {
+               struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
+               struct fd_bo *bo;
+
+               while (!LIST_IS_EMPTY(&bucket->list)) {
+                       bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
+
+                       /* keep things in cache for at least 1 second: */
+                       if (time && ((time - bo->free_time) <= 1))
+                               break;
+
+                       list_del(&bo->list);
+                       bo_del(bo);
+               }
+       }
+
+       cache->time = time;
+}
+
+static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
+{
+       int i;
+
+       /* hmm, this is what intel does, but I suppose we could calculate our
+        * way to the correct bucket size rather than looping..
+        */
+       for (i = 0; i < cache->num_buckets; i++) {
+               struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
+               if (bucket->size >= size) {
+                       return bucket;
+               }
+       }
+
+       return NULL;
+}
+
+static int is_idle(struct fd_bo *bo)
+{
+       return fd_bo_cpu_prep(bo, NULL,
+                       DRM_FREEDRENO_PREP_READ |
+                       DRM_FREEDRENO_PREP_WRITE |
+                       DRM_FREEDRENO_PREP_NOSYNC) == 0;
+}
+
+static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
+{
+       struct fd_bo *bo = NULL;
+
+       /* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
+        * skip the busy check.. if it is only going to be a render target
+        * then we probably don't need to stall..
+        *
+        * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
+        * (MRU, since likely to be in GPU cache), rather than head (LRU)..
+        */
+       pthread_mutex_lock(&table_lock);
+       if (!LIST_IS_EMPTY(&bucket->list)) {
+               bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
+               /* TODO check for compatible flags? */
+               if (is_idle(bo)) {
+                       list_del(&bo->list);
+               } else {
+                       bo = NULL;
+               }
+       }
+       pthread_mutex_unlock(&table_lock);
+
+       return bo;
+}
+
+/* NOTE: size is potentially rounded up to bucket size: */
+drm_private struct fd_bo *
+fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
+{
+       struct fd_bo *bo = NULL;
+       struct fd_bo_bucket *bucket;
+
+       *size = ALIGN(*size, 4096);
+       bucket = get_bucket(cache, *size);
+
+       /* see if we can be green and recycle: */
+       if (bucket) {
+               *size = bucket->size;
+               bo = find_in_bucket(bucket, flags);
+               if (bo) {
+                       atomic_set(&bo->refcnt, 1);
+                       fd_device_ref(bo->dev);
+                       return bo;
+               }
+       }
+
+       return NULL;
+}
+
+drm_private int
+fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
+{
+       struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);
+
+       /* see if we can be green and recycle: */
+       if (bucket) {
+               struct timespec time;
+
+               clock_gettime(CLOCK_MONOTONIC, &time);
+
+               bo->free_time = time.tv_sec;
+               list_addtail(&bo->list, &bucket->list);
+               fd_bo_cache_cleanup(cache, time.tv_sec);
+
+               /* bo's in the bucket cache don't have a ref and
+                * don't hold a ref to the dev:
+                */
+               fd_device_del_locked(bo->dev);
+
+               return 0;
+       }
+
+       return -1;
+}
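
To make fd_bo_cache_init()'s bucket layout above concrete: three fixed buckets
at 4/8/12 KiB, then four buckets per power-of-two step (1x, 1.25x, 1.5x, 1.75x)
from 16 KiB upward, 55 buckets in all. A standalone sketch (not part of the
commit) that prints the same size sequence the loop generates:

/* Standalone sketch: reproduces the bucket sizes fd_bo_cache_init()
 * above creates. Compile and run to inspect the sequence.
 */
#include <stdio.h>

int main(void)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;
	int n = 0;

	/* the three fixed small buckets: */
	printf("%d: %lu\n", n++, 4096UL);
	printf("%d: %lu\n", n++, 4096UL * 2);
	printf("%d: %lu\n", n++, 4096UL * 3);

	/* four buckets per power-of-two step: 1x, 1.25x, 1.5x, 1.75x */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		printf("%d: %lu\n", n++, size);
		printf("%d: %lu\n", n++, size + size * 1 / 4);
		printf("%d: %lu\n", n++, size + size * 2 / 4);
		printf("%d: %lu\n", n++, size + size * 3 / 4);
	}
	/* 16 KiB..64 MiB is 13 doublings: 3 + 13*4 = 55 buckets */
	return 0;
}

Note that the final iteration (size == 64 MiB) still emits the three fractional
buckets above cache_max_size; the cap bounds the loop variable, not the largest
bucket size.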
diff --git a/freedreno/freedreno_device.c b/freedreno/freedreno_device.c
index bd57c24..15e41f0 100644
--- a/freedreno/freedreno_device.c
+++ b/freedreno/freedreno_device.c
@@ -42,44 +42,6 @@ static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
 struct fd_device * kgsl_device_new(int fd);
 struct fd_device * msm_device_new(int fd);
 
-static void
-add_bucket(struct fd_bo_cache *cache, int size)
-{
-       unsigned int i = cache->num_buckets;
-
-       assert(i < ARRAY_SIZE(cache->cache_bucket));
-
-       list_inithead(&cache->cache_bucket[i].list);
-       cache->cache_bucket[i].size = size;
-       cache->num_buckets++;
-}
-
-drm_private void
-fd_bo_cache_init(struct fd_bo_cache *cache)
-{
-       unsigned long size, cache_max_size = 64 * 1024 * 1024;
-
-       /* OK, so power of two buckets was too wasteful of memory.
-        * Give 3 other sizes between each power of two, to hopefully
-        * cover things accurately enough.  (The alternative is
-        * probably to just go for exact matching of sizes, and assume
-        * that for things like composited window resize the tiled
-        * width/height alignment and rounding of sizes to pages will
-        * get us useful cache hit rates anyway)
-        */
-       add_bucket(cache, 4096);
-       add_bucket(cache, 4096 * 2);
-       add_bucket(cache, 4096 * 3);
-
-       /* Initialize the linked lists for BO reuse cache. */
-       for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
-               add_bucket(cache, size);
-               add_bucket(cache, size + size * 1 / 4);
-               add_bucket(cache, size + size * 2 / 4);
-               add_bucket(cache, size + size * 3 / 4);
-       }
-}
-
 struct fd_device * fd_device_new(int fd)
 {
        struct fd_device *dev;