/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "freedreno_drmif.h"
#include "freedreno_priv.h"

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void bo_del(struct fd_bo *bo);
/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct fd_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourself into the name table: */
	drmHashInsert(bo->dev->name_table, name, bo);
}
/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo * lookup_bo(void *tbl, uint32_t key)
{
	struct fd_bo *bo = NULL;
	if (!drmHashLookup(tbl, key, (void **)&bo)) {
		/* found, incr refcnt and return: */
		bo = fd_bo_ref(bo);
		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}
	return bo;
}
/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo * bo_from_handle(struct fd_device *dev,
		uint32_t size, uint32_t handle)
{
	struct fd_bo *bo;

	bo = dev->funcs->bo_from_handle(dev, size, handle);
	if (!bo) {
		struct drm_gem_close req = {
				.handle = handle,
		};
		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		return NULL;
	}
	bo->dev = fd_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourself into the handle table: */
	drmHashInsert(dev->handle_table, handle, bo);
	return bo;
}
/* Frees older cached buffers. Called under table_lock */
drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time)
{
	int i;

	if (dev->time == time)
		return;

	for (i = 0; i < dev->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
		struct fd_bo *bo;
		while (!LIST_IS_EMPTY(&bucket->list)) {
			bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
			/* keep things in cache for at least 1 second: */
			if (time && ((time - bo->free_time) <= 1))
				break;
			list_del(&bo->list);
			bo_del(bo);
		}
	}

	dev->time = time;
}
static struct fd_bo_bucket * get_bucket(struct fd_device *dev, uint32_t size)
{
	int i;

	/* hmm, this is what intel does, but I suppose we could calculate our
	 * way to the correct bucket size rather than looping..
	 */
	for (i = 0; i < dev->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
		if (bucket->size >= size)
			return bucket;
	}

	return NULL;
}

static int is_idle(struct fd_bo *bo)
{
	return fd_bo_cpu_prep(bo, NULL,
			DRM_FREEDRENO_PREP_READ |
			DRM_FREEDRENO_PREP_WRITE |
			DRM_FREEDRENO_PREP_NOSYNC) == 0;
}
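/* The comment in get_bucket() notes that the bucket index could be computed
 * directly instead of scanning the array. A minimal sketch of that idea,
 * assuming (hypothetically) buckets sized as pure powers of two starting at
 * 4096; that is NOT how cache_bucket[] is actually populated, so this is
 * illustrative only:
 *
 *	static int bucket_index_pow2(uint32_t size)
 *	{
 *		int idx = 0;
 *		uint32_t sz = 4096;
 *		while (sz < size) {
 *			sz <<= 1;
 *			idx++;
 *		}
 *		return idx;
 *	}
 */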
static struct fd_bo *find_in_bucket(struct fd_device *dev,
		struct fd_bo_bucket *bucket, uint32_t flags)
{
	struct fd_bo *bo = NULL;

	/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
	 * skip the busy check.. if it is only going to be a render target
	 * then we probably don't need to stall..
	 *
	 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
	 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
	 */
	pthread_mutex_lock(&table_lock);
	while (!LIST_IS_EMPTY(&bucket->list)) {
		bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
		if (0 /* TODO: if madvise tells us bo is gone... */) {
			list_del(&bo->list);
			bo_del(bo);
			bo = NULL;
			continue;
		}
		/* TODO check for compatible flags? */
		if (is_idle(bo)) {
			list_del(&bo->list);
			break;
		}
		bo = NULL;
		break;
	}
	pthread_mutex_unlock(&table_lock);

	return bo;
}
struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = NULL;
	struct fd_bo_bucket *bucket;
	uint32_t handle;
	int ret;

	size = ALIGN(size, 4096);
	bucket = get_bucket(dev, size);

	/* see if we can be green and recycle: */
	if (bucket) {
		size = bucket->size;
		bo = find_in_bucket(dev, bucket, flags);
		if (bo) {
			atomic_set(&bo->refcnt, 1);
			fd_device_ref(bo->dev);
			return bo;
		}
	}

	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	bo->bo_reuse = TRUE;
	pthread_mutex_unlock(&table_lock);

	return bo;
}
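/* Typical allocation flow, as an illustrative sketch only: it assumes a valid
 * struct fd_device *dev (e.g. from fd_device_new()), omits error handling,
 * and uses a flags value of 0 purely for the example (real flags are
 * backend-specific):
 *
 *	struct fd_bo *bo = fd_bo_new(dev, 0x1000, 0);
 *	void *ptr = fd_bo_map(bo);
 *	if (ptr)
 *		memset(ptr, 0, fd_bo_size(bo));
 *	fd_bo_del(bo);
 *
 * Note that fd_bo_del() may park the buffer in the bucket cache above rather
 * than closing the GEM handle immediately.
 */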
struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
	struct fd_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, size, handle);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
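/* Because lookup_bo() consults dev->handle_table first, importing the same
 * GEM handle twice returns the same (re-referenced) fd_bo rather than a
 * duplicate wrapper. Illustrative sketch only, assuming "dev", "handle" and
 * "size" are valid:
 *
 *	struct fd_bo *a = fd_bo_from_handle(dev, handle, size);
 *	struct fd_bo *b = fd_bo_from_handle(dev, handle, size);
 *	assert(a == b);      // same object, refcnt is now 2
 *	fd_bo_del(b);
 *	fd_bo_del(a);
 */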
struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	int ret, size;
	uint32_t handle;
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);
	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
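/* The size of a dma-buf is not passed in by the caller; it is recovered by
 * seeking to the end of the fd, as done above. Illustrative import sketch
 * (assumes a dma-buf fd received from another process or driver; error
 * handling omitted):
 *
 *	struct fd_bo *bo = fd_bo_from_dmabuf(dev, dmabuf_fd);
 *	uint32_t sz = fd_bo_size(bo);   // matches lseek(dmabuf_fd, 0, SEEK_END)
 *	fd_bo_del(bo);
 */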
struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo)
		set_name(bo, name);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
struct fd_bo * fd_bo_ref(struct fd_bo *bo)
{
	atomic_inc(&bo->refcnt);
	return bo;
}
void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	pthread_mutex_lock(&table_lock);

	if (bo->bo_reuse) {
		struct fd_bo_bucket *bucket = get_bucket(dev, bo->size);

		/* see if we can be green and recycle: */
		if (bucket) {
			struct timespec time;

			clock_gettime(CLOCK_MONOTONIC, &time);

			bo->free_time = time.tv_sec;
			list_addtail(&bo->list, &bucket->list);
			fd_cleanup_bo_cache(dev, time.tv_sec);

			/* bo's in the bucket cache don't have a ref and
			 * don't hold a ref to the dev:
			 */
			goto out;
		}
	}

	bo_del(bo);
out:
	fd_device_del_locked(dev);
	pthread_mutex_unlock(&table_lock);
}
/* Called under table_lock */
static void bo_del(struct fd_bo *bo)
{
	if (bo->map)
		drm_munmap(bo->map, bo->size);

	/* TODO probably bo's in bucket list get removed from
	 * handle table??
	 */

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		drmHashDelete(bo->dev->handle_table, bo->handle);
		if (bo->name)
			drmHashDelete(bo->dev->name_table, bo->name);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	bo->funcs->destroy(bo);
}
int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret)
			return ret;

		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
		/* an exported bo should not go back into the bucket cache: */
		bo->bo_reuse = FALSE;
	}

	*name = bo->name;

	return 0;
}
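/* Flink names are global and can be handed to another process, which can then
 * open the same buffer with fd_bo_from_name(). Illustrative sketch only (two
 * cooperating processes; the IPC helpers are hypothetical and error handling
 * is omitted):
 *
 *	// exporting process
 *	uint32_t name;
 *	fd_bo_get_name(bo, &name);
 *	send_name_to_peer(name);                 // hypothetical IPC helper
 *
 *	// importing process
 *	uint32_t name = recv_name_from_peer();   // hypothetical IPC helper
 *	struct fd_bo *shared = fd_bo_from_name(dev, name);
 *
 * For new code, dma-buf fds (fd_bo_dmabuf() / fd_bo_from_dmabuf()) are
 * generally preferred over global flink names.
 */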
uint32_t fd_bo_handle(struct fd_bo *bo)
{
	return bo->handle;
}

int fd_bo_dmabuf(struct fd_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
			&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	return prime_fd;
}

uint32_t fd_bo_size(struct fd_bo *bo)
{
	return bo->size;
}
void * fd_bo_map(struct fd_bo *bo)
{
	if (!bo->map) {
		uint64_t offset;
		int ret;

		ret = bo->funcs->offset(bo, &offset);
		if (ret)
			return NULL;

		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				bo->dev->fd, offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}
	return bo->map;
}
/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	return bo->funcs->cpu_prep(bo, pipe, op);
}

void fd_bo_cpu_fini(struct fd_bo *bo)
{
	bo->funcs->cpu_fini(bo);
}
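/* CPU access to a mapped bo should be bracketed by fd_bo_cpu_prep() and
 * fd_bo_cpu_fini() so the kernel can synchronize against pending GPU work.
 * Illustrative sketch only: a NULL pipe is passed here the same way the
 * in-file is_idle() helper does (per the kgsl quirk noted above, a real
 * pipe may be needed on that backend), and error handling is omitted:
 *
 *	uint32_t *ptr = fd_bo_map(bo);
 *	if (ptr && !fd_bo_cpu_prep(bo, NULL, DRM_FREEDRENO_PREP_WRITE)) {
 *		ptr[0] = 0xdeadbeef;
 *		fd_bo_cpu_fini(bo);
 *	}
 */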