/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * \file amdgpu_device.c
 *
 *  Implementation of functions for AMD GPU device
 *
 */

#include <sys/stat.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static amdgpu_device_handle dev_list;

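/*
 * Compare two DRM fds by the primary device node they resolve to, so that
 * different fds referring to the same GPU are recognized as one device.
 */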
static int fd_compare(int fd1, int fd2)
{
        char *name1 = drmGetPrimaryDeviceNameFromFd(fd1);
        char *name2 = drmGetPrimaryDeviceNameFromFd(fd2);
        int result;

        if (name1 == NULL || name2 == NULL) {
                free(name1);
                free(name2);
                return 0;
        }

        result = strcmp(name1, name2);
        free(name1);
        free(name2);

        return result;
}

/**
 * Get the authentication state of an fd.
 *
 * \param   fd   - \c [in]  File descriptor for AMD GPU device
 * \param   auth - \c [out] Pointer to output whether the fd is authenticated.
 *                          For a render node fd, auth is set to 0.
 *                          For a legacy (primary node) fd, the authentication
 *                          state is queried from the kernel.
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
static int amdgpu_get_auth(int fd, int *auth)
{
        int r = 0;
        drm_client_t client = {};

        if (drmGetNodeTypeFromFd(fd) == DRM_NODE_RENDER)
                *auth = 0;
        else {
                client.idx = 0;
                r = drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client);
                if (!r)
                        *auth = client.auth;
        }
        return r;
}
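
/*
 * Illustrative expectations for the helper above, assuming the usual DRM
 * node layout: a render node fd (/dev/dri/renderD*) always yields
 * *auth == 0, while a primary node fd (/dev/dri/card*) yields a non-zero
 * *auth only once the client has been authenticated, e.g. via
 * drmAuthMagic() by the DRM master.
 */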

static void amdgpu_device_free_internal(amdgpu_device_handle dev)
{
        amdgpu_device_handle *node = &dev_list;

        pthread_mutex_lock(&dev_mutex);
        while (*node != dev && (*node)->next)
                node = &(*node)->next;
        *node = (*node)->next;
        pthread_mutex_unlock(&dev_mutex);

        close(dev->fd);
        if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
                close(dev->flink_fd);

        amdgpu_vamgr_deinit(&dev->vamgr_32);
        amdgpu_vamgr_deinit(&dev->vamgr);
        amdgpu_vamgr_deinit(&dev->vamgr_high_32);
        amdgpu_vamgr_deinit(&dev->vamgr_high);
        handle_table_fini(&dev->bo_handles);
        handle_table_fini(&dev->bo_flink_names);
        pthread_mutex_destroy(&dev->bo_table_mutex);
        free(dev->marketing_name);
        free(dev);
}

/**
 * Assignment between two amdgpu_device pointers with reference counting.
 *
 * Usage:
 *    struct amdgpu_device *dst = ... , *src = ...;
 *
 *    dst = src;
 *    // No reference counting. Only use this when you need to move
 *    // a reference from one pointer to another.
 *
 *    amdgpu_device_reference(&dst, src);
 *    // Reference counters are updated. dst is decremented and src is
 *    // incremented. dst is freed if its reference counter is 0.
 */
static void amdgpu_device_reference(struct amdgpu_device **dst,
                                    struct amdgpu_device *src)
{
        if (update_references(&(*dst)->refcount, &src->refcount))
                amdgpu_device_free_internal(*dst);
        *dst = src;
}

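/**
 * Typical call sequence, as an illustrative sketch only (the render node
 * path and the error handling strategy are the caller's choice):
 *
 *    uint32_t major, minor;
 *    amdgpu_device_handle dev;
 *    int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *
 *    if (fd >= 0 && amdgpu_device_initialize(fd, &major, &minor, &dev) == 0) {
 *            // ... use dev with the rest of the libdrm_amdgpu API ...
 *            amdgpu_device_deinitialize(dev);
 *    }
 *    if (fd >= 0)
 *            close(fd);
 *
 * The library duplicates fd internally, so the caller keeps ownership of
 * its descriptor. Initializing another fd that resolves to the same device
 * node returns the existing amdgpu_device with its reference count raised.
 */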
drm_public int amdgpu_device_initialize(int fd,
                                        uint32_t *major_version,
                                        uint32_t *minor_version,
                                        amdgpu_device_handle *device_handle)
{
        struct amdgpu_device *dev;
        drmVersionPtr version;
        int r;
        int flag_auth = 0;
        int flag_authexist = 0;
        uint32_t accel_working = 0;
        uint64_t start, max;

        *device_handle = NULL;

        pthread_mutex_lock(&dev_mutex);
        r = amdgpu_get_auth(fd, &flag_auth);
        if (r) {
                fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
                        __func__, r);
                pthread_mutex_unlock(&dev_mutex);
                return r;
        }

        for (dev = dev_list; dev; dev = dev->next)
                if (fd_compare(dev->fd, fd) == 0)
                        break;

        if (dev) {
                r = amdgpu_get_auth(dev->fd, &flag_authexist);
                if (r) {
                        fprintf(stderr, "%s: amdgpu_get_auth (2) failed (%i)\n",
                                __func__, r);
                        pthread_mutex_unlock(&dev_mutex);
                        return r;
                }
                if ((flag_auth) && (!flag_authexist)) {
                        dev->flink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
                }
                *major_version = dev->major_version;
                *minor_version = dev->minor_version;
                amdgpu_device_reference(device_handle, dev);
                pthread_mutex_unlock(&dev_mutex);
                return 0;
        }

        dev = calloc(1, sizeof(struct amdgpu_device));
        if (!dev) {
                fprintf(stderr, "%s: calloc failed\n", __func__);
                pthread_mutex_unlock(&dev_mutex);
                return -ENOMEM;
        }

        dev->fd = -1;
        dev->flink_fd = -1;

        atomic_set(&dev->refcount, 1);

        version = drmGetVersion(fd);
        if (version->version_major != 3) {
                fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
                        "only compatible with 3.x.x.\n",
                        __func__,
                        version->version_major,
                        version->version_minor,
                        version->version_patchlevel);
                drmFreeVersion(version);
                r = -EBADF;
                goto cleanup;
        }

        dev->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
        dev->flink_fd = dev->fd;
        dev->major_version = version->version_major;
        dev->minor_version = version->version_minor;
        drmFreeVersion(version);

        pthread_mutex_init(&dev->bo_table_mutex, NULL);

        /* Check if acceleration is working. */
        r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working);
        if (r) {
                fprintf(stderr, "%s: amdgpu_query_info(ACCEL_WORKING) failed (%i)\n",
                        __func__, r);
                goto cleanup;
        }
        if (!accel_working) {
                fprintf(stderr, "%s: AMDGPU_INFO_ACCEL_WORKING = 0\n", __func__);
                r = -EBADF;
                goto cleanup;
        }

        r = amdgpu_query_gpu_info_init(dev);
        if (r) {
                fprintf(stderr, "%s: amdgpu_query_gpu_info_init failed\n", __func__);
                goto cleanup;
        }

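        /*
         * Split the GPU virtual address space into four managed ranges:
         * the part of the low range below 4 GiB (vamgr_32), the rest of
         * the low range (vamgr), and the matching 32-bit/remainder split
         * of the high VA area (vamgr_high_32 and vamgr_high).
         */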
        start = dev->dev_info.virtual_address_offset;
        max = MIN2(dev->dev_info.virtual_address_max, 0x100000000ULL);
        amdgpu_vamgr_init(&dev->vamgr_32, start, max,
                          dev->dev_info.virtual_address_alignment);

        start = max;
        max = MAX2(dev->dev_info.virtual_address_max, 0x100000000ULL);
        amdgpu_vamgr_init(&dev->vamgr, start, max,
                          dev->dev_info.virtual_address_alignment);

        start = dev->dev_info.high_va_offset;
        max = MIN2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
                   0x100000000ULL);
        amdgpu_vamgr_init(&dev->vamgr_high_32, start, max,
                          dev->dev_info.virtual_address_alignment);

        start = max;
        max = MAX2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
                   0x100000000ULL);
        amdgpu_vamgr_init(&dev->vamgr_high, start, max,
                          dev->dev_info.virtual_address_alignment);

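        /* Resolve the marketing name for this ASIC (typically from the
         * amdgpu.ids table shipped with libdrm). */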
        amdgpu_parse_asic_ids(dev);

        *major_version = dev->major_version;
        *minor_version = dev->minor_version;
        *device_handle = dev;
        dev->next = dev_list;
        dev_list = dev;
        pthread_mutex_unlock(&dev_mutex);

        return 0;

cleanup:
        if (dev->fd >= 0)
                close(dev->fd);
        free(dev);
        pthread_mutex_unlock(&dev_mutex);
        return r;
}

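/**
 * Drop one reference on a handle returned by amdgpu_device_initialize().
 * The amdgpu_device itself is torn down only when the last reference goes
 * away; the caller's original DRM fd is never closed here, since the
 * library operates on its own duplicate of it.
 */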
drm_public int amdgpu_device_deinitialize(amdgpu_device_handle dev)
{
        amdgpu_device_reference(&dev, NULL);
        return 0;
}

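/**
 * Return the marketing name looked up during device initialization, or
 * NULL if no matching entry was found for this ASIC.
 */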
drm_public const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
{
        return dev->marketing_name;
}

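/**
 * Illustrative sketch for the single query handled below,
 * amdgpu_sw_info_address32_hi (the local variable name is arbitrary):
 *
 *    uint32_t address32_hi;
 *
 *    if (amdgpu_query_sw_info(dev, amdgpu_sw_info_address32_hi,
 *                             &address32_hi) == 0) {
 *            // address32_hi now holds the upper 32 bits of the end of the
 *            // 32-bit VA range set up in amdgpu_device_initialize().
 *    }
 *
 * Any other enum amdgpu_sw_info value currently returns -EINVAL.
 */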
drm_public int amdgpu_query_sw_info(amdgpu_device_handle dev,
                                    enum amdgpu_sw_info info,
                                    void *value)
{
        uint32_t *val32 = (uint32_t*)value;

        switch (info) {
        case amdgpu_sw_info_address32_hi:
                if (dev->vamgr_high_32.va_max)
                        *val32 = (dev->vamgr_high_32.va_max - 1) >> 32;
                else
                        *val32 = (dev->vamgr_32.va_max - 1) >> 32;
                return 0;
        }
        return -EINVAL;
}