minigbm: amdgpu: align the stride to 256
[android-x86/external-minigbm.git] / amdgpu.c
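
Round linear strides up to a 256-byte boundary before computing plane sizes, presumably to satisfy the pitch alignment AMD hardware expects for linear surfaces; see the ALIGN() call in amdgpu_create_bo() below.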
/*
 * Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#ifdef DRV_AMDGPU
#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "dri.h"
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"

#ifdef __ANDROID__
#define DRI_PATH "/vendor/lib/dri/radeonsi_dri.so"
#else
#define DRI_PATH "/usr/lib64/dri/radeonsi_dri.so"
#endif

#define TILE_TYPE_LINEAR 0
/* DRI backend decides tiling in this case. */
#define TILE_TYPE_DRI 1

struct amdgpu_priv {
        struct dri_driver dri;
        int drm_version;
};

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
                                                  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
                                                  DRM_FORMAT_XRGB8888 };

static const uint32_t texture_source_formats[] = { DRM_FORMAT_BGR888, DRM_FORMAT_GR88,
                                                   DRM_FORMAT_R8,     DRM_FORMAT_NV21,
                                                   DRM_FORMAT_NV12,   DRM_FORMAT_YVU420_ANDROID };

static int amdgpu_init(struct driver *drv)
{
        struct amdgpu_priv *priv;
        drmVersionPtr drm_version;
        struct format_metadata metadata;
        uint64_t use_flags = BO_USE_RENDER_MASK;

        priv = calloc(1, sizeof(struct amdgpu_priv));
        if (!priv)
                return -ENOMEM;

        drm_version = drmGetVersion(drv_get_fd(drv));
        if (!drm_version) {
                free(priv);
                return -ENODEV;
        }

        priv->drm_version = drm_version->version_minor;
        drmFreeVersion(drm_version);

        drv->priv = priv;

        if (dri_init(drv, DRI_PATH, "radeonsi")) {
                free(priv);
                drv->priv = NULL;
                return -ENODEV;
        }

        metadata.tiling = TILE_TYPE_LINEAR;
        metadata.priority = 1;
        metadata.modifier = DRM_FORMAT_MOD_LINEAR;

        drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
                             &metadata, use_flags);

        drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
                             &metadata, BO_USE_TEXTURE_MASK);

        /* Linear formats supported by display. */
        drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);

        /* YUV formats for camera and display. */
        drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
                                   BO_USE_HW_VIDEO_DECODER);

        drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, BO_USE_SCANOUT);

        /*
         * The R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB, e.g. for
         * JPEG snapshots from the camera.
         */
        drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);

        /*
         * The following formats will be allocated by the DRI backend and may be
         * tiled. Since format modifier support hasn't been fully implemented yet,
         * it's not possible to enumerate the different buffer types the way the
         * i915 backend can.
         */
        use_flags &= ~BO_USE_RENDERSCRIPT;
        use_flags &= ~BO_USE_SW_WRITE_OFTEN;
        use_flags &= ~BO_USE_SW_READ_OFTEN;
        use_flags &= ~BO_USE_LINEAR;

        metadata.tiling = TILE_TYPE_DRI;
        metadata.priority = 2;

        drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
                             &metadata, use_flags);

        /* Potentially tiled formats supported by display. */
        drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
        return 0;
}

static void amdgpu_close(struct driver *drv)
{
        dri_close(drv);
        free(drv->priv);
        drv->priv = NULL;
}

static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                            uint64_t use_flags)
{
        int ret;
        uint32_t plane, stride;
        struct combination *combo;
        union drm_amdgpu_gem_create gem_create;
        struct amdgpu_priv *priv = bo->drv->priv;

        combo = drv_get_combination(bo->drv, format, use_flags);
        if (!combo)
                return -EINVAL;

        if (combo->metadata.tiling == TILE_TYPE_DRI)
                return dri_bo_create(bo, width, height, format, use_flags);

        stride = drv_stride_from_format(format, width, 0);
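        /*
         * Round the stride up to the next multiple of 256 bytes, presumably to
         * satisfy the pitch alignment AMD hardware expects for linear surfaces;
         * e.g. a 1366-pixel-wide XRGB8888 buffer (5464-byte stride) becomes 5632.
         */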
        stride = ALIGN(stride, 256);

        drv_bo_from_format(bo, stride, height, format);

        memset(&gem_create, 0, sizeof(gem_create));
        gem_create.in.bo_size = bo->total_size;
        gem_create.in.alignment = 256;
        gem_create.in.domain_flags = 0;

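        /* Software users need a CPU-accessible placement for the buffer. */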
        if (use_flags & (BO_USE_LINEAR | BO_USE_SW))
                gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
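        /* USWC gives a write-combined CPU mapping: fast streaming writes, slow reads. */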
        if (!(use_flags & (BO_USE_SW_READ_OFTEN | BO_USE_SCANOUT)))
                gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

        /*
         * If drm_version >= 21, everything in the stack exposes explicit
         * synchronization primitives and chromeos/arc++ will use them. Disable
         * implicit synchronization.
         */
        if (priv->drm_version >= 21) {
                gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
        }

        /* Allocate the buffer with the preferred heap. */
        ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
                                  sizeof(gem_create));
        if (ret < 0)
                return ret;

        for (plane = 0; plane < bo->num_planes; plane++)
                bo->handles[plane].u32 = gem_create.out.handle;

        return 0;
}

static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
{
        struct combination *combo;

        combo = drv_get_combination(bo->drv, data->format, data->use_flags);
        if (!combo)
                return -EINVAL;

        if (combo->metadata.tiling == TILE_TYPE_DRI)
                return dri_bo_import(bo, data);
        else
                return drv_prime_bo_import(bo, data);
}

static int amdgpu_destroy_bo(struct bo *bo)
{
        if (bo->priv)
                return dri_bo_destroy(bo);
        else
                return drv_gem_bo_destroy(bo);
}

static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
        int ret;
        union drm_amdgpu_gem_mmap gem_map;

        if (bo->priv)
                return dri_bo_map(bo, vma, plane, map_flags);

        memset(&gem_map, 0, sizeof(gem_map));
        gem_map.in.handle = bo->handles[plane].u32;

        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
        if (ret) {
                drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
                return MAP_FAILED;
        }

        vma->length = bo->total_size;

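        /*
         * Mapping is a two-step handshake: DRM_IOCTL_AMDGPU_GEM_MMAP returned a
         * fake mmap offset for this handle, which is now passed to mmap() on the
         * DRM fd to get the actual CPU pointer.
         */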
        return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
                    gem_map.out.addr_ptr);
}

static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma)
{
        if (bo->priv)
                return dri_bo_unmap(bo, vma);
        else
                return munmap(vma->addr, vma->length);
}

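/*
 * Map Android's flexible formats (HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED and
 * HAL_PIXEL_FORMAT_YCbCr_420_888) to concrete DRM fourccs; any other format
 * passes through unchanged.
 */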
static uint32_t amdgpu_resolve_format(uint32_t format, uint64_t use_flags)
{
        switch (format) {
        case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
                /* Camera subsystem requires NV12. */
                if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
                        return DRM_FORMAT_NV12;
                /* HACK: See b/28671744 */
                return DRM_FORMAT_XBGR8888;
        case DRM_FORMAT_FLEX_YCbCr_420_888:
                return DRM_FORMAT_NV12;
        default:
                return format;
        }
}

const struct backend backend_amdgpu = {
        .name = "amdgpu",
        .init = amdgpu_init,
        .close = amdgpu_close,
        .bo_create = amdgpu_create_bo,
        .bo_destroy = amdgpu_destroy_bo,
        .bo_import = amdgpu_import_bo,
        .bo_map = amdgpu_map_bo,
        .bo_unmap = amdgpu_unmap_bo,
        .resolve_format = amdgpu_resolve_format,
};

#endif