drivers/gpu/drm/virtio/virtgpu_fb.c

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include "virtgpu_drv.h"

#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)

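/*
 * Merge the given rectangle into the framebuffer's pending dirty region.
 * When called from atomic context (or with @store set) the region is only
 * recorded; otherwise the accumulated region is transferred to the host
 * resource and a resource flush is issued for it.
 */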
static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
                                   bool store, int x, int y,
                                   int width, int height)
{
        struct drm_device *dev = fb->base.dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        bool store_for_later = false;
        int bpp = fb->base.format->cpp[0];
        int x2, y2;
        unsigned long flags;
        struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);

        if ((width <= 0) ||
            (x + width > fb->base.width) ||
            (y + height > fb->base.height)) {
                DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
                          width, height, x, y,
                          fb->base.width, fb->base.height);
                return -EINVAL;
        }

        /*
         * Can be called with pretty much any context (console output
         * path).  If we are in atomic just store the dirty rect info
         * to send out the update later.
         *
         * Can't test inside spin lock.
         */
        if (in_atomic() || store)
                store_for_later = true;

        x2 = x + width - 1;
        y2 = y + height - 1;

        spin_lock_irqsave(&fb->dirty_lock, flags);

        if (fb->y1 < y)
                y = fb->y1;
        if (fb->y2 > y2)
                y2 = fb->y2;
        if (fb->x1 < x)
                x = fb->x1;
        if (fb->x2 > x2)
                x2 = fb->x2;

        if (store_for_later) {
                fb->x1 = x;
                fb->x2 = x2;
                fb->y1 = y;
                fb->y2 = y2;
                spin_unlock_irqrestore(&fb->dirty_lock, flags);
                return 0;
        }

        fb->x1 = fb->y1 = INT_MAX;
        fb->x2 = fb->y2 = 0;

        spin_unlock_irqrestore(&fb->dirty_lock, flags);

        {
                uint32_t offset;
                uint32_t w = x2 - x + 1;
                uint32_t h = y2 - y + 1;

                offset = (y * fb->base.pitches[0]) + x * bpp;

                virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj,
                                                   offset,
                                                   cpu_to_le32(w),
                                                   cpu_to_le32(h),
                                                   cpu_to_le32(x),
                                                   cpu_to_le32(y),
                                                   NULL);
        }
        virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
                                      x, y, x2 - x + 1, y2 - y + 1);
        return 0;
}

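/*
 * Framebuffer dirty handler: union all clip rectangles (or the whole
 * framebuffer if none were supplied) into one bounding box.  Dumb,
 * guest-backed objects need a transfer to the host first, so route them
 * through virtio_gpu_dirty_update(); everything else only needs a
 * resource flush.
 */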
int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
                             struct drm_clip_rect *clips,
                             unsigned int num_clips)
{
        struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
        struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
        struct drm_clip_rect norect;
        struct drm_clip_rect *clips_ptr;
        int left, right, top, bottom;
        int i;
        int inc = 1;

        if (!num_clips) {
                num_clips = 1;
                clips = &norect;
                norect.x1 = norect.y1 = 0;
                norect.x2 = vgfb->base.width;
                norect.y2 = vgfb->base.height;
        }
        left = clips->x1;
        right = clips->x2;
        top = clips->y1;
        bottom = clips->y2;

        /* skip the first clip rect */
        for (i = 1, clips_ptr = clips + inc;
             i < num_clips; i++, clips_ptr += inc) {
                left = min_t(int, left, (int)clips_ptr->x1);
                right = max_t(int, right, (int)clips_ptr->x2);
                top = min_t(int, top, (int)clips_ptr->y1);
                bottom = max_t(int, bottom, (int)clips_ptr->y2);
        }

        if (obj->dumb)
                return virtio_gpu_dirty_update(vgfb, false, left, top,
                                               right - left, bottom - top);

        virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
                                      left, top, right - left, bottom - top);
        return 0;
}

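/* Delayed work: flush the dirty region accumulated by the fbcon hooks. */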
static void virtio_gpu_fb_dirty_work(struct work_struct *work)
{
        struct delayed_work *delayed_work = to_delayed_work(work);
        struct virtio_gpu_fbdev *vfbdev =
                container_of(delayed_work, struct virtio_gpu_fbdev, work);
        struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;

        virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
                                vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1);
}

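/*
 * fbcon drawing hooks: render into the kernel mapping of the framebuffer
 * object via the drm_fb_helper sys_* helpers, record the touched
 * rectangle, and schedule the delayed work to push it to the host.
 */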
static void virtio_gpu_3d_fillrect(struct fb_info *info,
                                   const struct fb_fillrect *rect)
{
        struct virtio_gpu_fbdev *vfbdev = info->par;

        drm_fb_helper_sys_fillrect(info, rect);
        virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
                                rect->width, rect->height);
        schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_copyarea(struct fb_info *info,
                                   const struct fb_copyarea *area)
{
        struct virtio_gpu_fbdev *vfbdev = info->par;

        drm_fb_helper_sys_copyarea(info, area);
        virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
                                area->width, area->height);
        schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_imageblit(struct fb_info *info,
                                    const struct fb_image *image)
{
        struct virtio_gpu_fbdev *vfbdev = info->par;

        drm_fb_helper_sys_imageblit(info, image);
        virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
                                image->width, image->height);
        schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static struct fb_ops virtio_gpufb_ops = {
        .owner = THIS_MODULE,
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_fillrect = virtio_gpu_3d_fillrect,
        .fb_copyarea = virtio_gpu_3d_copyarea,
        .fb_imageblit = virtio_gpu_3d_imageblit,
};

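/* Map the framebuffer object into kernel space so fbcon can draw into it. */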
static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj)
{
        return virtio_gpu_object_kmap(obj, NULL);
}

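/*
 * .fb_probe callback: allocate a guest buffer object, create the matching
 * host 2D resource, map the object for CPU access, attach its backing
 * storage to the resource, and fill in the fb_info used for fbcon.
 */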
static int virtio_gpufb_create(struct drm_fb_helper *helper,
                               struct drm_fb_helper_surface_size *sizes)
{
        struct virtio_gpu_fbdev *vfbdev =
                container_of(helper, struct virtio_gpu_fbdev, helper);
        struct drm_device *dev = helper->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct fb_info *info;
        struct drm_framebuffer *fb;
        struct drm_mode_fb_cmd2 mode_cmd = {};
        struct virtio_gpu_object *obj;
        uint32_t resid, format, size;
        int ret;

        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
        mode_cmd.pitches[0] = mode_cmd.width * 4;
        mode_cmd.pixel_format = DRM_FORMAT_HOST_XRGB8888;

        format = virtio_gpu_translate_format(mode_cmd.pixel_format);
        if (format == 0)
                return -EINVAL;

        size = mode_cmd.pitches[0] * mode_cmd.height;
        obj = virtio_gpu_alloc_object(dev, size, false, true);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        virtio_gpu_resource_id_get(vgdev, &resid);
        virtio_gpu_cmd_create_resource(vgdev, resid, format,
                                       mode_cmd.width, mode_cmd.height);

        ret = virtio_gpu_vmap_fb(vgdev, obj);
        if (ret) {
                DRM_ERROR("failed to vmap fb %d\n", ret);
                goto err_obj_vmap;
        }

        /* attach the object to the resource */
        ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
        if (ret)
                goto err_obj_attach;

        info = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(info)) {
                ret = PTR_ERR(info);
                goto err_fb_alloc;
        }

        info->par = helper;

        ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
                                          &mode_cmd, &obj->gem_base);
        if (ret)
                goto err_fb_alloc;

        fb = &vfbdev->vgfb.base;

        vfbdev->helper.fb = fb;

        strcpy(info->fix.id, "virtiodrmfb");
        info->fbops = &virtio_gpufb_ops;
        info->pixmap.flags = FB_PIXMAP_SYSTEM;

        info->screen_buffer = obj->vmap;
        info->screen_size = obj->gem_base.size;
        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
        drm_fb_helper_fill_var(info, &vfbdev->helper,
                               sizes->fb_width, sizes->fb_height);

        info->fix.mmio_start = 0;
        info->fix.mmio_len = 0;
        return 0;

err_fb_alloc:
        virtio_gpu_object_detach(vgdev, obj);
err_obj_attach:
err_obj_vmap:
        virtio_gpu_gem_free_object(&obj->gem_base);
        return ret;
}

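/* Unregister the fbdev framebuffer and tear down the helper and fb state. */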
static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
                                    struct virtio_gpu_fbdev *vgfbdev)
{
        struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;

        drm_fb_helper_unregister_fbi(&vgfbdev->helper);

        if (vgfb->base.obj[0])
                vgfb->base.obj[0] = NULL;
        drm_fb_helper_fini(&vgfbdev->helper);
        drm_framebuffer_cleanup(&vgfb->base);

        return 0;
}

static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
        .fb_probe = virtio_gpufb_create,
};

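/*
 * Set up fbdev emulation: allocate the per-device fbdev state, initialize
 * the delayed dirty work, and register with the DRM fb helper so the
 * initial fbcon configuration is created.
 */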
int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_fbdev *vgfbdev;
        int bpp_sel = 32; /* TODO: parameter from somewhere? */
        int ret;

        vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL);
        if (!vgfbdev)
                return -ENOMEM;

        vgfbdev->vgdev = vgdev;
        vgdev->vgfbdev = vgfbdev;
        INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work);

        drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
                              &virtio_gpu_fb_helper_funcs);
        ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
                                 VIRTIO_GPUFB_CONN_LIMIT);
        if (ret) {
                kfree(vgfbdev);
                return ret;
        }

        drm_fb_helper_single_add_all_connectors(&vgfbdev->helper);
        drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel);
        return 0;
}

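/* Tear down fbdev emulation, if it was set up. */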
void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev)
{
        if (!vgdev->vgfbdev)
                return;

        virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev);
        kfree(vgdev->vgfbdev);
        vgdev->vgfbdev = NULL;
}