drivers/gpu/drm/vkms/vkms_gem.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/shmem_fs.h>

#include "vkms_drv.h"

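/*
 * Allocate a vkms_gem_object and initialize the underlying shmem-backed GEM
 * object. The requested size is rounded up to a whole number of pages.
 */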
static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
						 u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	size = roundup(size, PAGE_SIZE);
	ret = drm_gem_object_init(dev, &obj->gem, size);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

void vkms_gem_free_object(struct drm_gem_object *obj)
{
	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
						   gem);

	WARN_ON(gem->pages);
	WARN_ON(gem->vaddr);

	mutex_destroy(&gem->pages_lock);
	drm_gem_object_release(obj);
	kfree(gem);
}

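/*
 * Page-fault handler for mmap'ed vkms GEM objects: if the backing pages have
 * already been pinned, hand back the faulting page directly; otherwise pull
 * it in from the object's shmem mapping.
 */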
vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vkms_gem_object *obj = vma->vm_private_data;
	unsigned long vaddr = vmf->address;
	pgoff_t page_offset;
	loff_t num_pages;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);

	/* Reject faults beyond the end of the buffer. */
	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;
		struct address_space *mapping;

		mapping = file_inode(obj->gem.filp)->i_mapping;
		page = shmem_read_mapping_page(mapping, page_offset);

		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else {
			switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
			}
		}
	}
	return ret;
}

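/*
 * Create a GEM object of @size bytes and install a handle for it in @file.
 * drm_gem_handle_create() takes its own reference on the object, so the
 * local reference is always dropped before returning; if handle creation
 * failed, that put is also what frees the object.
 */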
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
				       struct drm_file *file,
				       u32 *handle,
				       u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	if (!file || !dev || !handle)
		return ERR_PTR(-EINVAL);

	obj = __vkms_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->gem, handle);
	/* Drop the local reference; the handle (if created) keeps its own. */
	drm_gem_object_put_unlocked(&obj->gem);
	if (ret)
		return ERR_PTR(ret);

	return &obj->gem;
}

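/*
 * DRM "dumb buffer" hook: compute the pitch and total size for the requested
 * width/height/bpp and back the buffer with a vkms GEM object.
 */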
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_obj;
	u64 pitch, size;

	if (!args || !dev || !file)
		return -EINVAL;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = pitch * args->height;

	if (!size)
		return -EINVAL;

	gem_obj = vkms_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_obj))
		return PTR_ERR(gem_obj);

	args->size = gem_obj->size;
	args->pitch = pitch;

	DRM_DEBUG_DRIVER("Created object of size %lld\n", size);

	return 0;
}

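/*
 * Pin the object's backing pages, allocating them on first use. cmpxchg()
 * keeps the assignment race-free: if another path installed the pages first,
 * the freshly pinned set is released again and the existing one is reused.
 */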
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
	struct drm_gem_object *gem_obj = &vkms_obj->gem;

	if (!vkms_obj->pages) {
		struct page **pages = drm_gem_get_pages(gem_obj);

		if (IS_ERR(pages))
			return pages;

		if (cmpxchg(&vkms_obj->pages, NULL, pages))
			drm_gem_put_pages(gem_obj, pages, false, true);
	}

	return vkms_obj->pages;
}

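/*
 * Drop one vmap reference; when the last one goes away, tear down the kernel
 * mapping and release the pinned pages.
 */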
void vkms_gem_vunmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);

	mutex_lock(&vkms_obj->pages_lock);
	if (vkms_obj->vmap_count < 1) {
		WARN_ON(vkms_obj->vaddr);
		WARN_ON(vkms_obj->pages);
		mutex_unlock(&vkms_obj->pages_lock);
		return;
	}

	vkms_obj->vmap_count--;

	if (vkms_obj->vmap_count == 0) {
		vunmap(vkms_obj->vaddr);
		vkms_obj->vaddr = NULL;
		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
		vkms_obj->pages = NULL;
	}

	mutex_unlock(&vkms_obj->pages_lock);
}

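/*
 * Map the object into the kernel's address space, pinning its pages on the
 * first call. The mapping is reference counted; each successful call must be
 * balanced by vkms_gem_vunmap().
 */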
int vkms_gem_vmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	int ret = 0;

	mutex_lock(&vkms_obj->pages_lock);

	if (!vkms_obj->vaddr) {
		unsigned int n_pages = obj->size >> PAGE_SHIFT;
		struct page **pages = _get_pages(vkms_obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
		if (!vkms_obj->vaddr)
			goto err_vmap;
	}

	vkms_obj->vmap_count++;
	goto out;

err_vmap:
	ret = -ENOMEM;
	drm_gem_put_pages(obj, vkms_obj->pages, false, true);
	vkms_obj->pages = NULL;
out:
	mutex_unlock(&vkms_obj->pages_lock);
	return ret;
}