OSDN Git Service

drm/vmwgfx: Use a per-device semaphore for reservation protection
[android-x86/kernel.git] / drivers / gpu / drm / vmwgfx / vmwgfx_shader.c
1 /**************************************************************************
2  *
3  * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_resource_priv.h"
30 #include "ttm/ttm_placement.h"
31
32 #define VMW_COMPAT_SHADER_HT_ORDER 12
33
/**
 * struct vmw_shader - Resource-manager representation of a guest-backed
 * shader.
 *
 * @res: Embedded struct vmw_resource; base member so container_of() can
 * recover the shader from a resource pointer.
 * @type: SVGA3D shader type (vertex / pixel / geometry).
 * @size: Size of the shader byte code in bytes.
 */
struct vmw_shader {
        struct vmw_resource res;
        SVGA3dShaderType type;
        uint32_t size;
};
39
/**
 * struct vmw_user_shader - User-space visible shader object.
 *
 * @base: TTM base object providing the user-space handle and refcounting.
 * @shader: The embedded shader resource proper.
 */
struct vmw_user_shader {
        struct ttm_base_object base;
        struct vmw_shader shader;
};
44
/**
 * enum vmw_compat_shader_state - Staging state for compat shaders
 *
 * @VMW_COMPAT_COMMITED: The shader is visible in the manager's hash table
 * and resides on the manager's list. (Constant name keeps a historical
 * misspelling of "committed"; renaming would touch all users.)
 * @VMW_COMPAT_ADD: The shader is staged for addition on a caller's list
 * and will be committed or destroyed when the execbuf outcome is known.
 * @VMW_COMPAT_DEL: The shader is staged for removal on a caller's list
 * and will be destroyed or re-committed when the execbuf outcome is known.
 */
enum vmw_compat_shader_state {
        VMW_COMPAT_COMMITED,
        VMW_COMPAT_ADD,
        VMW_COMPAT_DEL
};
53
/**
 * struct vmw_compat_shader - Metadata for compat shaders.
 *
 * @handle: The TTM handle of the guest backed shader.
 * @tfile: The struct ttm_object_file the guest backed shader is registered
 * with.
 * @hash: Hash item for lookup. The key encodes the shader type in the
 * top byte and the user key in the low 24 bits.
 * @head: List head for staging lists or the compat shader manager list.
 * @state: Staging state.
 *
 * The structure is protected by the cmdbuf lock.
 */
struct vmw_compat_shader {
        u32 handle;
        struct ttm_object_file *tfile;
        struct drm_hash_item hash;
        struct list_head head;
        enum vmw_compat_shader_state state;
};
73
/**
 * struct vmw_compat_shader_manager - Compat shader manager.
 *
 * @shaders: Hash table containing staged and committed compat shaders
 * @list: List of committed shaders.
 * @dev_priv: Pointer to a device private structure.
 *
 * @shaders and @list are protected by the cmdbuf mutex for now.
 */
struct vmw_compat_shader_manager {
        struct drm_open_hash shaders;
        struct list_head list;
        struct vmw_private *dev_priv;
};
88
/* Forward declarations of resource callbacks defined later in this file. */
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

/*
 * Accounting size (bytes) charged against the TTM memory global per
 * user shader; computed lazily on first allocation in vmw_shader_alloc().
 */
static uint64_t vmw_user_shader_size;

/* Conversion from user-space TTM base objects to shader resources. */
static const struct vmw_user_resource_conv user_shader_conv = {
        .object_type = VMW_RES_SHADER,
        .base_obj_to_res = vmw_user_shader_base_to_res,
        .res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
        &user_shader_conv;


/* Resource-manager callbacks for guest-backed shader resources. */
static const struct vmw_res_func vmw_gb_shader_func = {
        .res_type = vmw_res_shader,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed shaders",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_shader_create,
        .destroy = vmw_gb_shader_destroy,
        .bind = vmw_gb_shader_bind,
        .unbind = vmw_gb_shader_unbind
};
124
125 /**
126  * Shader management:
127  */
128
129 static inline struct vmw_shader *
130 vmw_res_to_shader(struct vmw_resource *res)
131 {
132         return container_of(res, struct vmw_shader, res);
133 }
134
/*
 * Hardware-destroy callback installed via vmw_resource_activate().
 * The return value of the FIFO destroy command is deliberately ignored:
 * at this point the resource is going away and nothing can be done
 * about a failure.
 */
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
        (void) vmw_gb_shader_destroy(res);
}
139
140 static int vmw_gb_shader_init(struct vmw_private *dev_priv,
141                               struct vmw_resource *res,
142                               uint32_t size,
143                               uint64_t offset,
144                               SVGA3dShaderType type,
145                               struct vmw_dma_buffer *byte_code,
146                               void (*res_free) (struct vmw_resource *res))
147 {
148         struct vmw_shader *shader = vmw_res_to_shader(res);
149         int ret;
150
151         ret = vmw_resource_init(dev_priv, res, true,
152                                 res_free, &vmw_gb_shader_func);
153
154
155         if (unlikely(ret != 0)) {
156                 if (res_free)
157                         res_free(res);
158                 else
159                         kfree(res);
160                 return ret;
161         }
162
163         res->backup_size = size;
164         if (byte_code) {
165                 res->backup = vmw_dmabuf_reference(byte_code);
166                 res->backup_offset = offset;
167         }
168         shader->size = size;
169         shader->type = type;
170
171         vmw_resource_activate(res, vmw_hw_shader_destroy);
172         return 0;
173 }
174
175 static int vmw_gb_shader_create(struct vmw_resource *res)
176 {
177         struct vmw_private *dev_priv = res->dev_priv;
178         struct vmw_shader *shader = vmw_res_to_shader(res);
179         int ret;
180         struct {
181                 SVGA3dCmdHeader header;
182                 SVGA3dCmdDefineGBShader body;
183         } *cmd;
184
185         if (likely(res->id != -1))
186                 return 0;
187
188         ret = vmw_resource_alloc_id(res);
189         if (unlikely(ret != 0)) {
190                 DRM_ERROR("Failed to allocate a shader id.\n");
191                 goto out_no_id;
192         }
193
194         if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
195                 ret = -EBUSY;
196                 goto out_no_fifo;
197         }
198
199         cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
200         if (unlikely(cmd == NULL)) {
201                 DRM_ERROR("Failed reserving FIFO space for shader "
202                           "creation.\n");
203                 ret = -ENOMEM;
204                 goto out_no_fifo;
205         }
206
207         cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
208         cmd->header.size = sizeof(cmd->body);
209         cmd->body.shid = res->id;
210         cmd->body.type = shader->type;
211         cmd->body.sizeInBytes = shader->size;
212         vmw_fifo_commit(dev_priv, sizeof(*cmd));
213         (void) vmw_3d_resource_inc(dev_priv, false);
214
215         return 0;
216
217 out_no_fifo:
218         vmw_resource_release_id(res);
219 out_no_id:
220         return ret;
221 }
222
223 static int vmw_gb_shader_bind(struct vmw_resource *res,
224                               struct ttm_validate_buffer *val_buf)
225 {
226         struct vmw_private *dev_priv = res->dev_priv;
227         struct {
228                 SVGA3dCmdHeader header;
229                 SVGA3dCmdBindGBShader body;
230         } *cmd;
231         struct ttm_buffer_object *bo = val_buf->bo;
232
233         BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
234
235         cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
236         if (unlikely(cmd == NULL)) {
237                 DRM_ERROR("Failed reserving FIFO space for shader "
238                           "binding.\n");
239                 return -ENOMEM;
240         }
241
242         cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
243         cmd->header.size = sizeof(cmd->body);
244         cmd->body.shid = res->id;
245         cmd->body.mobid = bo->mem.start;
246         cmd->body.offsetInBytes = 0;
247         res->backup_dirty = false;
248         vmw_fifo_commit(dev_priv, sizeof(*cmd));
249
250         return 0;
251 }
252
253 static int vmw_gb_shader_unbind(struct vmw_resource *res,
254                                 bool readback,
255                                 struct ttm_validate_buffer *val_buf)
256 {
257         struct vmw_private *dev_priv = res->dev_priv;
258         struct {
259                 SVGA3dCmdHeader header;
260                 SVGA3dCmdBindGBShader body;
261         } *cmd;
262         struct vmw_fence_obj *fence;
263
264         BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
265
266         cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
267         if (unlikely(cmd == NULL)) {
268                 DRM_ERROR("Failed reserving FIFO space for shader "
269                           "unbinding.\n");
270                 return -ENOMEM;
271         }
272
273         cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
274         cmd->header.size = sizeof(cmd->body);
275         cmd->body.shid = res->id;
276         cmd->body.mobid = SVGA3D_INVALID_ID;
277         cmd->body.offsetInBytes = 0;
278         vmw_fifo_commit(dev_priv, sizeof(*cmd));
279
280         /*
281          * Create a fence object and fence the backup buffer.
282          */
283
284         (void) vmw_execbuf_fence_commands(NULL, dev_priv,
285                                           &fence, NULL);
286
287         vmw_fence_single_bo(val_buf->bo, fence);
288
289         if (likely(fence != NULL))
290                 vmw_fence_obj_unreference(&fence);
291
292         return 0;
293 }
294
295 static int vmw_gb_shader_destroy(struct vmw_resource *res)
296 {
297         struct vmw_private *dev_priv = res->dev_priv;
298         struct {
299                 SVGA3dCmdHeader header;
300                 SVGA3dCmdDestroyGBShader body;
301         } *cmd;
302
303         if (likely(res->id == -1))
304                 return 0;
305
306         mutex_lock(&dev_priv->binding_mutex);
307         vmw_context_binding_res_list_scrub(&res->binding_head);
308
309         cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
310         if (unlikely(cmd == NULL)) {
311                 DRM_ERROR("Failed reserving FIFO space for shader "
312                           "destruction.\n");
313                 mutex_unlock(&dev_priv->binding_mutex);
314                 return -ENOMEM;
315         }
316
317         cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
318         cmd->header.size = sizeof(cmd->body);
319         cmd->body.shid = res->id;
320         vmw_fifo_commit(dev_priv, sizeof(*cmd));
321         mutex_unlock(&dev_priv->binding_mutex);
322         vmw_resource_release_id(res);
323         vmw_3d_resource_dec(dev_priv, false);
324
325         return 0;
326 }
327
328 /**
329  * User-space shader management:
330  */
331
332 static struct vmw_resource *
333 vmw_user_shader_base_to_res(struct ttm_base_object *base)
334 {
335         return &(container_of(base, struct vmw_user_shader, base)->
336                  shader.res);
337 }
338
/*
 * Resource destructor for user shaders. Frees the containing
 * vmw_user_shader and returns its accounting size to the TTM memory
 * global. The device pointer is captured before the object is freed.
 */
static void vmw_user_shader_free(struct vmw_resource *res)
{
        struct vmw_user_shader *ushader =
                container_of(res, struct vmw_user_shader, shader.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(ushader, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_shader_size);
}
349
350 /**
351  * This function is called when user space has no more references on the
352  * base object. It releases the base-object's reference on the resource object.
353  */
354
355 static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
356 {
357         struct ttm_base_object *base = *p_base;
358         struct vmw_resource *res = vmw_user_shader_base_to_res(base);
359
360         *p_base = NULL;
361         vmw_resource_unreference(&res);
362 }
363
364 int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
365                               struct drm_file *file_priv)
366 {
367         struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
368         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
369
370         return ttm_ref_object_base_unref(tfile, arg->handle,
371                                          TTM_REF_USAGE);
372 }
373
/**
 * vmw_shader_alloc - Allocate a user shader object with a TTM handle.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Optional dma buffer holding the shader byte code.
 * @shader_size: Size of the byte code in bytes.
 * @offset: Offset of the byte code within @buffer.
 * @shader_type: SVGA3D shader type.
 * @tfile: TTM object file to register the base object with.
 * @handle: If non-NULL, receives the user-space handle on success.
 *
 * Charges the TTM memory global, allocates the combined base object /
 * shader resource and registers the base object. On success the base
 * object holds its own reference on the resource; the local reference
 * is always dropped before returning.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int vmw_shader_alloc(struct vmw_private *dev_priv,
                            struct vmw_dma_buffer *buffer,
                            size_t shader_size,
                            size_t offset,
                            SVGA3dShaderType shader_type,
                            struct ttm_object_file *tfile,
                            u32 *handle)
{
        struct vmw_user_shader *ushader;
        struct vmw_resource *res, *tmp;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by maximum number_of shaders anyway.
         */
        if (unlikely(vmw_user_shader_size == 0))
                vmw_user_shader_size =
                        ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_shader_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for shader "
                                  "creation.\n");
                goto out;
        }

        ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
        if (unlikely(ushader == NULL)) {
                /* Return the accounting charge taken above. */
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_shader_size);
                ret = -ENOMEM;
                goto out;
        }

        res = &ushader->shader.res;
        ushader->base.shareable = false;
        ushader->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_gb_shader_init(dev_priv, res, shader_size,
                                 offset, shader_type, buffer,
                                 vmw_user_shader_free);
        if (unlikely(ret != 0))
                goto out;

        /* The base object takes its own reference on the resource. */
        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &ushader->base, false,
                                   VMW_RES_SHADER,
                                   &vmw_user_shader_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        if (handle)
                *handle = ushader->base.hash.key;
out_err:
        /* Drop the local reference; the base object keeps its own. */
        vmw_resource_unreference(&res);
out:
        return ret;
}
443
444
445 int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
446                              struct drm_file *file_priv)
447 {
448         struct vmw_private *dev_priv = vmw_priv(dev);
449         struct drm_vmw_shader_create_arg *arg =
450                 (struct drm_vmw_shader_create_arg *)data;
451         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
452         struct vmw_dma_buffer *buffer = NULL;
453         SVGA3dShaderType shader_type;
454         int ret;
455
456         if (arg->buffer_handle != SVGA3D_INVALID_ID) {
457                 ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
458                                              &buffer);
459                 if (unlikely(ret != 0)) {
460                         DRM_ERROR("Could not find buffer for shader "
461                                   "creation.\n");
462                         return ret;
463                 }
464
465                 if ((u64)buffer->base.num_pages * PAGE_SIZE <
466                     (u64)arg->size + (u64)arg->offset) {
467                         DRM_ERROR("Illegal buffer- or shader size.\n");
468                         ret = -EINVAL;
469                         goto out_bad_arg;
470                 }
471         }
472
473         switch (arg->shader_type) {
474         case drm_vmw_shader_type_vs:
475                 shader_type = SVGA3D_SHADERTYPE_VS;
476                 break;
477         case drm_vmw_shader_type_ps:
478                 shader_type = SVGA3D_SHADERTYPE_PS;
479                 break;
480         case drm_vmw_shader_type_gs:
481                 shader_type = SVGA3D_SHADERTYPE_GS;
482                 break;
483         default:
484                 DRM_ERROR("Illegal shader type.\n");
485                 ret = -EINVAL;
486                 goto out_bad_arg;
487         }
488
489         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
490         if (unlikely(ret != 0))
491                 goto out_bad_arg;
492
493         ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
494                                shader_type, tfile, &arg->shader_handle);
495
496         ttm_read_unlock(&dev_priv->reservation_sem);
497 out_bad_arg:
498         vmw_dmabuf_unreference(&buffer);
499         return ret;
500 }
501
502 /**
503  * vmw_compat_shader_lookup - Look up a compat shader
504  *
505  * @man: Pointer to the compat shader manager.
506  * @shader_type: The shader type, that combined with the user_key identifies
507  * the shader.
508  * @user_key: On entry, this should be a pointer to the user_key.
509  * On successful exit, it will contain the guest-backed shader's TTM handle.
510  *
511  * Returns 0 on success. Non-zero on failure, in which case the value pointed
512  * to by @user_key is unmodified.
513  */
514 int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
515                              SVGA3dShaderType shader_type,
516                              u32 *user_key)
517 {
518         struct drm_hash_item *hash;
519         int ret;
520         unsigned long key = *user_key | (shader_type << 24);
521
522         ret = drm_ht_find_item(&man->shaders, key, &hash);
523         if (unlikely(ret != 0))
524                 return ret;
525
526         *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
527                                    hash)->handle;
528
529         return 0;
530 }
531
/**
 * vmw_compat_shader_free - Free a compat shader.
 *
 * @man: Pointer to the compat shader manager.
 * @entry: Pointer to a struct vmw_compat_shader.
 *
 * Frees a struct vmw_compat_shader entry and drops its reference to the
 * guest backed shader. The entry is unlinked from whatever list it is on
 * and removed from the manager's hash table before being freed.
 */
static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
                                   struct vmw_compat_shader *entry)
{
        list_del(&entry->head);
        WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
        WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
                                          TTM_REF_USAGE));
        kfree(entry);
}
550
551 /**
552  * vmw_compat_shaders_commit - Commit a list of compat shader actions.
553  *
554  * @man: Pointer to the compat shader manager.
555  * @list: Caller's list of compat shader actions.
556  *
557  * This function commits a list of compat shader additions or removals.
558  * It is typically called when the execbuf ioctl call triggering these
559  * actions has commited the fifo contents to the device.
560  */
561 void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
562                                struct list_head *list)
563 {
564         struct vmw_compat_shader *entry, *next;
565
566         list_for_each_entry_safe(entry, next, list, head) {
567                 list_del(&entry->head);
568                 switch (entry->state) {
569                 case VMW_COMPAT_ADD:
570                         entry->state = VMW_COMPAT_COMMITED;
571                         list_add_tail(&entry->head, &man->list);
572                         break;
573                 case VMW_COMPAT_DEL:
574                         ttm_ref_object_base_unref(entry->tfile, entry->handle,
575                                                   TTM_REF_USAGE);
576                         kfree(entry);
577                         break;
578                 default:
579                         BUG();
580                         break;
581                 }
582         }
583 }
584
585 /**
586  * vmw_compat_shaders_revert - Revert a list of compat shader actions
587  *
588  * @man: Pointer to the compat shader manager.
589  * @list: Caller's list of compat shader actions.
590  *
591  * This function reverts a list of compat shader additions or removals.
592  * It is typically called when the execbuf ioctl call triggering these
593  * actions failed for some reason, and the command stream was never
594  * submitted.
595  */
596 void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
597                                struct list_head *list)
598 {
599         struct vmw_compat_shader *entry, *next;
600         int ret;
601
602         list_for_each_entry_safe(entry, next, list, head) {
603                 switch (entry->state) {
604                 case VMW_COMPAT_ADD:
605                         vmw_compat_shader_free(man, entry);
606                         break;
607                 case VMW_COMPAT_DEL:
608                         ret = drm_ht_insert_item(&man->shaders, &entry->hash);
609                         list_del(&entry->head);
610                         list_add_tail(&entry->head, &man->list);
611                         entry->state = VMW_COMPAT_COMMITED;
612                         break;
613                 default:
614                         BUG();
615                         break;
616                 }
617         }
618 }
619
620 /**
621  * vmw_compat_shader_remove - Stage a compat shader for removal.
622  *
623  * @man: Pointer to the compat shader manager
624  * @user_key: The key that is used to identify the shader. The key is
625  * unique to the shader type.
626  * @shader_type: Shader type.
627  * @list: Caller's list of staged shader actions.
628  *
629  * This function stages a compat shader for removal and removes the key from
630  * the shader manager's hash table. If the shader was previously only staged
631  * for addition it is completely removed (But the execbuf code may keep a
632  * reference if it was bound to a context between addition and removal). If
633  * it was previously commited to the manager, it is staged for removal.
634  */
635 int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
636                              u32 user_key, SVGA3dShaderType shader_type,
637                              struct list_head *list)
638 {
639         struct vmw_compat_shader *entry;
640         struct drm_hash_item *hash;
641         int ret;
642
643         ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
644                                &hash);
645         if (likely(ret != 0))
646                 return -EINVAL;
647
648         entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
649
650         switch (entry->state) {
651         case VMW_COMPAT_ADD:
652                 vmw_compat_shader_free(man, entry);
653                 break;
654         case VMW_COMPAT_COMMITED:
655                 (void) drm_ht_remove_item(&man->shaders, &entry->hash);
656                 list_del(&entry->head);
657                 entry->state = VMW_COMPAT_DEL;
658                 list_add_tail(&entry->head, list);
659                 break;
660         default:
661                 BUG();
662                 break;
663         }
664
665         return 0;
666 }
667
668 /**
669  * vmw_compat_shader_add - Create a compat shader and add the
670  * key to the manager
671  *
672  * @man: Pointer to the compat shader manager
673  * @user_key: The key that is used to identify the shader. The key is
674  * unique to the shader type.
675  * @bytecode: Pointer to the bytecode of the shader.
676  * @shader_type: Shader type.
677  * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
678  * to be created with.
679  * @list: Caller's list of staged shader actions.
680  *
681  * Note that only the key is added to the shader manager's hash table.
682  * The shader is not yet added to the shader manager's list of shaders.
683  */
684 int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
685                           u32 user_key, const void *bytecode,
686                           SVGA3dShaderType shader_type,
687                           size_t size,
688                           struct ttm_object_file *tfile,
689                           struct list_head *list)
690 {
691         struct vmw_dma_buffer *buf;
692         struct ttm_bo_kmap_obj map;
693         bool is_iomem;
694         struct vmw_compat_shader *compat;
695         u32 handle;
696         int ret;
697
698         if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
699                 return -EINVAL;
700
701         /* Allocate and pin a DMA buffer */
702         buf = kzalloc(sizeof(*buf), GFP_KERNEL);
703         if (unlikely(buf == NULL))
704                 return -ENOMEM;
705
706         ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
707                               true, vmw_dmabuf_bo_free);
708         if (unlikely(ret != 0))
709                 goto out;
710
711         ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
712         if (unlikely(ret != 0))
713                 goto no_reserve;
714
715         /* Map and copy shader bytecode. */
716         ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
717                           &map);
718         if (unlikely(ret != 0)) {
719                 ttm_bo_unreserve(&buf->base);
720                 goto no_reserve;
721         }
722
723         memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
724         WARN_ON(is_iomem);
725
726         ttm_bo_kunmap(&map);
727         ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
728         WARN_ON(ret != 0);
729         ttm_bo_unreserve(&buf->base);
730
731         /* Create a guest-backed shader container backed by the dma buffer */
732         ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
733                                tfile, &handle);
734         vmw_dmabuf_unreference(&buf);
735         if (unlikely(ret != 0))
736                 goto no_reserve;
737         /*
738          * Create a compat shader structure and stage it for insertion
739          * in the manager
740          */
741         compat = kzalloc(sizeof(*compat), GFP_KERNEL);
742         if (compat == NULL)
743                 goto no_compat;
744
745         compat->hash.key = user_key |  (shader_type << 24);
746         ret = drm_ht_insert_item(&man->shaders, &compat->hash);
747         if (unlikely(ret != 0))
748                 goto out_invalid_key;
749
750         compat->state = VMW_COMPAT_ADD;
751         compat->handle = handle;
752         compat->tfile = tfile;
753         list_add_tail(&compat->head, list);
754
755         return 0;
756
757 out_invalid_key:
758         kfree(compat);
759 no_compat:
760         ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
761 no_reserve:
762 out:
763         return ret;
764 }
765
766 /**
767  * vmw_compat_shader_man_create - Create a compat shader manager
768  *
769  * @dev_priv: Pointer to a device private structure.
770  *
771  * Typically done at file open time. If successful returns a pointer to a
772  * compat shader manager. Otherwise returns an error pointer.
773  */
774 struct vmw_compat_shader_manager *
775 vmw_compat_shader_man_create(struct vmw_private *dev_priv)
776 {
777         struct vmw_compat_shader_manager *man;
778         int ret;
779
780         man = kzalloc(sizeof(*man), GFP_KERNEL);
781         if (man == NULL)
782                 return ERR_PTR(-ENOMEM);
783
784         man->dev_priv = dev_priv;
785         INIT_LIST_HEAD(&man->list);
786         ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
787         if (ret == 0)
788                 return man;
789
790         kfree(man);
791         return ERR_PTR(ret);
792 }
793
794 /**
795  * vmw_compat_shader_man_destroy - Destroy a compat shader manager
796  *
797  * @man: Pointer to the shader manager to destroy.
798  *
799  * Typically done at file close time.
800  */
801 void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
802 {
803         struct vmw_compat_shader *entry, *next;
804
805         mutex_lock(&man->dev_priv->cmdbuf_mutex);
806         list_for_each_entry_safe(entry, next, &man->list, head)
807                 vmw_compat_shader_free(man, entry);
808
809         mutex_unlock(&man->dev_priv->cmdbuf_mutex);
810         kfree(man);
811 }