/* Copyright (c) 2008-2016, 2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#ifndef __KGSL_H
#define __KGSL_H
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/msm_kgsl.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/regulator/consumer.h>
#include <linux/dma-attrs.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <asm/cacheflush.h>
/*
 * --- kgsl drawobj flags ---
 * These flags are the same as the --- cmdbatch flags ---
 * but renamed to reflect that cmdbatch has been renamed to drawobj.
 */
#define KGSL_DRAWOBJ_MEMLIST		KGSL_CMDBATCH_MEMLIST
#define KGSL_DRAWOBJ_MARKER		KGSL_CMDBATCH_MARKER
#define KGSL_DRAWOBJ_SUBMIT_IB_LIST	KGSL_CMDBATCH_SUBMIT_IB_LIST
#define KGSL_DRAWOBJ_CTX_SWITCH		KGSL_CMDBATCH_CTX_SWITCH
#define KGSL_DRAWOBJ_PROFILING		KGSL_CMDBATCH_PROFILING
#define KGSL_DRAWOBJ_PROFILING_KTIME	KGSL_CMDBATCH_PROFILING_KTIME
#define KGSL_DRAWOBJ_END_OF_FRAME	KGSL_CMDBATCH_END_OF_FRAME
#define KGSL_DRAWOBJ_SYNC		KGSL_CMDBATCH_SYNC
#define KGSL_DRAWOBJ_PWR_CONSTRAINT	KGSL_CMDBATCH_PWR_CONSTRAINT
#define KGSL_DRAWOBJ_SPARSE		KGSL_CMDBATCH_SPARSE

#define kgsl_drawobj_profiling_buffer kgsl_cmdbatch_profiling_buffer
/* The number of memstore arrays limits the number of contexts allowed.
 * If more contexts are needed, update the multiplier for KGSL_MEMSTORE_SIZE.
 */
#define KGSL_MEMSTORE_SIZE	((int)(PAGE_SIZE * 8))
#define KGSL_MEMSTORE_GLOBAL	(0)
#define KGSL_PRIORITY_MAX_RB_LEVELS 4
#define KGSL_MEMSTORE_MAX	(KGSL_MEMSTORE_SIZE / \
	sizeof(struct kgsl_devmemstore) - 1 - KGSL_PRIORITY_MAX_RB_LEVELS)
#define KGSL_MAX_CONTEXTS_PER_PROC 200
#define MEMSTORE_RB_OFFSET(rb, field) \
	KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field)

#define MEMSTORE_ID_GPU_ADDR(dev, iter, field) \
	((dev)->memstore.gpuaddr + KGSL_MEMSTORE_OFFSET(iter, field))

#define MEMSTORE_RB_GPU_ADDR(dev, rb, field) \
	((dev)->memstore.gpuaddr + \
	 KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field))
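
/*
 * Illustrative note, not from the original header: each context id owns one
 * struct kgsl_devmemstore slot in the memstore buffer, and the ringbuffers
 * use the KGSL_PRIORITY_MAX_RB_LEVELS slots that follow KGSL_MEMSTORE_MAX.
 * Assuming a device pointer 'device' and a ringbuffer 'rb', the GPU address
 * of one of the ringbuffer's memstore fields would be computed as:
 *
 *	u64 gpuaddr = MEMSTORE_RB_GPU_ADDR(device, rb, eoptimestamp);
 *
 * where 'eoptimestamp' stands in for whichever kgsl_devmemstore field is
 * needed.
 */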
/*
 * SCRATCH MEMORY: The scratch memory is one page worth of data that
 * is mapped into the GPU. This allows for some 'shared' data between
 * the GPU and CPU. For example, it will be used by the GPU to write
 * each updated RPTR for each RB.
 *
 * Offset: Length(bytes): What
 * 0x0: 4 * KGSL_PRIORITY_MAX_RB_LEVELS: RB0 RPTR
 */

/* Shadow global helpers */
#define SCRATCH_RPTR_OFFSET(id) ((id) * sizeof(unsigned int))
#define SCRATCH_RPTR_GPU_ADDR(dev, id) \
	((dev)->scratch.gpuaddr + SCRATCH_RPTR_OFFSET(id))
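
/*
 * Illustrative note, not from the original header: the GPU writes each
 * ringbuffer's read pointer into its own 4-byte scratch slot, which the CPU
 * can locate either by offset or by GPU address. Assuming a device pointer
 * 'device' and a ringbuffer id 'id':
 *
 *	unsigned int offset = SCRATCH_RPTR_OFFSET(id);
 *	u64 rptr_gpuaddr = SCRATCH_RPTR_GPU_ADDR(device, id);
 */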
/* Timestamp window used to detect rollovers (half of integer range) */
#define KGSL_TIMESTAMP_WINDOW 0x80000000
/* A helper for memory statistics - add the new size to the stat and, if
 * the statistic is greater than _max, set _max
 */
static inline void KGSL_STATS_ADD(uint64_t size, atomic_long_t *stat,
		atomic_long_t *max)
{
	uint64_t ret = atomic_long_add_return(size, stat);

	if (ret > atomic_long_read(max))
		atomic_long_set(max, ret);
}
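
/*
 * Illustrative usage, not from the original header: account a new allocation
 * of 'size' bytes against one of the kgsl_driver.stats counters declared
 * below and update its high-water mark in the same call:
 *
 *	KGSL_STATS_ADD(size, &kgsl_driver.stats.page_alloc,
 *			&kgsl_driver.stats.page_alloc_max);
 */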
#define KGSL_MAX_NUMIBS 100000
#define KGSL_MAX_SYNCPOINTS 32
#define KGSL_MAX_SPARSE 1000
/**
 * struct kgsl_driver - main container for global KGSL things
 * @cdev: Character device struct
 * @major: Major ID for the KGSL device
 * @class: Pointer to the class struct for the core KGSL sysfs entries
 * @virtdev: Virtual device for managing the core
 * @ptkobj: kobject for storing the pagetable statistics
 * @prockobj: kobject for storing the process statistics
 * @devp: Array of pointers to the individual KGSL device structs
 * @process_list: List of open processes
 * @pagetable_list: List of open pagetables
 * @ptlock: Lock for accessing the pagetable list
 * @process_mutex: Mutex for accessing the process list
 * @devlock: Mutex protecting the device list
 * @stats: Struct containing atomic memory statistics
 * @full_cache_threshold: the threshold that triggers a full cache flush
 * @workqueue: Pointer to a single threaded workqueue
 * @mem_workqueue: Pointer to a workqueue for deferring memory entries
 */
struct kgsl_driver {
	struct cdev cdev;
	dev_t major;
	struct class *class;
	struct device virtdev;
	struct kobject *ptkobj;
	struct kobject *prockobj;
	struct kgsl_device *devp[KGSL_DEVICE_MAX];
	struct list_head process_list;
	struct list_head pagetable_list;
	spinlock_t ptlock;
	struct mutex process_mutex;
	struct mutex devlock;
	struct {
		atomic_long_t vmalloc;
		atomic_long_t vmalloc_max;
		atomic_long_t page_alloc;
		atomic_long_t page_alloc_max;
		atomic_long_t coherent;
		atomic_long_t coherent_max;
		atomic_long_t secure;
		atomic_long_t secure_max;
		atomic_long_t mapped;
		atomic_long_t mapped_max;
	} stats;
	unsigned int full_cache_threshold;
	struct workqueue_struct *workqueue;
	struct workqueue_struct *mem_workqueue;
	struct kthread_worker worker;
	struct task_struct *worker_thread;
};
extern struct kgsl_driver kgsl_driver;
extern struct mutex kgsl_mmu_sync;

struct kgsl_pagetable;
struct kgsl_memdesc;
struct kgsl_memdesc_ops {
	unsigned int vmflags;
	int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
			struct vm_fault *);
	void (*free)(struct kgsl_memdesc *memdesc);
	int (*map_kernel)(struct kgsl_memdesc *);
	void (*unmap_kernel)(struct kgsl_memdesc *);
};
/* Internal definitions for memdesc->priv */
#define KGSL_MEMDESC_GUARD_PAGE BIT(0)
/* Set if the memdesc is mapped into all pagetables */
#define KGSL_MEMDESC_GLOBAL BIT(1)
/* The memdesc is frozen during a snapshot */
#define KGSL_MEMDESC_FROZEN BIT(2)
/* The memdesc is mapped into a pagetable */
#define KGSL_MEMDESC_MAPPED BIT(3)
/* The memdesc is secured for content protection */
#define KGSL_MEMDESC_SECURE BIT(4)
/* Memory is accessible in privileged mode */
#define KGSL_MEMDESC_PRIVILEGED BIT(6)
/* The memdesc is TZ locked content protection */
#define KGSL_MEMDESC_TZ_LOCKED BIT(7)
/* The memdesc is allocated through contiguous memory */
#define KGSL_MEMDESC_CONTIG BIT(8)
/**
 * struct kgsl_memdesc - GPU memory object descriptor
 * @pagetable: Pointer to the pagetable that the object is mapped in
 * @hostptr: Kernel virtual address
 * @hostptr_count: Number of threads using hostptr
 * @useraddr: User virtual address (if applicable)
 * @gpuaddr: GPU virtual address
 * @physaddr: Physical address of the memory object
 * @size: Size of the memory object
 * @mapsize: Size of memory mapped in userspace
 * @priv: Internal flags and settings
 * @sgt: Scatter gather table for allocated pages
 * @ops: Function hooks for the memdesc memory type
 * @flags: Flags set from userspace
 * @dev: Pointer to the struct device that owns this memory
 * @attrs: dma attributes for this memory
 * @pages: An array of pointers to allocated pages
 * @page_count: Total number of pages allocated
 * @cur_bindings: Number of sparse pages actively bound
 */
struct kgsl_memdesc {
	struct kgsl_pagetable *pagetable;
	void *hostptr;
	unsigned int hostptr_count;
	unsigned long useraddr;
	uint64_t gpuaddr;
	phys_addr_t physaddr;
	uint64_t size;
	uint64_t mapsize;
	unsigned int priv;
	struct sg_table *sgt;
	struct kgsl_memdesc_ops *ops;
	uint64_t flags;
	struct device *dev;
	struct dma_attrs attrs;
	struct page **pages;
	unsigned int page_count;
	unsigned int cur_bindings;
};
/*
 * List of different memory entry types. The usermem enum
 * starts at 0, which we use for allocated memory, so 1 is
 * added to the enum values.
 */
#define KGSL_MEM_ENTRY_KERNEL 0
#define KGSL_MEM_ENTRY_USER (KGSL_USER_MEM_TYPE_ADDR + 1)
#define KGSL_MEM_ENTRY_ION (KGSL_USER_MEM_TYPE_ION + 1)
#define KGSL_MEM_ENTRY_MAX (KGSL_USER_MEM_TYPE_MAX + 1)

/* symbolic table for trace and debugfs */
#define KGSL_MEM_TYPES \
	{ KGSL_MEM_ENTRY_KERNEL, "gpumem" }, \
	{ KGSL_MEM_ENTRY_USER, "usermem" }, \
	{ KGSL_MEM_ENTRY_ION, "ion" }
/**
 * struct kgsl_mem_entry - a userspace memory allocation
 * @refcount: reference count. Currently userspace can only
 * hold a single reference count, but the kernel may hold more.
 * @memdesc: description of the memory
 * @priv_data: type-specific data, such as the dma-buf attachment pointer.
 * @node: rb_node for the gpu address lookup rb tree
 * @id: idr index for this entry, can be used to find memory that does not have
 * a valid GPU address.
 * @priv: back pointer to the process that owns this memory
 * @pending_free: if !0, userspace requested that this memory be freed, but
 * there are still references to it.
 * @dev_priv: back pointer to the device file that created this entry.
 * @metadata: String containing user specified metadata for the entry
 * @work: Work struct used to schedule a kgsl_mem_entry_put in atomic contexts
 * @bind_lock: Lock for sparse memory bindings
 * @bind_tree: RB Tree for sparse memory bindings
 */
struct kgsl_mem_entry {
	struct kref refcount;
	struct kgsl_memdesc memdesc;
	void *priv_data;
	struct rb_node node;
	unsigned int id;
	struct kgsl_process_private *priv;
	int pending_free;
	char metadata[KGSL_GPUOBJ_ALLOC_METADATA_MAX + 1];
	struct work_struct work;
	spinlock_t bind_lock;
	struct rb_root bind_tree;
};
struct kgsl_device_private;
struct kgsl_event_group;

typedef void (*kgsl_event_func)(struct kgsl_device *, struct kgsl_event_group *,
		void *, int);
/**
 * struct kgsl_event - KGSL GPU timestamp event
 * @device: Pointer to the KGSL device that owns the event
 * @context: Pointer to the context that owns the event
 * @timestamp: Timestamp for the event to expire
 * @func: Callback function for the event when it expires
 * @priv: Private data passed to the callback function
 * @node: List node for the kgsl_event_group list
 * @created: Jiffies when the event was created
 * @work: Work struct for dispatching the callback
 * @result: KGSL event result type to pass to the callback
 * @group: The event group this event belongs to
 */
struct kgsl_event {
	struct kgsl_device *device;
	struct kgsl_context *context;
	unsigned int timestamp;
	kgsl_event_func func;
	void *priv;
	struct list_head node;
	unsigned int created;
	struct kthread_work work;
	int result;
	struct kgsl_event_group *group;
};
typedef int (*readtimestamp_func)(struct kgsl_device *, void *,
		enum kgsl_timestamp_type, unsigned int *);
/**
 * struct kgsl_event_group - A list of GPU events
 * @context: Pointer to the active context for the events
 * @lock: Spinlock for protecting the list
 * @events: List of active GPU events
 * @group: Node for the master group list
 * @processed: Last processed timestamp
 * @name: String name for the group (for the debugfs file)
 * @readtimestamp: Function pointer to read a timestamp
 * @priv: Priv member to pass to the readtimestamp function
 */
struct kgsl_event_group {
	struct kgsl_context *context;
	spinlock_t lock;
	struct list_head events;
	struct list_head group;
	unsigned int processed;
	char name[64];
	readtimestamp_func readtimestamp;
	void *priv;
};
/**
 * struct kgsl_protected_registers - Protected register range
 * @base: Offset of the range to be protected
 * @range: Range (# of registers = 2 ** range)
 */
struct kgsl_protected_registers {
	unsigned int base;
	int range;
};
/**
 * struct sparse_bind_object - Bind metadata
 * @node: Node for the rb tree
 * @p_memdesc: Physical memdesc bound to
 * @v_off: Offset of bind in the virtual entry
 * @p_off: Offset of bind in the physical memdesc
 * @size: Size of the bind
 * @flags: Flags for the bind
 */
struct sparse_bind_object {
	struct rb_node node;
	struct kgsl_memdesc *p_memdesc;
	uint64_t v_off;
	uint64_t p_off;
	uint64_t size;
	uint64_t flags;
};
long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private
				*dev_priv, unsigned int cmd, void *data);
long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data);
long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
						struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data);
long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_sync_cache_bulk(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_cff_sync_gpuobj(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_alloc(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_sync(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data);

long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_sparse_unbind(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data);
void kgsl_mem_entry_destroy(struct kref *kref);

void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
		int *egl_surface_count, int *egl_image_count);

struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr);

struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id);

extern const struct dev_pm_ops kgsl_pm_ops;

int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
int kgsl_resume_driver(struct platform_device *pdev);
static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
				uint64_t gpuaddr, uint64_t size)
{
	/* set a minimum size to search for */
	if (!size)
		size = 1;

	/* don't overflow */
	if (size > U64_MAX - gpuaddr)
		return 0;

	if (gpuaddr >= memdesc->gpuaddr &&
	    ((gpuaddr + size) <= (memdesc->gpuaddr + memdesc->size)))
		return 1;

	return 0;
}
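
/*
 * Illustrative sketch, not part of the original header: bounds-check a
 * user-supplied GPU address range against a memdesc before acting on it.
 * The helper name and the -ERANGE return value are assumptions.
 */
static inline int kgsl_example_check_range(const struct kgsl_memdesc *memdesc,
		uint64_t gpuaddr, uint64_t size)
{
	if (!kgsl_gpuaddr_in_memdesc(memdesc, gpuaddr, size))
		return -ERANGE;

	return 0;
}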
static inline void *kgsl_memdesc_map(struct kgsl_memdesc *memdesc)
{
	if (memdesc->ops && memdesc->ops->map_kernel)
		memdesc->ops->map_kernel(memdesc);

	return memdesc->hostptr;
}

static inline void kgsl_memdesc_unmap(struct kgsl_memdesc *memdesc)
{
	if (memdesc->ops && memdesc->ops->unmap_kernel)
		memdesc->ops->unmap_kernel(memdesc);
}
static inline void *kgsl_gpuaddr_to_vaddr(struct kgsl_memdesc *memdesc,
		uint64_t gpuaddr)
{
	void *hostptr = NULL;

	if ((gpuaddr >= memdesc->gpuaddr) &&
		(gpuaddr < (memdesc->gpuaddr + memdesc->size)))
		hostptr = kgsl_memdesc_map(memdesc);

	return hostptr != NULL ? hostptr + (gpuaddr - memdesc->gpuaddr) : NULL;
}
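
/*
 * Illustrative sketch, not part of the original header: read a 32-bit value
 * that the GPU wrote at 'gpuaddr', mapping the backing memory into the
 * kernel on demand and dropping the mapping afterwards. The helper name is
 * hypothetical and real callers may also need cache maintenance.
 */
static inline int kgsl_example_read_gpu_word(struct kgsl_memdesc *memdesc,
		uint64_t gpuaddr, unsigned int *value)
{
	unsigned int *ptr = kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr);

	if (ptr == NULL)
		return -EINVAL;

	*value = *ptr;
	kgsl_memdesc_unmap(memdesc);
	return 0;
}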
static inline int timestamp_cmp(unsigned int a, unsigned int b)
{
	/* check for equal */
	if (a == b)
		return 0;

	/* check for greater-than for non-rollover case */
	if ((a > b) && (a - b < KGSL_TIMESTAMP_WINDOW))
		return 1;

	/* check for greater-than for rollover case
	 * note that <= is required to ensure that consistent
	 * results are returned for values whose difference is
	 * equal to the window size
	 */
	a += KGSL_TIMESTAMP_WINDOW;
	b += KGSL_TIMESTAMP_WINDOW;
	return ((a > b) && (a - b <= KGSL_TIMESTAMP_WINDOW)) ? 1 : -1;
}
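
/*
 * Illustrative sketch, not part of the original header: decide whether a
 * retired timestamp has reached a target, treating a 32-bit wrap-around as
 * "newer" as long as the two values are within KGSL_TIMESTAMP_WINDOW. For
 * example, retired == 0x00000005 still satisfies target == 0xfffffffa.
 */
static inline bool kgsl_example_timestamp_reached(unsigned int retired,
		unsigned int target)
{
	return timestamp_cmp(retired, target) >= 0;
}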
/**
 * kgsl_schedule_work() - Schedule a work item on the KGSL workqueue
 * @work: work item to schedule
 */
static inline void kgsl_schedule_work(struct work_struct *work)
{
	queue_work(kgsl_driver.workqueue, work);
}
static inline int
kgsl_mem_entry_get(struct kgsl_mem_entry *entry)
{
	return kref_get_unless_zero(&entry->refcount);
}

static inline void
kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
{
	kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}
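
/*
 * Illustrative sketch, not part of the original header: the usual pattern is
 * to take a reference before touching an entry outside of the lookup lock
 * and to drop it when done; kgsl_mem_entry_destroy() runs on the final put.
 */
static inline void kgsl_example_use_entry(struct kgsl_mem_entry *entry)
{
	if (!kgsl_mem_entry_get(entry))
		return;

	/* ... entry->memdesc may be used safely here ... */

	kgsl_mem_entry_put(entry);
}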
/**
 * kgsl_addr_range_overlap() - Checks if 2 ranges overlap
 * @gpuaddr1: Start of first address range
 * @size1: Size of first address range
 * @gpuaddr2: Start of second address range
 * @size2: Size of second address range
 *
 * Function returns true if the 2 given address ranges overlap
 */
static inline bool kgsl_addr_range_overlap(uint64_t gpuaddr1,
		uint64_t size1, uint64_t gpuaddr2, uint64_t size2)
{
	if ((size1 > (U64_MAX - gpuaddr1)) || (size2 > (U64_MAX - gpuaddr2)))
		return false;

	return !(((gpuaddr1 + size1) <= gpuaddr2) ||
		(gpuaddr1 >= (gpuaddr2 + size2)));
}
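
/*
 * Illustrative sketch, not part of the original header: reject a sparse bind
 * request whose virtual range collides with an existing binding. The helper
 * name and its use of struct sparse_bind_object are assumptions.
 */
static inline bool kgsl_example_bind_conflicts(struct sparse_bind_object *obj,
		uint64_t v_off, uint64_t size)
{
	return kgsl_addr_range_overlap(obj->v_off, obj->size, v_off, size);
}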
/**
 * kgsl_malloc() - Use either kzalloc or vmalloc to allocate memory
 * @size: Size of the desired allocation
 *
 * Allocate a block of memory for the driver - if it is small, try to allocate
 * it from kmalloc (fast!); otherwise we need to go with vmalloc (safe!)
 */
static inline void *kgsl_malloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);

	return vmalloc(size);
}

/**
 * kgsl_free() - Free memory allocated by kgsl_malloc()
 * @ptr: Pointer to the memory to free
 *
 * Free the memory, be it in vmalloc or kmalloc space
 */
static inline void kgsl_free(void *ptr)
{
	if (ptr != NULL && is_vmalloc_addr(ptr))
		return vfree(ptr);

	kfree(ptr);
}
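
/*
 * Illustrative sketch, not part of the original header: size a temporary
 * table from an untrusted count and route it through kgsl_malloc() so small
 * requests use kzalloc() and large ones fall back to vmalloc(). The helper
 * name and the overflow limit are assumptions; note that only the kzalloc()
 * path returns zeroed memory.
 */
static inline void *kgsl_example_alloc_table(size_t count)
{
	if (count == 0 || count > (SIZE_MAX / sizeof(uint64_t)))
		return NULL;

	return kgsl_malloc(count * sizeof(uint64_t));
}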
static inline int _copy_from_user(void *dest, void __user *src,
		unsigned int ksize, unsigned int usize)
{
	unsigned int copy = ksize < usize ? ksize : usize;

	if (copy == 0)
		return -EINVAL;

	return copy_from_user(dest, src, copy) ? -EFAULT : 0;
}

static inline void __user *to_user_ptr(uint64_t address)
{
	return (void __user *)(uintptr_t)address;
}
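
/*
 * Illustrative sketch, not part of the original header: an ioctl-style
 * handler copies a user structure whose size may differ between ABI
 * versions, then turns the u64 handle embedded in it into a __user pointer.
 * The structure, its fields and the helper name are all hypothetical.
 */
struct kgsl_example_param {
	uint64_t ptr;	/* user buffer packed as a 64-bit value */
	uint64_t size;
};

static inline void __user *kgsl_example_param_ptr(
		struct kgsl_example_param *kparam,
		void __user *uptr, unsigned int usize)
{
	if (_copy_from_user(kparam, uptr, sizeof(*kparam), usize))
		return NULL;

	return to_user_ptr(kparam->ptr);
}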
static inline void kgsl_gpu_sysfs_add_link(struct kobject *dst,
			struct kobject *src, const char *src_name,
			const char *dst_name)
{
	struct kernfs_node *old;

	if (dst == NULL || src == NULL)
		return;

	old = sysfs_get_dirent(src->sd, src_name);
	if (IS_ERR_OR_NULL(old))
		return;

	kernfs_create_link(dst->sd, dst_name, old);
}

#endif /* __KGSL_H */