android-x86/external-libdrm: linux-core/drm_fence.c
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/*
 * Convenience function to be called by fence::wait methods that
 * need polling.
 */

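/*
 * Returns 0 once all types in @mask have signaled, -EBUSY if @end_jiffies
 * passes first, or -EAGAIN if an interruptible wait is broken by a signal.
 */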
int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
                           int interruptible, uint32_t mask,
                           unsigned long end_jiffies)
{
        struct drm_device *dev = fence->dev;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
        uint32_t count = 0;
        int ret;

        DECLARE_WAITQUEUE(entry, current);
        add_wait_queue(&fc->fence_queue, &entry);

        ret = 0;

        for (;;) {
                __set_current_state((interruptible) ?
                                    TASK_INTERRUPTIBLE :
                                    TASK_UNINTERRUPTIBLE);
                if (drm_fence_object_signaled(fence, mask))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        ret = -EBUSY;
                        break;
                }
                if (lazy)
                        schedule_timeout(1);
                else if ((++count & 0x0F) == 0) {
                        __set_current_state(TASK_RUNNING);
                        schedule();
                        __set_current_state((interruptible) ?
                                            TASK_INTERRUPTIBLE :
                                            TASK_UNINTERRUPTIBLE);
                }
                if (interruptible && signal_pending(current)) {
                        ret = -EAGAIN;
                        break;
                }
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->fence_queue, &entry);
        return ret;
}
EXPORT_SYMBOL(drm_fence_wait_polling);

/*
 * Typically called by the IRQ handler.
 */

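/*
 * drm_fence_handler() marks the reported @type bits as signaled on every
 * fence in the class ring whose sequence is covered by @sequence; when the
 * reported type includes DRM_FENCE_TYPE_EXE, each fence's native types are
 * added as well. Driver flushes are requested where needed, completely
 * signaled fences are unlinked from the ring, and waiters are woken when a
 * type they wait on signals. A non-zero @error is recorded on the newest
 * covered fence, which is then treated as fully signaled.
 */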
void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
                       uint32_t sequence, uint32_t type, uint32_t error)
{
        int wake = 0;
        uint32_t diff;
        uint32_t relevant_type;
        uint32_t new_type;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        struct list_head *head;
        struct drm_fence_object *fence, *next;
        int found = 0;

        if (list_empty(&fc->ring))
                return;

        list_for_each_entry(fence, &fc->ring, ring) {
                diff = (sequence - fence->sequence) & driver->sequence_mask;
                if (diff > driver->wrap_diff) {
                        found = 1;
                        break;
                }
        }

        fc->waiting_types &= ~type;
        head = (found) ? &fence->ring : &fc->ring;

        list_for_each_entry_safe_reverse(fence, next, head, ring) {
                if (&fence->ring == &fc->ring)
                        break;

                if (error) {
                        fence->error = error;
                        fence->signaled_types = fence->type;
                        list_del_init(&fence->ring);
                        wake = 1;
                        break;
                }

                if (type & DRM_FENCE_TYPE_EXE)
                        type |= fence->native_types;

                relevant_type = type & fence->type;
                new_type = (fence->signaled_types | relevant_type) ^
                        fence->signaled_types;

                if (new_type) {
                        fence->signaled_types |= new_type;
                        DRM_DEBUG("Fence %p signaled 0x%08x\n",
                                  fence, fence->signaled_types);

                        if (driver->needed_flush)
                                fc->pending_flush |= driver->needed_flush(fence);

                        if (new_type & fence->waiting_types)
                                wake = 1;
                }

                fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;

                if (!(fence->type & ~fence->signaled_types)) {
                        DRM_DEBUG("Fence completely signaled %p\n",
                                  fence);
                        list_del_init(&fence->ring);
                }
        }

        /*
         * Reinstate lost waiting types.
         */

        if ((fc->waiting_types & type) != type) {
                head = head->prev;
                list_for_each_entry(fence, head, ring) {
                        if (&fence->ring == &fc->ring)
                                break;
                        diff = (fc->highest_waiting_sequence - fence->sequence) &
                                driver->sequence_mask;
                        if (diff > driver->wrap_diff)
                                break;

                        fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
                }
        }

        if (wake)
                wake_up_all(&fc->fence_queue);
}
EXPORT_SYMBOL(drm_fence_handler);

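/*
 * Take a fence off its class ring under the fence manager lock.
 */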
static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
{
        struct drm_fence_manager *fm = &dev->fm;
        unsigned long flags;

        write_lock_irqsave(&fm->lock, flags);
        list_del_init(ring);
        write_unlock_irqrestore(&fm->lock, flags);
}

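/*
 * Drop a fence reference with dev->struct_mutex held; the object is
 * unringed and freed when the last reference goes away, and the caller's
 * pointer is cleared.
 */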
void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
{
        struct drm_fence_object *tmp_fence = *fence;
        struct drm_device *dev = tmp_fence->dev;
        struct drm_fence_manager *fm = &dev->fm;

        DRM_ASSERT_LOCKED(&dev->struct_mutex);
        *fence = NULL;
        if (atomic_dec_and_test(&tmp_fence->usage)) {
                drm_fence_unring(dev, &tmp_fence->ring);
                DRM_DEBUG("Destroyed a fence object %p\n",
                          tmp_fence);
                atomic_dec(&fm->count);
                drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
        }
}
EXPORT_SYMBOL(drm_fence_usage_deref_locked);

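/*
 * Drop a fence reference without the caller holding dev->struct_mutex;
 * the mutex is only taken if the reference count drops to zero and the
 * object has to be destroyed.
 */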
void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
{
        struct drm_fence_object *tmp_fence = *fence;
        struct drm_device *dev = tmp_fence->dev;
        struct drm_fence_manager *fm = &dev->fm;

        *fence = NULL;
        if (atomic_dec_and_test(&tmp_fence->usage)) {
                mutex_lock(&dev->struct_mutex);
                if (atomic_read(&tmp_fence->usage) == 0) {
                        drm_fence_unring(dev, &tmp_fence->ring);
                        atomic_dec(&fm->count);
                        drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
                }
                mutex_unlock(&dev->struct_mutex);
        }
}
EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);

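/*
 * Take an additional fence reference; dev->struct_mutex must be held.
 */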
struct drm_fence_object
*drm_fence_reference_locked(struct drm_fence_object *src)
{
        DRM_ASSERT_LOCKED(&src->dev->struct_mutex);

        atomic_inc(&src->usage);
        return src;
}

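/*
 * Take an additional fence reference and store it in *dst, acquiring
 * dev->struct_mutex internally.
 */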
void drm_fence_reference_unlocked(struct drm_fence_object **dst,
                                  struct drm_fence_object *src)
{
        mutex_lock(&src->dev->struct_mutex);
        *dst = src;
        atomic_inc(&src->usage);
        mutex_unlock(&src->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_fence_reference_unlocked);

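/*
 * Check whether all types in @mask have signaled on @fence, letting the
 * driver poll the hardware first if they have not and a poll hook exists.
 */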
int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
{
        unsigned long flags;
        int signaled;
        struct drm_device *dev = fence->dev;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_driver *driver = dev->driver->fence_driver;

        mask &= fence->type;
        read_lock_irqsave(&fm->lock, flags);
        signaled = (mask & fence->signaled_types) == mask;
        read_unlock_irqrestore(&fm->lock, flags);
        if (!signaled && driver->poll) {
                write_lock_irqsave(&fm->lock, flags);
                driver->poll(dev, fence->fence_class, mask);
                signaled = (mask & fence->signaled_types) == mask;
                write_unlock_irqrestore(&fm->lock, flags);
        }
        return signaled;
}
EXPORT_SYMBOL(drm_fence_object_signaled);


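/*
 * Note that someone is waiting on the given @type bits of @fence and
 * initiate whatever flushing and polling the driver needs for them to
 * signal eventually.
 */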
int drm_fence_object_flush(struct drm_fence_object *fence,
                           uint32_t type)
{
        struct drm_device *dev = fence->dev;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        unsigned long irq_flags;
        uint32_t saved_pending_flush;
        uint32_t diff;
        int call_flush;

        if (type & ~fence->type) {
                DRM_ERROR("Flush trying to extend fence type, "
                          "0x%x, 0x%x\n", type, fence->type);
                return -EINVAL;
        }

        write_lock_irqsave(&fm->lock, irq_flags);
        fence->waiting_types |= type;
        fc->waiting_types |= fence->waiting_types;
        diff = (fence->sequence - fc->highest_waiting_sequence) &
                driver->sequence_mask;

        if (diff < driver->wrap_diff)
                fc->highest_waiting_sequence = fence->sequence;

        /*
         * fence->waiting_types has changed. Determine whether
         * we need to initiate some kind of flush as a result of this.
         */

        saved_pending_flush = fc->pending_flush;
        if (driver->needed_flush)
                fc->pending_flush |= driver->needed_flush(fence);

        if (driver->poll)
                driver->poll(dev, fence->fence_class, fence->waiting_types);

        call_flush = fc->pending_flush;
        write_unlock_irqrestore(&fm->lock, irq_flags);

        if (call_flush && driver->flush)
                driver->flush(dev, fence->fence_class);

        return 0;
}
EXPORT_SYMBOL(drm_fence_object_flush);

/*
 * Make sure old fence objects are signaled before their fence sequences are
 * wrapped around and reused.
 */

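/*
 * Any fence lagging @sequence by more than driver->flush_diff gets all of
 * its types marked as waited on and a driver flush requested, so that it
 * signals before its sequence number can be reused.
 */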
void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
                         uint32_t sequence)
{
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
        struct drm_fence_object *fence;
        unsigned long irq_flags;
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        int call_flush;

        uint32_t diff;

        write_lock_irqsave(&fm->lock, irq_flags);

        list_for_each_entry_reverse(fence, &fc->ring, ring) {
                diff = (sequence - fence->sequence) & driver->sequence_mask;
                if (diff <= driver->flush_diff)
                        break;

                fence->waiting_types = fence->type;
                fc->waiting_types |= fence->type;

                if (driver->needed_flush)
                        fc->pending_flush |= driver->needed_flush(fence);
        }

        if (driver->poll)
                driver->poll(dev, fence_class, fc->waiting_types);

        call_flush = fc->pending_flush;
        write_unlock_irqrestore(&fm->lock, irq_flags);

        /*
         * Use the fence_class argument here rather than the list cursor:
         * if the loop above walked the whole ring, "fence" no longer points
         * at a valid fence object.
         */
        if (call_flush && driver->flush)
                driver->flush(dev, fence_class);

        /*
         * FIXME: Should we implement a wait here for really old fences?
         */

}
EXPORT_SYMBOL(drm_fence_flush_old);

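/*
 * Wait for the types in @mask to signal on @fence, preferring the driver's
 * own wait hook, then IRQ-driven waiting, and falling back to timed
 * polling. The generic paths return 0 on success, -EBUSY on timeout and
 * -EAGAIN when a signal interrupts the wait.
 */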
int drm_fence_object_wait(struct drm_fence_object *fence,
                          int lazy, int ignore_signals, uint32_t mask)
{
        struct drm_device *dev = fence->dev;
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
        int ret = 0;
        /* Absolute deadline for the polling fallback below. */
        unsigned long _end = jiffies + 3 * DRM_HZ;

        if (mask & ~fence->type) {
                DRM_ERROR("Wait trying to extend fence type"
                          " 0x%08x 0x%08x\n", mask, fence->type);
                BUG();
                return -EINVAL;
        }

        if (driver->wait)
                return driver->wait(fence, lazy, !ignore_signals, mask);

        drm_fence_object_flush(fence, mask);
        if (driver->has_irq(dev, fence->fence_class, mask)) {
                if (!ignore_signals)
                        ret = wait_event_interruptible_timeout
                                (fc->fence_queue,
                                 drm_fence_object_signaled(fence, mask),
                                 3 * DRM_HZ);
                else
                        ret = wait_event_timeout
                                (fc->fence_queue,
                                 drm_fence_object_signaled(fence, mask),
                                 3 * DRM_HZ);

                if (unlikely(ret == -ERESTARTSYS))
                        return -EAGAIN;

                if (unlikely(ret == 0))
                        return -EBUSY;

                return 0;
        }

        return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
                                      _end);
}
EXPORT_SYMBOL(drm_fence_object_wait);

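/*
 * (Re)emit a fence: ask the driver for a fresh sequence number, reset the
 * fence's bookkeeping and queue it at the tail of the class ring.
 */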
int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
                          uint32_t fence_class, uint32_t type)
{
        struct drm_device *dev = fence->dev;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
        unsigned long flags;
        uint32_t sequence;
        uint32_t native_types;
        int ret;

        drm_fence_unring(dev, &fence->ring);
        ret = driver->emit(dev, fence_class, fence_flags, &sequence,
                           &native_types);
        if (ret)
                return ret;

        write_lock_irqsave(&fm->lock, flags);
        fence->fence_class = fence_class;
        fence->type = type;
        fence->waiting_types = 0;
        fence->signaled_types = 0;
        fence->error = 0;
        fence->sequence = sequence;
        fence->native_types = native_types;
        if (list_empty(&fc->ring))
                fc->highest_waiting_sequence = sequence - 1;
        list_add_tail(&fence->ring, &fc->ring);
        fc->latest_queued_sequence = sequence;
        write_unlock_irqrestore(&fm->lock, flags);
        return 0;
}
EXPORT_SYMBOL(drm_fence_object_emit);

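/*
 * Initialize an already allocated fence object, optionally emitting it
 * immediately when DRM_FENCE_FLAG_EMIT is set in @fence_flags.
 */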
static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
                                 uint32_t type,
                                 uint32_t fence_flags,
                                 struct drm_fence_object *fence)
{
        int ret = 0;
        unsigned long flags;
        struct drm_fence_manager *fm = &dev->fm;

        mutex_lock(&dev->struct_mutex);
        atomic_set(&fence->usage, 1);
        mutex_unlock(&dev->struct_mutex);

        write_lock_irqsave(&fm->lock, flags);
        INIT_LIST_HEAD(&fence->ring);

        /*
         *  Avoid hitting BUG() for kernel-only fence objects.
         */

        fence->fence_class = fence_class;
        fence->type = type;
        fence->signaled_types = 0;
        fence->waiting_types = 0;
        fence->sequence = 0;
        fence->error = 0;
        fence->dev = dev;
        write_unlock_irqrestore(&fm->lock, flags);
        if (fence_flags & DRM_FENCE_FLAG_EMIT) {
                ret = drm_fence_object_emit(fence, fence_flags,
                                            fence->fence_class, type);
        }
        return ret;
}

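/*
 * Allocate and initialize a new fence object. On success the caller holds
 * the single reference, to be dropped with drm_fence_usage_deref_unlocked().
 */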
int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
                            uint32_t type, unsigned flags,
                            struct drm_fence_object **c_fence)
{
        struct drm_fence_object *fence;
        int ret;
        struct drm_fence_manager *fm = &dev->fm;

        fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
        if (!fence) {
                DRM_ERROR("Out of memory creating fence object\n");
                return -ENOMEM;
        }
        ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
        if (ret) {
                drm_fence_usage_deref_unlocked(&fence);
                return ret;
        }
        *c_fence = fence;
        atomic_inc(&fm->count);

        return 0;
}
EXPORT_SYMBOL(drm_fence_object_create);
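/*
 * Typical in-kernel use of the functions above (a sketch only; the fence
 * class, type and flags are driver dependent):
 *
 *      struct drm_fence_object *fence;
 *      int ret;
 *
 *      ret = drm_fence_object_create(dev, 0, DRM_FENCE_TYPE_EXE,
 *                                    DRM_FENCE_FLAG_EMIT, &fence);
 *      if (!ret) {
 *              ret = drm_fence_object_wait(fence, 0, 0, fence->type);
 *              drm_fence_usage_deref_unlocked(&fence);
 *      }
 */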
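/*
 * Set up the per-device fence manager: one ring and wait queue per fence
 * class, but only if the driver registered a fence driver.
 */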
void drm_fence_manager_init(struct drm_device *dev)
{
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fence_class;
        struct drm_fence_driver *fed = dev->driver->fence_driver;
        int i;
        unsigned long flags;

        rwlock_init(&fm->lock);
        write_lock_irqsave(&fm->lock, flags);
        fm->initialized = 0;
        if (!fed)
                goto out_unlock;

        fm->initialized = 1;
        fm->num_classes = fed->num_classes;
        BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);

        for (i = 0; i < fm->num_classes; ++i) {
                fence_class = &fm->fence_class[i];

                memset(fence_class, 0, sizeof(*fence_class));
                INIT_LIST_HEAD(&fence_class->ring);
                DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
        }

        atomic_set(&fm->count, 0);
 out_unlock:
        write_unlock_irqrestore(&fm->lock, flags);
}

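/*
 * drm_fence_manager_init() allocates nothing dynamically, so there is
 * currently nothing to tear down here.
 */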
void drm_fence_manager_takedown(struct drm_device *dev)
{
}
540