1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
35 * Convenience function to be called by fence::wait methods that
 * need polling.
/*
 * Wait for the @mask bits of @fence to signal by polling, giving up at
 * @end_jiffies.  Sleeps on the per-class fence_queue between polls and
 * honors pending signals when @interruptible is nonzero.
 * NOTE(review): this extraction is missing lines (opening brace, loop
 * construct, return statements) -- comments below cover only the
 * visible code.
 */
39 int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
40 int interruptible, uint32_t mask,
41 unsigned long end_jiffies)
43 struct drm_device *dev = fence->dev;
44 struct drm_fence_manager *fm = &dev->fm;
45 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
/* Register on the class waitqueue so drm_fence_handler() can wake us. */
49 DECLARE_WAITQUEUE(entry, current);
50 add_wait_queue(&fc->fence_queue, &entry);
/* Sleep interruptibly only when the caller asked for it. */
55 __set_current_state((interruptible) ?
57 TASK_UNINTERRUPTIBLE);
/* Done as soon as every bit in @mask is signaled. */
58 if (drm_fence_object_signaled(fence, mask))
/* Deadline exceeded -- the elided branch presumably breaks out with a
 * timeout error; TODO confirm against the full source. */
60 if (time_after_eq(jiffies, end_jiffies)) {
/* Every 16th iteration, briefly go runnable to avoid hogging the CPU. */
66 else if ((++count & 0x0F) == 0){
67 __set_current_state(TASK_RUNNING);
69 __set_current_state((interruptible) ?
71 TASK_UNINTERRUPTIBLE);
/* Bail out early on a pending signal for interruptible waits. */
73 if (interruptible && signal_pending(current)) {
78 __set_current_state(TASK_RUNNING);
79 remove_wait_queue(&fc->fence_queue, &entry);
82 EXPORT_SYMBOL(drm_fence_wait_polling);
85 * Typically called by the IRQ handler.
/*
 * Process a fence signal report for @fence_class (typically from the
 * IRQ handler): mark every fence on the ring whose sequence has been
 * reached as signaled for the @type bits (plus driver-native types
 * after an EXE signal), record @error, retire fully signaled fences,
 * and wake waiters on the class fence_queue.
 * NOTE(review): lines are missing from this extraction (the 'found'
 * computation, break statements, parts of the error path); comments
 * describe only the visible code.
 */
88 void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
89 uint32_t sequence, uint32_t type, uint32_t error)
93 uint32_t relevant_type;
95 struct drm_fence_manager *fm = &dev->fm;
96 struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
97 struct drm_fence_driver *driver = dev->driver->fence_driver;
98 struct list_head *head;
99 struct drm_fence_object *fence, *next;
/* Nothing queued for this class: nothing can have signaled. */
102 if (list_empty(&fc->ring))
/* Locate the first fence NOT yet reached by @sequence, using a
 * wrap-safe masked difference compared against driver->wrap_diff. */
105 list_for_each_entry(fence, &fc->ring, ring) {
106 diff = (sequence - fence->sequence) & driver->sequence_mask;
107 if (diff > driver->wrap_diff) {
113 fc->waiting_types &= ~type;
/* Walk backwards (oldest-first) from the cut-off point, or over the
 * whole ring if every fence was reached. */
114 head = (found) ? &fence->ring : &fc->ring;
116 list_for_each_entry_safe_reverse(fence, next, head, ring) {
117 if (&fence->ring == &fc->ring)
/* Error path: mark the fence completely signaled and retire it. */
121 fence->error = error;
122 fence->signaled_types = fence->type;
123 list_del_init(&fence->ring);
/* An EXE signal implies the fence's driver-native types as well. */
128 if (type & DRM_FENCE_TYPE_EXE)
129 type |= fence->native_types;
/* Bits newly signaled on this fence by this report. */
131 relevant_type = type & fence->type;
132 new_type = (fence->signaled_types | relevant_type) ^
133 fence->signaled_types;
136 fence->signaled_types |= new_type;
137 DRM_DEBUG("Fence %p signaled 0x%08x\n",
138 fence, fence->signaled_types);
/* Let the driver request any flush needed to make progress. */
140 if (driver->needed_flush)
141 fc->pending_flush |= driver->needed_flush(fence);
143 if (new_type & fence->waiting_types)
/* Keep the class-wide summary of still-unsignaled waiting types. */
147 fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
/* All of the fence's type bits signaled: remove it from the ring. */
149 if (!(fence->type & ~fence->signaled_types)) {
150 DRM_DEBUG("Fence completely signaled %p\n",
152 list_del_init(&fence->ring);
157 * Reinstate lost waiting types.
/* Some waiting types were cleared above but are still waited on by
 * unsignaled fences: re-scan the remaining ring and restore them. */
160 if ((fc->waiting_types & type) != type) {
162 list_for_each_entry(fence, head, ring) {
163 if (&fence->ring == &fc->ring)
165 diff = (fc->highest_waiting_sequence - fence->sequence) &
166 driver->sequence_mask;
167 if (diff > driver->wrap_diff)
170 fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
/* Wake everyone sleeping in drm_fence_wait_polling()/wait_event. */
175 wake_up_all(&fc->fence_queue);
177 EXPORT_SYMBOL(drm_fence_handler);
/*
 * Detach a fence's @ring entry from its class list under the
 * fence-manager write lock.  (The list_del itself is on a line missing
 * from this extraction -- TODO confirm against the full source.)
 */
179 static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
181 struct drm_fence_manager *fm = &dev->fm;
184 write_lock_irqsave(&fm->lock, flags);
186 write_unlock_irqrestore(&fm->lock, flags);
/*
 * Drop a reference on *@fence with dev->struct_mutex held; frees the
 * object when the usage count reaches zero.
 */
189 void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
191 struct drm_fence_object *tmp_fence = *fence;
192 struct drm_device *dev = tmp_fence->dev;
193 struct drm_fence_manager *fm = &dev->fm;
195 DRM_ASSERT_LOCKED(&dev->struct_mutex);
/* Last reference: unlink from the class ring and release the memory. */
197 if (atomic_dec_and_test(&tmp_fence->usage)) {
198 drm_fence_unring(dev, &tmp_fence->ring);
199 DRM_DEBUG("Destroyed a fence object %p\n",
201 atomic_dec(&fm->count);
202 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
205 EXPORT_SYMBOL(drm_fence_usage_deref_locked);
/*
 * Drop a reference on *@fence without holding the struct mutex.  The
 * usage count is re-checked under dev->struct_mutex before freeing so
 * a concurrent drm_fence_reference_locked() cannot race the teardown.
 */
207 void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
209 struct drm_fence_object *tmp_fence = *fence;
210 struct drm_device *dev = tmp_fence->dev;
211 struct drm_fence_manager *fm = &dev->fm;
214 if (atomic_dec_and_test(&tmp_fence->usage)) {
215 mutex_lock(&dev->struct_mutex);
/* Only free if nobody re-referenced the fence before we got the lock. */
216 if (atomic_read(&tmp_fence->usage) == 0) {
217 drm_fence_unring(dev, &tmp_fence->ring);
218 atomic_dec(&fm->count);
219 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
221 mutex_unlock(&dev->struct_mutex);
224 EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
/*
 * Take a reference on @src; the caller must hold dev->struct_mutex.
 * Presumably returns @src -- the return statement is on a line missing
 * from this extraction, TODO confirm.
 */
226 struct drm_fence_object
227 *drm_fence_reference_locked(struct drm_fence_object *src)
229 DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
231 atomic_inc(&src->usage);
/*
 * Take a reference on @src into *@dst, serialized by dev->struct_mutex
 * so it cannot race drm_fence_usage_deref_unlocked().
 */
235 void drm_fence_reference_unlocked(struct drm_fence_object **dst,
236 struct drm_fence_object *src)
238 mutex_lock(&src->dev->struct_mutex);
240 atomic_inc(&src->usage);
241 mutex_unlock(&src->dev->struct_mutex);
243 EXPORT_SYMBOL(drm_fence_reference_unlocked);
/*
 * Report whether all bits in @mask are signaled on @fence.  Fast path
 * checks signaled_types under the read lock; on a miss, asks the
 * driver to poll hardware state under the write lock and re-checks.
 */
245 int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
249 struct drm_device *dev = fence->dev;
250 struct drm_fence_manager *fm = &dev->fm;
251 struct drm_fence_driver *driver = dev->driver->fence_driver;
254 read_lock_irqsave(&fm->lock, flags);
255 signaled = (mask & fence->signaled_types) == mask;
256 read_unlock_irqrestore(&fm->lock, flags);
/* Not yet signaled: have the driver poll, which may update
 * fence->signaled_types, then test again under the write lock. */
257 if (!signaled && driver->poll) {
258 write_lock_irqsave(&fm->lock, flags);
259 driver->poll(dev, fence->fence_class, mask);
260 signaled = (mask & fence->signaled_types) == mask;
261 write_unlock_irqrestore(&fm->lock, flags);
265 EXPORT_SYMBOL(drm_fence_object_signaled);
/*
 * Start whatever flushing is needed so the @type bits of @fence will
 * eventually signal: record the waiting types on the fence and its
 * class, track the highest waiting sequence (wrap-safe), and invoke
 * the driver's flush hook outside the lock if a flush became pending.
 */
268 int drm_fence_object_flush(struct drm_fence_object *fence,
271 struct drm_device *dev = fence->dev;
272 struct drm_fence_manager *fm = &dev->fm;
273 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
274 struct drm_fence_driver *driver = dev->driver->fence_driver;
275 unsigned long irq_flags;
276 uint32_t saved_pending_flush;
/* A flush may only wait on type bits the fence was created with. */
280 if (type & ~fence->type) {
281 DRM_ERROR("Flush trying to extend fence type, "
282 "0x%x, 0x%x\n", type, fence->type);
286 write_lock_irqsave(&fm->lock, irq_flags);
287 fence->waiting_types |= type;
288 fc->waiting_types |= fence->waiting_types;
289 diff = (fence->sequence - fc->highest_waiting_sequence) &
290 driver->sequence_mask;
/* Track the newest sequence anyone is waiting on (wrap-safe compare). */
292 if (diff < driver->wrap_diff)
293 fc->highest_waiting_sequence = fence->sequence;
296 * fence->waiting_types has changed. Determine whether
297 * we need to initiate some kind of flush as a result of this.
300 saved_pending_flush = fc->pending_flush;
301 if (driver->needed_flush)
302 fc->pending_flush |= driver->needed_flush(fence);
305 driver->poll(dev, fence->fence_class, fence->waiting_types);
307 call_flush = fc->pending_flush;
308 write_unlock_irqrestore(&fm->lock, irq_flags);
/* Perform the actual flush without holding the irq-safe lock. */
310 if (call_flush && driver->flush)
311 driver->flush(dev, fence->fence_class);
315 EXPORT_SYMBOL(drm_fence_object_flush);
318 * Make sure old fence objects are signaled before their fence sequences are
319 * wrapped around and reused.
/*
 * Flush fences old enough that their sequence numbers risk being
 * wrapped around and reused: promote each old fence's full type to
 * waiting, poll the driver, and flush if anything became pending.
 */
322 void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
325 struct drm_fence_manager *fm = &dev->fm;
326 struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
327 struct drm_fence_object *fence;
328 unsigned long irq_flags;
329 struct drm_fence_driver *driver = dev->driver->fence_driver;
334 write_lock_irqsave(&fm->lock, irq_flags);
/* Walk oldest-first; stop once fences are within flush_diff of
 * @sequence (the break is on a line missing from this extraction). */
336 list_for_each_entry_reverse(fence, &fc->ring, ring) {
337 diff = (sequence - fence->sequence) & driver->sequence_mask;
338 if (diff <= driver->flush_diff)
341 fence->waiting_types = fence->type;
342 fc->waiting_types |= fence->type;
344 if (driver->needed_flush)
345 fc->pending_flush |= driver->needed_flush(fence);
349 driver->poll(dev, fence_class, fc->waiting_types);
351 call_flush = fc->pending_flush;
352 write_unlock_irqrestore(&fm->lock, irq_flags);
/* NOTE(review): 'fence' is the list cursor left over from the loop
 * above; if the walk completed without breaking it does not point at
 * a valid fence object, so fence->fence_class here looks unsafe.  The
 * function's own fence_class argument (as used in the poll call) seems
 * intended -- verify against the full source before changing. */
354 if (call_flush && driver->flush)
355 driver->flush(dev, fence->fence_class);
358 * FIXME: Should we implement a wait here for really old fences?
362 EXPORT_SYMBOL(drm_fence_flush_old);
/*
 * Wait for the @mask bits of @fence to signal.  Delegates to the
 * driver's wait hook when present; otherwise flushes and either sleeps
 * on the fence queue (IRQ-capable hardware) or falls back to polling.
 * @lazy: passed through to the underlying wait implementation.
 * @ignore_signals: when set, the wait is made non-interruptible.
 */
364 int drm_fence_object_wait(struct drm_fence_object *fence,
365 int lazy, int ignore_signals, uint32_t mask)
367 struct drm_device *dev = fence->dev;
368 struct drm_fence_driver *driver = dev->driver->fence_driver;
369 struct drm_fence_manager *fm = &dev->fm;
370 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
/* Cap the wait at three seconds' worth of jiffies. */
372 unsigned long _end = 3 * DRM_HZ;
/* Cannot wait on type bits the fence does not carry. */
374 if (mask & ~fence->type) {
375 DRM_ERROR("Wait trying to extend fence type"
376 " 0x%08x 0x%08x\n", mask, fence->type);
/* A driver-specific wait implementation takes precedence. */
382 return driver->wait(fence, lazy, !ignore_signals, mask);
384 drm_fence_object_flush(fence, mask);
/* IRQ-driven path: sleep until signaled or the timeout expires. */
385 if (driver->has_irq(dev, fence->fence_class, mask)) {
387 ret = wait_event_interruptible_timeout
389 drm_fence_object_signaled(fence, mask),
392 ret = wait_event_timeout
394 drm_fence_object_signaled(fence, mask),
397 if (unlikely(ret == -ERESTARTSYS))
400 if (unlikely(ret == 0))
/* No usable IRQ: fall back to polling until the deadline. */
406 return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
409 EXPORT_SYMBOL(drm_fence_object_wait);
/*
 * (Re)emit @fence to the command stream for @fence_class: obtain a new
 * sequence number from the driver, reset the fence's signaled state,
 * and queue it at the tail of the class ring.
 */
411 int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
412 uint32_t fence_class, uint32_t type)
414 struct drm_device *dev = fence->dev;
415 struct drm_fence_manager *fm = &dev->fm;
416 struct drm_fence_driver *driver = dev->driver->fence_driver;
417 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
420 uint32_t native_types;
/* Unlink from any previous ring position before re-emitting. */
423 drm_fence_unring(dev, &fence->ring);
424 ret = driver->emit(dev, fence_class, fence_flags, &sequence,
429 write_lock_irqsave(&fm->lock, flags);
430 fence->fence_class = fence_class;
432 fence->waiting_types = 0;
433 fence->signaled_types = 0;
435 fence->sequence = sequence;
436 fence->native_types = native_types;
/* First fence on an empty ring defines the waiting-sequence baseline. */
437 if (list_empty(&fc->ring))
438 fc->highest_waiting_sequence = sequence - 1;
439 list_add_tail(&fence->ring, &fc->ring);
440 fc->latest_queued_sequence = sequence;
441 write_unlock_irqrestore(&fm->lock, flags);
444 EXPORT_SYMBOL(drm_fence_object_emit);
/*
 * Initialize a freshly allocated fence object: set the initial usage
 * reference, reset signaled/waiting state, and optionally emit it
 * immediately when DRM_FENCE_FLAG_EMIT is requested.
 */
446 static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
448 uint32_t fence_flags,
449 struct drm_fence_object *fence)
453 struct drm_fence_manager *fm = &dev->fm;
455 mutex_lock(&dev->struct_mutex);
456 atomic_set(&fence->usage, 1);
457 mutex_unlock(&dev->struct_mutex);
459 write_lock_irqsave(&fm->lock, flags);
460 INIT_LIST_HEAD(&fence->ring);
463 * Avoid hitting BUG() for kernel-only fence objects.
466 fence->fence_class = fence_class;
468 fence->signaled_types = 0;
469 fence->waiting_types = 0;
473 write_unlock_irqrestore(&fm->lock, flags);
/* Caller asked for an immediate emit to the hardware. */
474 if (fence_flags & DRM_FENCE_FLAG_EMIT) {
475 ret = drm_fence_object_emit(fence, fence_flags,
476 fence->fence_class, type);
/*
 * Allocate and initialize a new fence object; on success it is stored
 * in *@c_fence and the manager's fence count is bumped.  On init
 * failure the single reference is dropped, freeing the object.
 */
481 int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
482 uint32_t type, unsigned flags,
483 struct drm_fence_object **c_fence)
485 struct drm_fence_object *fence;
487 struct drm_fence_manager *fm = &dev->fm;
/* Zeroed allocation from the DRM memory accounting pool. */
489 fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
491 DRM_ERROR("Out of memory creating fence object\n");
494 ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
496 drm_fence_usage_deref_unlocked(&fence);
500 atomic_inc(&fm->count);
504 EXPORT_SYMBOL(drm_fence_object_create);
/*
 * One-time setup of the per-device fence manager: initialize the
 * rwlock, then reset each driver-advertised fence class (ring list and
 * waitqueue) under the write lock, and zero the fence count.
 */
506 void drm_fence_manager_init(struct drm_device *dev)
508 struct drm_fence_manager *fm = &dev->fm;
509 struct drm_fence_class_manager *fence_class;
510 struct drm_fence_driver *fed = dev->driver->fence_driver;
514 rwlock_init(&fm->lock);
515 write_lock_irqsave(&fm->lock, flags);
/* The driver may not declare more classes than the static array holds. */
521 fm->num_classes = fed->num_classes;
522 BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
524 for (i = 0; i < fm->num_classes; ++i) {
525 fence_class = &fm->fence_class[i];
527 memset(fence_class, 0, sizeof(*fence_class));
528 INIT_LIST_HEAD(&fence_class->ring);
529 DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
532 atomic_set(&fm->count, 0);
534 write_unlock_irqrestore(&fm->lock, flags);
537 void drm_fence_manager_takedown(struct drm_device *dev)