/**
 * File operations for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm_sarea.h"
#include <linux/poll.h>
static int drm_open_helper(struct inode *inode, struct file *filp,
                           drm_device_t * dev);
static int drm_setup(drm_device_t * dev)
{
        drm_local_map_t *map;
        int i;
        int ret;
        int sareapage;
        if (dev->driver->firstopen) {
                ret = dev->driver->firstopen(dev);
                if (ret != 0)
                        return ret;
        }
        dev->magicfree.next = NULL;
        /* prebuild the SAREA */
        sareapage = max(SAREA_MAX, PAGE_SIZE);
        i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
        if (i != 0)
                return i;
        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);
        atomic_set(&dev->buf_alloc, 0);
        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
                i = drm_dma_setup(dev);
                if (i < 0)
                        return i;
        }
        for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);

        drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
        INIT_LIST_HEAD(&dev->magicfree);
        dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
        if (dev->ctxlist == NULL)
                return -ENOMEM;
        memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
        INIT_LIST_HEAD(&dev->ctxlist->head);
        dev->sigdata.lock = NULL;
        init_waitqueue_head(&dev->lock.lock_queue);
        dev->queue_reserved = 0;
        dev->queuelist = NULL;
        dev->context_flag = 0;
        dev->interrupt_flag = 0;
        dev->last_context = 0;
        dev->last_switch = 0;
        dev->last_checked = 0;
        init_waitqueue_head(&dev->context_wait);

        dev->buf_async = NULL;
        init_waitqueue_head(&dev->buf_readers);
        init_waitqueue_head(&dev->buf_writers);
        /*
         * The kernel's context could be created here, but is now created
         * in drm_dma_enqueue. This is more resource-efficient for
         * hardware that does not do DMA, but may mean that
         * drm_select_queue fails between the time the interrupt is
         * initialized and the time the queues are initialized.
         */

        return 0;
}
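
/*
 * Lifecycle note (editorial, hedged): drm_setup() runs only on the 0 -> 1
 * transition of dev->open_count (see drm_open() below), and drm_lastclose()
 * undoes it on the final release. A driver that needs hardware state brought
 * up once per open cycle supplies the firstopen hook consumed above; "foo"
 * is a hypothetical driver name:
 *
 *	static int foo_firstopen(drm_device_t *dev)
 *	{
 *		return foo_hw_init(dev);
 *	}
 */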
/**
 * Open file.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * Searches for the DRM device with the same minor number, calls
 * drm_open_helper(), and increments the device open count. If the open
 * count was previously zero, i.e. this is the first time the device is
 * opened, calls drm_setup().
 */
int drm_open(struct inode *inode, struct file *filp)
{
        drm_device_t *dev = NULL;
        int minor = iminor(inode);
        int retcode = 0;

        if (!((minor >= 0) && (minor < drm_cards_limit)))
                return -ENODEV;

        if (!drm_heads[minor])
                return -ENODEV;

        if (!(dev = drm_heads[minor]->dev))
                return -ENODEV;

        retcode = drm_open_helper(inode, filp, dev);
        if (!retcode) {
                atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
                spin_lock(&dev->count_lock);
                if (!dev->open_count++) {
                        spin_unlock(&dev->count_lock);
                        return drm_setup(dev);
                }
                spin_unlock(&dev->count_lock);
        }

        mutex_lock(&dev->struct_mutex);
        BUG_ON((dev->dev_mapping != NULL) &&
               (dev->dev_mapping != inode->i_mapping));
        if (dev->dev_mapping == NULL)
                dev->dev_mapping = inode->i_mapping;
        mutex_unlock(&dev->struct_mutex);

        return retcode;
}
EXPORT_SYMBOL(drm_open);
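
/*
 * Editorial usage sketch (hedged): userspace reaches drm_open() by opening
 * the device node; the node path below is the conventional one and may
 * differ by system. Closing the descriptor funnels into drm_release().
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/dri/card0", O_RDWR);
 *		if (fd < 0)
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 */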
/**
 * File \c open operation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 *
 * Puts the dev->fops corresponding to the device minor number into
 * \p filp, calls the \c open method, and restores the file operations.
 */
int drm_stub_open(struct inode *inode, struct file *filp)
{
        drm_device_t *dev = NULL;
        int minor = iminor(inode);
        int err = -ENODEV;
        const struct file_operations *old_fops;

        if (!((minor >= 0) && (minor < drm_cards_limit)))
                return -ENODEV;

        if (!drm_heads[minor])
                return -ENODEV;

        if (!(dev = drm_heads[minor]->dev))
                return -ENODEV;

        old_fops = filp->f_op;
        filp->f_op = fops_get(&dev->driver->fops);
        if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
                fops_put(filp->f_op);
                filp->f_op = fops_get(old_fops);
        }
        fops_put(old_fops);

        return err;
}
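
/*
 * Editorial sketch (hedged): the table swapped in above is the driver's own
 * file_operations, embedded in its drm_driver and wired to the generic DRM
 * entry points. A typical driver of this era declares something like the
 * following; "foo" is a hypothetical driver name:
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.fops = {
 *			.owner = THIS_MODULE,
 *			.open = drm_open,
 *			.release = drm_release,
 *			.ioctl = drm_ioctl,
 *			.mmap = drm_mmap,
 *			.poll = drm_poll,
 *			.fasync = drm_fasync,
 *		},
 *	};
 *
 * drm_stub_open() is installed on the character device first and forwards
 * to this table once the minor has been resolved to a driver.
 */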
/**
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__i386__)
        if (boot_cpu_data.x86 == 3)
                return 0;       /* No cmpxchg on a 386 */
#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
        return 0;               /* No cmpxchg before v9 sparc. */
#endif
        return 1;
}
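
/*
 * Editorial note (hedged): the cmpxchg requirement comes from the DRM
 * hardware lock, which is taken by compare-and-exchange on a lock word
 * shared with userspace, roughly:
 *
 *	unsigned int old = *lock;
 *	unsigned int new = context | _DRM_LOCK_HELD;
 *	unsigned int prev = cmpxchg(lock, old, new);
 *
 * CPUs without a usable cmpxchg therefore cannot run the DRI.
 */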
/**
 * Called whenever a process opens /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param dev device.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in
 * \p filp and adds it into the doubly linked list in \p dev.
 */
static int drm_open_helper(struct inode *inode, struct file *filp,
                           drm_device_t * dev)
{
        int minor = iminor(inode);
        drm_file_t *priv;
        int ret;
        int i, j;

        if (filp->f_flags & O_EXCL)
                return -EBUSY;  /* No exclusive opens */
        if (!drm_cpu_valid())
                return -EINVAL;

        DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
        priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
        if (!priv)
                return -ENOMEM;

        memset(priv, 0, sizeof(*priv));
        filp->private_data = priv;
        priv->uid = current->euid;
        priv->pid = current->pid;
        priv->head = drm_heads[minor];
        priv->ioctl_count = 0;
        /* for compatibility root is always authenticated */
        priv->authenticated = capable(CAP_SYS_ADMIN);
        priv->lock_count = 0;

        INIT_LIST_HEAD(&priv->user_objects);
        INIT_LIST_HEAD(&priv->refd_objects);
        for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
                ret = drm_ht_create(&priv->refd_object_hash[i],
                                    DRM_FILE_HASH_ORDER);
                if (ret)
                        break;
        }

        if (ret) {
                for (j = 0; j < i; ++j)
                        drm_ht_remove(&priv->refd_object_hash[j]);
                goto out_free;
        }
        if (dev->driver->open) {
                ret = dev->driver->open(dev, priv);
                if (ret < 0)
                        goto out_free;
        }
        mutex_lock(&dev->struct_mutex);
        if (!dev->file_last) {
                priv->next = NULL;
                priv->prev = NULL;
                dev->file_first = priv;
                dev->file_last = priv;
                /* first opener automatically becomes master */
                priv->master = 1;
        } else {
                priv->next = NULL;
                priv->prev = dev->file_last;
                dev->file_last->next = priv;
                dev->file_last = priv;
        }
        mutex_unlock(&dev->struct_mutex);
#ifdef __alpha__
        /*
         * Default the hose
         */
        if (!dev->hose) {
                struct pci_dev *pci_dev;

                pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
                if (pci_dev) {
                        dev->hose = pci_dev->sysdata;
                        pci_dev_put(pci_dev);
                }
                if (!dev->hose) {
                        struct pci_bus *b = pci_bus_b(pci_root_buses.next);
                        if (b)
                                dev->hose = b->sysdata;
                }
        }
#endif

        return 0;

out_free:
        drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
        filp->private_data = NULL;
        return ret;
}
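
/*
 * Editorial note (hedged): the first opener becomes the master
 * (priv->master above); other clients must then be authenticated against
 * it. A sketch of the userspace handshake, assuming the standard DRM auth
 * ioctls; "master_fd" and "client_fd" are hypothetical descriptors:
 *
 *	drm_auth_t auth;
 *	ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth);
 *	ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
 *
 * Once AUTH_MAGIC succeeds, the client is marked authenticated and the
 * restricted ioctls become available to it.
 */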
int drm_fasync(int fd, struct file *filp, int on)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int retcode;

        DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
                  (long)old_encode_dev(priv->head->device));
        retcode = fasync_helper(fd, filp, on, &dev->buf_async);
        if (retcode < 0)
                return retcode;
        return 0;
}
EXPORT_SYMBOL(drm_fasync);
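
/*
 * Editorial usage sketch (hedged): the kernel invokes drm_fasync() when a
 * process toggles asynchronous notification on the descriptor:
 *
 *	int flags;
 *	fcntl(fd, F_SETOWN, getpid());
 *	flags = fcntl(fd, F_GETFL);
 *	fcntl(fd, F_SETFL, flags | FASYNC);
 *
 * fasync_helper() then links filp into dev->buf_async so the device can
 * deliver SIGIO to the owner.
 */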
static void drm_object_release(struct file *filp)
{
        drm_file_t *priv = filp->private_data;
        struct list_head *head;
        drm_user_object_t *user_object;
        drm_ref_object_t *ref_object;
        int i;

        /*
         * Free leftover ref objects created by me. Note that we cannot use
         * list_for_each() here, as the struct_mutex may be temporarily
         * released by the remove_() functions, and thus the lists may be
         * altered. Also, drm_remove_ref_object() will not remove the object
         * from the list unless its refcount is 1.
         */

        head = &priv->refd_objects;
        while (head->next != head) {
                ref_object = list_entry(head->next, drm_ref_object_t, list);
                drm_remove_ref_object(priv, ref_object);
                head = &priv->refd_objects;
        }

        /*
         * Free leftover user objects created by me.
         */

        head = &priv->user_objects;
        while (head->next != head) {
                user_object = list_entry(head->next, drm_user_object_t, list);
                drm_remove_user_object(priv, user_object);
                head = &priv->user_objects;
        }

        for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
                drm_ht_remove(&priv->refd_object_hash[i]);
}
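
/*
 * Editorial note (hedged): the restart-from-head loops above are the safe
 * form when the lock protecting the list can be dropped inside the loop
 * body; even list_for_each_entry_safe() only survives deletion of the
 * current entry, not arbitrary reshuffling while unlocked. The pattern in
 * isolation, for a hypothetical list "objects":
 *
 *	while (!list_empty(&objects))
 *		release_object(list_entry(objects.next, struct obj, link));
 */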
/**
 * Release file.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held, frees it and takes it again for the kernel
 * context, since holding it is necessary to reclaim buffers. Unlinks the
 * file private data from its list and frees it. Decrements the open count
 * and, if it reaches zero, calls drm_lastclose().
 */
int drm_release(struct inode *inode, struct file *filp)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        int retcode = 0;

        lock_kernel();
        dev = priv->head->dev;

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        if (dev->driver->preclose)
                dev->driver->preclose(dev, filp);
        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
                  current->pid, (long)old_encode_dev(priv->head->device),
                  dev->open_count);
        if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
                unsigned long _end = jiffies + DRM_HZ * 3;

                do {
                        retcode = drm_kernel_take_hw_lock(filp);
                } while (retcode && !time_after_eq(jiffies, _end));

                if (!retcode) {
                        dev->driver->reclaim_buffers_locked(dev, filp);

                        drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
                } else {

                        /*
                         * FIXME: This is not a good solution. We should
                         * perhaps associate the DRM lock with a process
                         * context, and check whether the current process
                         * holds the lock. Then we can run reclaim buffers
                         * locked anyway.
                         */

                        DRM_ERROR("Reclaim buffers locked deadlock.\n"
                                  "\tThis is probably a single thread having multiple\n"
                                  "\tDRM file descriptors open either dying or"
                                  " closing file descriptors\n"
                                  "\twhile having the lock. I will not reclaim buffers.\n"
                                  "\tLocking context is 0x%08x\n",
                                  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
                }
        } else if (drm_i_have_hw_lock(filp)) {
                DRM_DEBUG("File %p released, freeing lock for context %d\n",
                          filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                drm_lock_free(dev, &dev->lock.hw_lock->lock,
                              _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
        }
        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
            !dev->driver->reclaim_buffers_locked) {
                dev->driver->reclaim_buffers(dev, filp);
        }

        drm_fasync(-1, filp, 0);
        mutex_lock(&dev->ctxlist_mutex);

        if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
                drm_ctx_list_t *pos, *n;

                list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
                        if (pos->tag == priv &&
                            pos->handle != DRM_KERNEL_CONTEXT) {
                                if (dev->driver->context_dtor)
                                        dev->driver->context_dtor(dev,
                                                                  pos->handle);

                                drm_ctxbitmap_free(dev, pos->handle);

                                list_del(&pos->head);
                                drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
                                --dev->ctx_count;
                        }
                }
        }
        mutex_unlock(&dev->ctxlist_mutex);
        mutex_lock(&dev->struct_mutex);
        drm_object_release(filp);
        if (priv->remove_auth_on_close == 1) {
                drm_file_t *temp = dev->file_first;

                while (temp) {
                        temp->authenticated = 0;
                        temp = temp->next;
                }
        }
        if (priv->prev) {
                priv->prev->next = priv->next;
        } else {
                dev->file_first = priv->next;
        }
        if (priv->next) {
                priv->next->prev = priv->prev;
        } else {
                dev->file_last = priv->prev;
        }
        mutex_unlock(&dev->struct_mutex);
        if (dev->driver->postclose)
                dev->driver->postclose(dev, priv);
        drm_free(priv, sizeof(*priv), DRM_MEM_FILES);

        /* ========================================================
         * End inline drm_release
         */
        atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        spin_lock(&dev->count_lock);
        if (!--dev->open_count) {
                if (atomic_read(&dev->ioctl_count) || dev->blocked) {
                        DRM_ERROR("Device busy: %d %d\n",
                                  atomic_read(&dev->ioctl_count), dev->blocked);
                        spin_unlock(&dev->count_lock);
                        unlock_kernel();
                        return -EBUSY;
                }
                spin_unlock(&dev->count_lock);
                unlock_kernel();
                return drm_lastclose(dev);
        }
        spin_unlock(&dev->count_lock);

        unlock_kernel();

        return retcode;
}
EXPORT_SYMBOL(drm_release);
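
/*
 * Editorial note (hedged): drm_open() and drm_release() bracket
 * dev->open_count under dev->count_lock, so drm_setup() and drm_lastclose()
 * pair up across the whole open cycle. The -EBUSY path above refuses the
 * final close while ioctls are still in flight (dev->ioctl_count) or the
 * device is marked blocked.
 */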
/* This is to deal with older X servers that believe 0 means data is
 * available, which is not the correct return for a poll function.
 * This cannot be fixed until the X server is fixed. The X server will need
 * to set a newer interface version to avoid breaking older X servers.
 * Without fixing the X server you get: "WaitForSomething(): select: errno=22"
 * http://freedesktop.org/bugzilla/show_bug.cgi?id=1505 if you try
 * to return the correct response.
 */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
        /* return (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM); */
        return 0;
}
EXPORT_SYMBOL(drm_poll);