
Merge branch 'master' into modesetting-101
author    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
          Mon, 28 Apr 2008 10:10:44 +0000 (12:10 +0200)
committer Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
          Mon, 28 Apr 2008 10:10:44 +0000 (12:10 +0200)
Conflicts:

linux-core/Makefile.kernel
linux-core/drm_compat.c
linux-core/drm_fops.c
linux-core/drm_lock.c
shared-core/drm.h
shared-core/i915_dma.c
shared-core/i915_drv.h
shared-core/i915_irq.c

24 files changed:
libdrm/xf86drm.c
linux-core/Makefile
linux-core/Makefile.kernel
linux-core/drmP.h
linux-core/drm_bo.c
linux-core/drm_bo_move.c
linux-core/drm_compat.c
linux-core/drm_compat.h
linux-core/drm_fops.c
linux-core/drm_irq.c
linux-core/drm_lock.c
linux-core/drm_objects.h
linux-core/i915_buffer.c
linux-core/i915_drv.c
linux-core/i915_execbuf.c
linux-core/i915_fence.c
shared-core/drm.h
shared-core/drm_pciids.txt
shared-core/i915_dma.c
shared-core/i915_drm.h
shared-core/i915_drv.h
shared-core/i915_irq.c
shared-core/radeon_drm.h
shared-core/radeon_ms_exec.c

Simple merge
Simple merge
@@@ -20,13 -20,10 +20,14 @@@ r128-objs   := r128_drv.o r128_cce.o r1
  mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
  i810-objs   := i810_drv.o i810_dma.o
  i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
-               i915_buffer.o intel_display.o intel_crt.o intel_lvds.o \
 -              i915_buffer.o i915_compat.o i915_execbuf.o
++              i915_buffer.o i915_execbuf.o \
++              intel_display.o intel_crt.o intel_lvds.o \
 +              intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
 +              intel_tv.o i915_compat.o intel_dvo.o dvo_ch7xxx.o \
 +              dvo_ch7017.o dvo_ivch.o dvo_tfp410.o dvo_sil164.o
  nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
                nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
-               nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o nouveau_fence.o \
+               nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
                nv04_timer.o \
                nv04_mc.o nv40_mc.o nv50_mc.o \
                nv04_fb.o nv10_fb.o nv40_fb.o \
Simple merge
Simple merge
Simple merge
@@@ -730,57 -730,7 +730,57 @@@ void *idr_replace(struct idr *idp, voi
  EXPORT_SYMBOL(idr_replace);
  #endif
  
- #if defined(DRM_KMAP_ATOMIC_PROT_PFN) && defined(CONFIG_HIMEM)
 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
 +static __inline__ unsigned long __round_jiffies(unsigned long j, int cpu)
 +{
 +      int rem;
 +      unsigned long original = j;
 +
 +      j += cpu * 3;
 +
 +      rem = j % HZ;
 +
 +      if (rem < HZ/4) /* round down */
 +              j = j - rem;
 +      else /* round up */
 +              j = j - rem + HZ;
 +
 +      /* now that we have rounded, subtract the extra skew again */
 +      j -= cpu * 3;
 +
 +      if (j <= jiffies) /* rounding ate our timeout entirely; */
 +              return original;
 +      return j;
 +}
 +
 +static __inline__ unsigned long __round_jiffies_relative(unsigned long j, int cpu)
 +{
 +      return  __round_jiffies(j + jiffies, cpu) - jiffies;
 +}
 +
 +unsigned long round_jiffies_relative(unsigned long j)
 +{
 +      return __round_jiffies_relative(j, raw_smp_processor_id());
 +}
 +EXPORT_SYMBOL(round_jiffies_relative);
 +#endif
 +
 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
 +struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
 +{
 +    struct pci_dev *dev = NULL;
 +
 +    while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
 +        if (pci_domain_nr(dev->bus) == 0 &&
 +           (dev->bus->number == bus && dev->devfn == devfn))
 +            return dev;
 +   }
 +   return NULL;
 +}
 +EXPORT_SYMBOL(pci_get_bus_and_slot);
 +#endif
 +
+ #if defined(DRM_KMAP_ATOMIC_PROT_PFN)
  #define drm_kmap_get_fixmap_pte(vaddr)                                        \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
  
@@@ -806,5 -756,5 +806,4 @@@ void *kmap_atomic_prot_pfn(unsigned lon
  }
  
  EXPORT_SYMBOL(kmap_atomic_prot_pfn);
 -
  #endif
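
The two shims above backport round_jiffies_relative() and pci_get_bus_and_slot() so callers written for 2.6.19+ kernels compile unchanged on older ones. A minimal usage sketch (the timer and the device lookup are illustrative, not part of this commit):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/pci.h>

static struct timer_list example_poll_timer;    /* illustrative */

static void example_arm_poll_timer(void)
{
	/* Round the expiry so periodic wakeups can batch system-wide. */
	mod_timer(&example_poll_timer,
		  jiffies + round_jiffies_relative(HZ));
}

static struct pci_dev *example_find_igd(void)
{
	/* Integrated graphics at 0000:00:02.0; returns NULL if absent
	 * (a hit is referenced, so the caller must pci_dev_put() it). */
	return pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
}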
@@@ -332,26 -325,10 +332,27 @@@ void *idr_replace(struct idr *idp, voi
  #endif
  
  #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
 -typedef _Bool                   bool;
 +extern unsigned long round_jiffies_relative(unsigned long j);
 +#endif
 +
 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
 +extern struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);
 +#endif
 +
 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
 +static inline int kobject_uevent_env(struct kobject *kobj,
 +                                     enum kobject_action action,
 +                                     char *envp[])
 +{
 +    return 0;
 +}
 +#endif
 +
 +#ifndef PM_EVENT_PRETHAW 
 +#define PM_EVENT_PRETHAW 3
  #endif
  
  #if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIMEM))
  #define DRM_KMAP_ATOMIC_PROT_PFN
  extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
@@@ -402,63 -389,59 +402,63 @@@ int drm_release(struct inode *inode, st
                  current->pid, (long)old_encode_dev(file_priv->minor->device),
                  dev->open_count);
  
 -      if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
 -              if (drm_i_have_hw_lock(dev, file_priv)) {
 -                      dev->driver->reclaim_buffers_locked(dev, file_priv);
 -              } else {
 -                      unsigned long _end=jiffies + 3*DRM_HZ;
 -                      int locked = 0;
 -
 -                      drm_idlelock_take(&dev->lock);
 -
 -                      /*
 -                       * Wait for a while.
 -                       */
 -
 -                      do{
 -                              spin_lock_bh(&dev->lock.spinlock);
 -                              locked = dev->lock.idle_has_lock;
 -                              spin_unlock_bh(&dev->lock.spinlock);
 -                              if (locked)
 -                                      break;
 -                              schedule();
 -                      } while (!time_after_eq(jiffies, _end));
 -
 -                      if (!locked) {
 -                              DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
 -                                        "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
 -                                        "\tI will go on reclaiming the buffers anyway.\n");
 +      /* if the master has gone away we can't do anything with the lock */
 +      if (file_priv->minor->master) {
 +              if (dev->driver->reclaim_buffers_locked && file_priv->master->lock.hw_lock) {
 +                      if (drm_i_have_hw_lock(dev, file_priv)) {
 +                              dev->driver->reclaim_buffers_locked(dev, file_priv);
 +                      } else {
 +                              unsigned long _end=jiffies + 3*DRM_HZ;
 +                              int locked = 0;
 +                              
 +                              drm_idlelock_take(&file_priv->master->lock);
 +                              
 +                              /*
 +                               * Wait for a while.
 +                               */
 +                              
 +                              do{
-                                       spin_lock(&file_priv->master->lock.spinlock);
++                                      spin_lock_bh(&file_priv->master->lock.spinlock);
 +                                      locked = file_priv->master->lock.idle_has_lock;
-                                       spin_unlock(&file_priv->master->lock.spinlock);
++                                      spin_unlock_bh(&file_priv->master->lock.spinlock);
 +                                      if (locked)
 +                                              break;
 +                                      schedule();
 +                              } while (!time_after_eq(jiffies, _end));
 +                              
 +                              if (!locked) {
 +                                      DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
 +                                                "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
 +                                                "\tI will go on reclaiming the buffers anyway.\n");
 +                              }
 +                              
 +                              dev->driver->reclaim_buffers_locked(dev, file_priv);
 +                              drm_idlelock_release(&file_priv->master->lock);
                        }
 -
 -                      dev->driver->reclaim_buffers_locked(dev, file_priv);
 -                      drm_idlelock_release(&dev->lock);
                }
 -      }
 -
 -      if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
 -
 -              drm_idlelock_take(&dev->lock);
 -              dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
 -              drm_idlelock_release(&dev->lock);
  
 -      }
 +              if (dev->driver->reclaim_buffers_idlelocked && file_priv->master->lock.hw_lock) {
 +                      
 +                      drm_idlelock_take(&file_priv->master->lock);
 +                      dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
 +                      drm_idlelock_release(&file_priv->master->lock);
 +                      
 +              }
  
 -      if (drm_i_have_hw_lock(dev, file_priv)) {
 -              DRM_DEBUG("File %p released, freeing lock for context %d\n",
 -                        filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
 -
 -              drm_lock_free(&dev->lock,
 -                            _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
 -      }
  
 +              if (drm_i_have_hw_lock(dev, file_priv)) {
 +                      DRM_DEBUG("File %p released, freeing lock for context %d\n",
 +                                filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
 +                      
 +                      drm_lock_free(&file_priv->master->lock,
 +                                    _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
 +              }
 +              
  
 -      if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
 -          !dev->driver->reclaim_buffers_locked) {
 -              dev->driver->reclaim_buffers(dev, file_priv);
 +              if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
 +                  !dev->driver->reclaim_buffers_locked) {
 +                      dev->driver->reclaim_buffers(dev, file_priv);
 +              }
        }
  
        drm_fasync(-1, filp, 0);
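
The fallback above polls for up to 3*DRM_HZ when a dying client still holds the HW lock, and the error message asks drivers to migrate to reclaim_buffers_idlelocked(), which the core calls with the idlelock already taken. A hedged sketch of that hook (the example_ names are illustrative):

static void example_reclaim_buffers_idlelocked(struct drm_device *dev,
					       struct drm_file *file_priv)
{
	/* The idlelock is held here: safe to tear down this client's
	 * buffers without racing a userspace lock holder. */
}

static struct drm_driver example_driver = {
	.reclaim_buffers_idlelocked = example_reclaim_buffers_idlelocked,
};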
Simple merge
@@@ -71,13 -70,13 +71,14 @@@ int drm_lock(struct drm_device *dev, vo
                if (lock->context < 0)
                        return -EINVAL;
  
 -      add_wait_queue(&dev->lock.lock_queue, &entry);
 -      spin_lock_bh(&dev->lock.spinlock);
 -      dev->lock.user_waiters++;
 -      spin_unlock_bh(&dev->lock.spinlock);
 +      add_wait_queue(&master->lock.lock_queue, &entry);
-       spin_lock(&master->lock.spinlock);
++      spin_lock_bh(&master->lock.spinlock);
 +      master->lock.user_waiters++;
-       spin_unlock(&master->lock.spinlock);
++      spin_unlock_bh(&master->lock.spinlock);
++
        for (;;) {
                __set_current_state(TASK_INTERRUPTIBLE);
 -              if (!dev->lock.hw_lock) {
 +              if (!master->lock.hw_lock) {
                        /* Device has been unregistered */
                        ret = -EINTR;
                        break;
                        break;
                }
        }
-       spin_lock(&master->lock.spinlock);
 -      spin_lock_bh(&dev->lock.spinlock);
 -      dev->lock.user_waiters--;
 -      spin_unlock_bh(&dev->lock.spinlock);
++      spin_lock_bh(&master->lock.spinlock);
 +      master->lock.user_waiters--;
-       spin_unlock(&master->lock.spinlock);
++      spin_unlock_bh(&master->lock.spinlock);
        __set_current_state(TASK_RUNNING);
 -      remove_wait_queue(&dev->lock.lock_queue, &entry);
 +      remove_wait_queue(&master->lock.lock_queue, &entry);
  
        DRM_DEBUG("%d %s\n", lock->context,
                  ret ? "interrupted" : "has lock");
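
Both parents take the same spinlock here; the resolution keeps the _bh variants. A sketch of why that matters, assuming (as the pre-merge code did) that lock.spinlock can also be taken from bottom-half context:

static void example_bump_waiters(struct drm_master *master)
{
	/*
	 * spin_lock_bh() disables softirqs on this CPU for the
	 * critical section, so a bottom half that also takes
	 * lock.spinlock cannot preempt the holder and spin on it
	 * forever -- a self-deadlock plain spin_lock() would permit.
	 */
	spin_lock_bh(&master->lock.spinlock);
	master->lock.user_waiters++;
	spin_unlock_bh(&master->lock.spinlock);
}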
Simple merge
Simple merge
Simple merge
index 0000000,804f3ac..932882d
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,917 +1,921 @@@
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 -      drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
 -          dev_priv->sarea_priv;
+ /*
+  * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+  * All Rights Reserved.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the
+  * "Software"), to deal in the Software without restriction, including
+  * without limitation the rights to use, copy, modify, merge, publish,
+  * distribute, sub license, and/or sell copies of the Software, and to
+  * permit persons to whom the Software is furnished to do so, subject to
+  * the following conditions:
+  *
+  * The above copyright notice and this permission notice (including the
+  * next paragraph) shall be included in all copies or substantial portions
+  * of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+  *
+  * Authors:
+  *     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+  *     Dave Airlie
+  *     Keith Packard
+  *     ... ?
+  */
+ #include "drmP.h"
+ #include "drm.h"
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+ #if DRM_DEBUG_CODE
+ #define DRM_DEBUG_RELOCATION  (drm_debug != 0)
+ #else
+ #define DRM_DEBUG_RELOCATION  0
+ #endif
+ enum i915_buf_idle {
+       I915_RELOC_UNCHECKED,
+       I915_RELOC_IDLE,
+       I915_RELOC_BUSY
+ };
+ struct i915_relocatee_info {
+       struct drm_buffer_object *buf;
+       unsigned long offset;
+       uint32_t *data_page;
+       unsigned page_offset;
+       struct drm_bo_kmap_obj kmap;
+       int is_iomem;
+       int dst;
+       int idle;
+       int performed_ring_relocs;
+ #ifdef DRM_KMAP_ATOMIC_PROT_PFN
+       unsigned long pfn;
+       pgprot_t pg_prot;
+ #endif
+ };
+ struct drm_i915_validate_buffer {
+       struct drm_buffer_object *buffer;
+       int presumed_offset_correct;
+       void __user *data;
+       int ret;
+       enum i915_buf_idle idle;
+ };
+ /*
+  * I'd like to use MI_STORE_DATA_IMM here, but I can't make
+  * it work. Seems like GART writes are broken with that
+  * instruction. Also I'm not sure that MI_FLUSH will
+  * act as a memory barrier for that instruction. It will
+  * for this single dword 2D blit.
+  */
+ static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset,
+                                uint32_t value)
+ {
+       struct drm_i915_private *dev_priv =
+           (struct drm_i915_private *)dev->dev_private;
+       RING_LOCALS;
+       i915_kernel_lost_context(dev);
+       BEGIN_LP_RING(6);
+       OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3));
+       OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40));
+       OUT_RING((0x1 << 16) | (0x4));
+       OUT_RING(offset);
+       OUT_RING(value);
+       OUT_RING(0);
+       ADVANCE_LP_RING();
+ }
+ static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer
+                                           *buffers, unsigned num_buffers)
+ {
+       while (num_buffers--)
+               drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
+ }
+ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
+                    struct drm_i915_validate_buffer *buffers,
+                    struct i915_relocatee_info *relocatee, uint32_t * reloc)
+ {
+       unsigned index;
+       unsigned long new_cmd_offset;
+       u32 val;
+       int ret, i;
+       int buf_index = -1;
+       /*
+        * FIXME: O(relocs * buffers) complexity.
+        */
+       for (i = 0; i <= num_buffers; i++)
+               if (buffers[i].buffer)
+                       if (reloc[2] == buffers[i].buffer->base.hash.key)
+                               buf_index = i;
+       if (buf_index == -1) {
+               DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
+               return -EINVAL;
+       }
+       /*
+        * Short-circuit relocations that were correctly
+        * guessed by the client
+        */
+       if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
+               return 0;
+       new_cmd_offset = reloc[0];
+       if (!relocatee->data_page ||
+           !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
+               struct drm_bo_mem_reg *mem = &relocatee->buf->mem;
+               drm_bo_kunmap(&relocatee->kmap);
+               relocatee->data_page = NULL;
+               relocatee->offset = new_cmd_offset;
+               if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) {
+                 ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0);
+                       if (ret)
+                               return ret;
+                       relocatee->idle = I915_RELOC_IDLE;
+               }
+               if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) &&
+                            (mem->flags & DRM_BO_FLAG_CACHED_MAPPED)))
+                       drm_bo_evict_cached(relocatee->buf);
+               ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
+                                 1, &relocatee->kmap);
+               if (ret) {
+                       DRM_ERROR
+                           ("Could not map command buffer to apply relocs: %08lx\n",
+                            new_cmd_offset);
+                       return ret;
+               }
+               relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
+                                                      &relocatee->is_iomem);
+               relocatee->page_offset = (relocatee->offset & PAGE_MASK);
+       }
+       val = buffers[buf_index].buffer->offset;
+       index = (reloc[0] - relocatee->page_offset) >> 2;
+       /* add in validate */
+       val = val + reloc[1];
+       if (DRM_DEBUG_RELOCATION) {
+               if (buffers[buf_index].presumed_offset_correct &&
+                   relocatee->data_page[index] != val) {
+                       DRM_DEBUG
+                           ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
+                            reloc[0], reloc[1], buf_index,
+                            relocatee->data_page[index], val);
+               }
+       }
+       if (relocatee->is_iomem)
+               iowrite32(val, relocatee->data_page + index);
+       else
+               relocatee->data_page[index] = val;
+       return 0;
+ }
+ int i915_process_relocs(struct drm_file *file_priv,
+                       uint32_t buf_handle,
+                       uint32_t __user ** reloc_user_ptr,
+                       struct i915_relocatee_info *relocatee,
+                       struct drm_i915_validate_buffer *buffers,
+                       uint32_t num_buffers)
+ {
+       int ret, reloc_stride;
+       uint32_t cur_offset;
+       uint32_t reloc_count;
+       uint32_t reloc_type;
+       uint32_t reloc_buf_size;
+       uint32_t *reloc_buf = NULL;
+       int i;
+       /* do a copy from user from the user ptr */
+       ret = get_user(reloc_count, *reloc_user_ptr);
+       if (ret) {
+               DRM_ERROR("Could not map relocation buffer.\n");
+               goto out;
+       }
+       ret = get_user(reloc_type, (*reloc_user_ptr) + 1);
+       if (ret) {
+               DRM_ERROR("Could not map relocation buffer.\n");
+               goto out;
+       }
+       if (reloc_type != 0) {
+               DRM_ERROR("Unsupported relocation type requested\n");
+               ret = -EINVAL;
+               goto out;
+       }
+       reloc_buf_size =
+           (I915_RELOC_HEADER +
+            (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
+       reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
+       if (!reloc_buf) {
+               DRM_ERROR("Out of memory for reloc buffer\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+       if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+       /* get next relocate buffer handle */
+       *reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2];
+       reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);   /* may be different for other types of relocs */
+       DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count,
+                 *reloc_user_ptr);
+       for (i = 0; i < reloc_count; i++) {
+               cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);
+               ret = i915_apply_reloc(file_priv, num_buffers, buffers,
+                                      relocatee, reloc_buf + cur_offset);
+               if (ret)
+                       goto out;
+       }
+       out:
+       if (reloc_buf)
+               kfree(reloc_buf);
+       if (relocatee->data_page) {
+               drm_bo_kunmap(&relocatee->kmap);
+               relocatee->data_page = NULL;
+       }
+       return ret;
+ }
+ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
+                          uint32_t __user * reloc_user_ptr,
+                          struct drm_i915_validate_buffer *buffers,
+                          uint32_t buf_count)
+ {
+       struct drm_device *dev = file_priv->minor->dev;
+       struct i915_relocatee_info relocatee;
+       int ret = 0;
+       int b;
+       /*
+        * Short circuit relocations when all previous
+        * buffers offsets were correctly guessed by
+        * the client
+        */
+       if (!DRM_DEBUG_RELOCATION) {
+               for (b = 0; b < buf_count; b++)
+                       if (!buffers[b].presumed_offset_correct)
+                               break;
+               if (b == buf_count)
+                       return 0;
+       }
+       memset(&relocatee, 0, sizeof(relocatee));
+       relocatee.idle = I915_RELOC_UNCHECKED;
+       mutex_lock(&dev->struct_mutex);
+       relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
+       mutex_unlock(&dev->struct_mutex);
+       if (!relocatee.buf) {
+               DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
+               ret = -EINVAL;
+               goto out_err;
+       }
+       mutex_lock(&relocatee.buf->mutex);
+       while (reloc_user_ptr) {
+               ret =
+                   i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr,
+                                       &relocatee, buffers, buf_count);
+               if (ret) {
+                       DRM_ERROR("process relocs failed\n");
+                       goto out_err1;
+               }
+       }
+       out_err1:
+       mutex_unlock(&relocatee.buf->mutex);
+       drm_bo_usage_deref_unlocked(&relocatee.buf);
+       out_err:
+       return ret;
+ }
+ static void i915_clear_relocatee(struct i915_relocatee_info *relocatee)
+ {
+       if (relocatee->data_page) {
+ #ifndef DRM_KMAP_ATOMIC_PROT_PFN
+               drm_bo_kunmap(&relocatee->kmap);
+ #else
+               kunmap_atomic(relocatee->data_page, KM_USER0);
+ #endif
+               relocatee->data_page = NULL;
+       }
+       relocatee->buf = NULL;
+       relocatee->dst = ~0;
+ }
+ static int i915_update_relocatee(struct i915_relocatee_info *relocatee,
+                                struct drm_i915_validate_buffer *buffers,
+                                unsigned int dst, unsigned long dst_offset)
+ {
+       int ret;
+       if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) {
+               i915_clear_relocatee(relocatee);
+               relocatee->dst = dst;
+               relocatee->buf = buffers[dst].buffer;
+               relocatee->idle = buffers[dst].idle;
+               /*
+                * Check for buffer idle. If the buffer is busy, revert to
+                * ring relocations.
+                */
+               if (relocatee->idle == I915_RELOC_UNCHECKED) {
+                       preempt_enable();
+                       mutex_lock(&relocatee->buf->mutex);
+                       ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0);
+                       if (ret == 0)
+                               relocatee->idle = I915_RELOC_IDLE;
+                       else {
+                               relocatee->idle = I915_RELOC_BUSY;
+                               relocatee->performed_ring_relocs = 1;
+                       }
+                       mutex_unlock(&relocatee->buf->mutex);
+                       preempt_disable();
+                       buffers[dst].idle = relocatee->idle;
+               }
+       }
+       if (relocatee->idle == I915_RELOC_BUSY)
+               return 0;
+       if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) {
+               DRM_ERROR("Relocation destination out of bounds.\n");
+               return -EINVAL;
+       }
+       if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) ||
+                    NULL == relocatee->data_page)) {
+ #ifdef DRM_KMAP_ATOMIC_PROT_PFN
+               if (NULL != relocatee->data_page) {
+                       kunmap_atomic(relocatee->data_page, KM_USER0);
+                       relocatee->data_page = NULL;
+               }
+               ret = drm_bo_pfn_prot(relocatee->buf, dst_offset,
+                                     &relocatee->pfn, &relocatee->pg_prot);
+               if (ret) {
+                       DRM_ERROR("Can't map relocation destination.\n");
+                       return -EINVAL;
+               }
+               relocatee->data_page =
+                   kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0,
+                                        relocatee->pg_prot);
+ #else
+               if (NULL != relocatee->data_page) {
+                       drm_bo_kunmap(&relocatee->kmap);
+                       relocatee->data_page = NULL;
+               }
+               ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT,
+                                 1, &relocatee->kmap);
+               if (ret) {
+                       DRM_ERROR("Can't map relocation destination.\n");
+                       return ret;
+               }
+               relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
+                                                      &relocatee->is_iomem);
+ #endif
+               relocatee->page_offset = dst_offset & PAGE_MASK;
+       }
+       return 0;
+ }
+ static int i915_apply_post_reloc(uint32_t reloc[],
+                                struct drm_i915_validate_buffer *buffers,
+                                uint32_t num_buffers,
+                                struct i915_relocatee_info *relocatee)
+ {
+       uint32_t reloc_buffer = reloc[2];
+       uint32_t dst_buffer = reloc[3];
+       uint32_t val;
+       uint32_t index;
+       int ret;
+       if (likely(buffers[reloc_buffer].presumed_offset_correct))
+               return 0;
+       if (unlikely(reloc_buffer >= num_buffers)) {
+               DRM_ERROR("Invalid reloc buffer index.\n");
+               return -EINVAL;
+       }
+       if (unlikely(dst_buffer >= num_buffers)) {
+               DRM_ERROR("Invalid dest buffer index.\n");
+               return -EINVAL;
+       }
+       ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]);
+       if (unlikely(ret))
+               return ret;
+       val = buffers[reloc_buffer].buffer->offset;
+       index = (reloc[0] - relocatee->page_offset) >> 2;
+       val = val + reloc[1];
+       if (relocatee->idle == I915_RELOC_BUSY) {
+               i915_emit_ring_reloc(relocatee->buf->dev,
+                                    relocatee->buf->offset + reloc[0], val);
+               return 0;
+       }
+ #ifdef DRM_KMAP_ATOMIC_PROT_PFN
+       relocatee->data_page[index] = val;
+ #else
+       if (likely(relocatee->is_iomem))
+               iowrite32(val, relocatee->data_page + index);
+       else
+               relocatee->data_page[index] = val;
+ #endif
+       return 0;
+ }
+ static int i915_post_relocs(struct drm_file *file_priv,
+                           uint32_t __user * new_reloc_ptr,
+                           struct drm_i915_validate_buffer *buffers,
+                           unsigned int num_buffers)
+ {
+       uint32_t *reloc;
+       uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);
+       uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t);
+       struct i915_relocatee_info relocatee;
+       uint32_t reloc_type;
+       uint32_t num_relocs;
+       uint32_t count;
+       int ret = 0;
+       int i;
+       int short_circuit = 1;
+       uint32_t __user *reloc_ptr;
+       uint64_t new_reloc_data;
+       uint32_t reloc_buf_size;
+       uint32_t *reloc_buf;
+       for (i = 0; i < num_buffers; ++i) {
+               if (unlikely(!buffers[i].presumed_offset_correct)) {
+                       short_circuit = 0;
+                       break;
+               }
+       }
+       if (likely(short_circuit))
+               return 0;
+       memset(&relocatee, 0, sizeof(relocatee));
+       while (new_reloc_ptr) {
+               reloc_ptr = new_reloc_ptr;
+               ret = get_user(num_relocs, reloc_ptr);
+               if (unlikely(ret))
+                       goto out;
+               if (unlikely(!access_ok(VERIFY_READ, reloc_ptr,
+                                       header_size +
+                                       num_relocs * reloc_stride)))
+                       return -EFAULT;
+               ret = __get_user(reloc_type, reloc_ptr + 1);
+               if (unlikely(ret))
+                       goto out;
+               if (unlikely(reloc_type != 1)) {
+                       DRM_ERROR("Unsupported relocation type requested.\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+               ret = __get_user(new_reloc_data, reloc_ptr + 2);
+               new_reloc_ptr = (uint32_t __user *) (unsigned long)
+                   new_reloc_data;
+               reloc_ptr += I915_RELOC_HEADER;
+               if (num_relocs == 0)
+                       goto out;
+               reloc_buf_size =
+                   (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t);
+               reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
+               if (!reloc_buf) {
+                       DRM_ERROR("Out of memory for reloc buffer\n");
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+               reloc = reloc_buf;
+               preempt_disable();
+               for (count = 0; count < num_relocs; ++count) {
+                       ret = i915_apply_post_reloc(reloc, buffers,
+                                                   num_buffers, &relocatee);
+                       if (unlikely(ret)) {
+                               preempt_enable();
+                               goto out;
+                       }
+                       reloc += I915_RELOC0_STRIDE;
+               }
+               preempt_enable();
+               if (reloc_buf) {
+                       kfree(reloc_buf);
+                       reloc_buf = NULL;
+               }
+               i915_clear_relocatee(&relocatee);
+       }
+       out:
+       /*
+        * Flush ring relocs so the command parser will pick them up.
+        */
+       if (relocatee.performed_ring_relocs)
+               (void)i915_emit_mi_flush(file_priv->minor->dev, 0);
+       i915_clear_relocatee(&relocatee);
+       if (reloc_buf) {
+               kfree(reloc_buf);
+               reloc_buf = NULL;
+       }
+       return ret;
+ }
+ static int i915_check_presumed(struct drm_i915_op_arg *arg,
+                              struct drm_buffer_object *bo,
+                              uint32_t __user * data, int *presumed_ok)
+ {
+       struct drm_bo_op_req *req = &arg->d.req;
+       uint32_t hint_offset;
+       uint32_t hint = req->bo_req.hint;
+       *presumed_ok = 0;
+       if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
+               return 0;
+       if (bo->offset == req->bo_req.presumed_offset) {
+               *presumed_ok = 1;
+               return 0;
+       }
+       /*
+        * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
+        * the user-space IOCTL argument list, since the buffer has moved,
+        * we're about to apply relocations and we might subsequently
+        * hit an -EAGAIN. In that case the argument list will be reused by
+        * user-space, but the presumed offset is no longer valid.
+        *
+        * Needless to say, this is a bit ugly.
+        */
+       hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg;
+       hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
+       return __put_user(hint, data + hint_offset);
+ }
+ /*
+  * Validate, add fence and relocate a block of bos from a userspace list
+  */
+ int i915_validate_buffer_list(struct drm_file *file_priv,
+                             unsigned int fence_class, uint64_t data,
+                             struct drm_i915_validate_buffer *buffers,
+                             uint32_t * num_buffers,
+                             uint32_t __user ** post_relocs)
+ {
+       struct drm_i915_op_arg arg;
+       struct drm_bo_op_req *req = &arg.d.req;
+       int ret = 0;
+       unsigned buf_count = 0;
+       uint32_t buf_handle;
+       uint32_t __user *reloc_user_ptr;
+       struct drm_i915_validate_buffer *item = buffers;
+       *post_relocs = NULL;
+       do {
+               if (buf_count >= *num_buffers) {
+                       DRM_ERROR("Buffer count exceeded %d.\n", *num_buffers);
+                       ret = -EINVAL;
+                       goto out_err;
+               }
+               item = buffers + buf_count;
+               item->buffer = NULL;
+               item->presumed_offset_correct = 0;
+               item->idle = I915_RELOC_UNCHECKED;
+               if (copy_from_user
+                   (&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
+                       ret = -EFAULT;
+                       goto out_err;
+               }
+               ret = 0;
+               if (req->op != drm_bo_validate) {
+                       DRM_ERROR
+                           ("Buffer object operation wasn't \"validate\".\n");
+                       ret = -EINVAL;
+                       goto out_err;
+               }
+               item->ret = 0;
+               item->data = (void __user *)(unsigned long)data;
+               buf_handle = req->bo_req.handle;
+               reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr;
+               /*
+                * Switch mode to post-validation relocations?
+                */
+               if (unlikely((buf_count == 0) && (*post_relocs == NULL) &&
+                            (reloc_user_ptr != NULL))) {
+                       uint32_t reloc_type;
+                       ret = get_user(reloc_type, reloc_user_ptr + 1);
+                       if (ret)
+                               goto out_err;
+                       if (reloc_type == 1)
+                               *post_relocs = reloc_user_ptr;
+               }
+               if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) {
+                       ret =
+                           i915_exec_reloc(file_priv, buf_handle,
+                                           reloc_user_ptr, buffers, buf_count);
+                       if (ret)
+                               goto out_err;
+                       DRM_MEMORYBARRIER();
+               }
+               ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
+                                            req->bo_req.flags,
+                                            req->bo_req.mask, req->bo_req.hint,
+                                            req->bo_req.fence_class,
+                                            NULL, &item->buffer);
+               if (ret) {
+                       DRM_ERROR("error on handle validate %d\n", ret);
+                       goto out_err;
+               }
+               buf_count++;
+               ret = i915_check_presumed(&arg, item->buffer,
+                                         (uint32_t __user *)
+                                         (unsigned long)data,
+                                         &item->presumed_offset_correct);
+               if (ret)
+                       goto out_err;
+               data = arg.next;
+       } while (data != 0);
+       out_err:
+       *num_buffers = buf_count;
+       item->ret = (ret != -EAGAIN) ? ret : 0;
+       return ret;
+ }
+ /*
+  * Remove all buffers from the unfenced list.
+  * If the execbuffer operation was aborted, for example due to a signal,
+  * this also makes sure that buffers retain their original state and
+  * fence pointers.
+  * Copy back buffer information to user-space unless we were interrupted
+  * by a signal, in which case the IOCTL must be rerun.
+  */
+ static int i915_handle_copyback(struct drm_device *dev,
+                               struct drm_i915_validate_buffer *buffers,
+                               unsigned int num_buffers, int ret)
+ {
+       int err = ret;
+       int i;
+       struct drm_i915_op_arg arg;
+       struct drm_buffer_object *bo;
+       if (ret)
+               drm_putback_buffer_objects(dev);
+       if (ret != -EAGAIN) {
+               for (i = 0; i < num_buffers; ++i) {
+                       arg.handled = 1;
+                       arg.d.rep.ret = buffers->ret;
+                       bo = buffers->buffer;
+                       mutex_lock(&bo->mutex);
+                       drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
+                       mutex_unlock(&bo->mutex);
+                       if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
+                               err = -EFAULT;
+                       buffers++;
+               }
+       }
+       return err;
+ }
+ /*
+  * Create a fence object, and if that fails, pretend that everything is
+  * OK and just idle the GPU.
+  */
+ void i915_fence_or_sync(struct drm_file *file_priv,
+                       uint32_t fence_flags,
+                       struct drm_fence_arg *fence_arg,
+                       struct drm_fence_object **fence_p)
+ {
+       struct drm_device *dev = file_priv->minor->dev;
+       int ret;
+       struct drm_fence_object *fence;
+       ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence);
+       if (ret) {
+               /*
+                * Fence creation failed.
+                * Fall back to synchronous operation and idle the engine.
+                */
+               (void)i915_emit_mi_flush(dev, MI_READ_FLUSH);
+               (void)i915_quiescent(dev);
+               if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
+                       /*
+                        * Communicate to user-space that
+                        * fence creation has failed and that
+                        * the engine is idle.
+                        */
+                       fence_arg->handle = ~0;
+                       fence_arg->error = ret;
+               }
+               drm_putback_buffer_objects(dev);
+               if (fence_p)
+                       *fence_p = NULL;
+               return;
+       }
+       if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
+               ret = drm_fence_add_user_object(file_priv, fence,
+                                               fence_flags &
+                                               DRM_FENCE_FLAG_SHAREABLE);
+               if (!ret)
+                       drm_fence_fill_arg(fence, fence_arg);
+               else {
+                       /*
+                        * Fence user object creation failed.
+                        * We must idle the engine here as well, as user-
+                        * space expects a fence object to wait on. Since we
+                        * have a fence object we wait for it to signal
+                        * to indicate engine "sufficiently" idle.
+                        */
+                       (void)drm_fence_object_wait(fence, 0, 1, fence->type);
+                       drm_fence_usage_deref_unlocked(&fence);
+                       fence_arg->handle = ~0;
+                       fence_arg->error = ret;
+               }
+       }
+       if (fence_p)
+               *fence_p = fence;
+       else if (fence)
+               drm_fence_usage_deref_unlocked(&fence);
+ }
+ int i915_execbuffer(struct drm_device *dev, void *data,
+                   struct drm_file *file_priv)
+ {
++      struct drm_i915_private *dev_priv = (struct drm_i915_private *) 
++              dev->dev_private;
++      struct drm_i915_master_private *master_priv = 
++              (struct drm_i915_master_private *)
++              dev->primary->master->driver_priv;
++      struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
++              master_priv->sarea_priv;
+       struct drm_i915_execbuffer *exec_buf = data;
+       struct drm_i915_batchbuffer *batch = &exec_buf->batch;
+       struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
+       int num_buffers;
+       int ret;
+       uint32_t __user *post_relocs;
+       if (!dev_priv->allow_batchbuffer) {
+               DRM_ERROR("Batchbuffer ioctl disabled\n");
+               return -EINVAL;
+       }
+       if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+                                                       batch->num_cliprects *
+                                                       sizeof(struct
+                                                              drm_clip_rect)))
+               return -EFAULT;
+       if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
+               return -EINVAL;
+       ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
+       if (ret)
+               return ret;
+       /*
+        * The cmdbuf_mutex makes sure the validate-submit-fence
+        * operation is atomic.
+        */
+       ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+       if (ret) {
+               drm_bo_read_unlock(&dev->bm.bm_lock);
+               return -EAGAIN;
+       }
+       num_buffers = exec_buf->num_buffers;
+       if (!dev_priv->val_bufs) {
+               dev_priv->val_bufs =
+                   vmalloc(sizeof(struct drm_i915_validate_buffer) *
+                           dev_priv->max_validate_buffers);
+       }
+       if (!dev_priv->val_bufs) {
+               drm_bo_read_unlock(&dev->bm.bm_lock);
+               mutex_unlock(&dev_priv->cmdbuf_mutex);
+               return -ENOMEM;
+       }
+       /* validate buffer list + fixup relocations */
+       ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
+                                       dev_priv->val_bufs, &num_buffers,
+                                       &post_relocs);
+       if (ret)
+               goto out_err0;
+       if (post_relocs) {
+               ret = i915_post_relocs(file_priv, post_relocs,
+                                      dev_priv->val_bufs, num_buffers);
+               if (ret)
+                       goto out_err0;
+       }
+       /* make sure all previous memory operations have passed */
+       DRM_MEMORYBARRIER();
+       if (!post_relocs) {
+               drm_agp_chipset_flush(dev);
+               batch->start =
+                   dev_priv->val_bufs[num_buffers - 1].buffer->offset;
+       } else {
+               batch->start += dev_priv->val_bufs[0].buffer->offset;
+       }
+       DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
+                 batch->start, batch->used, batch->num_cliprects);
+       ret = i915_dispatch_batchbuffer(dev, batch);
+       if (ret)
+               goto out_err0;
+       if (sarea_priv)
+               sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+       i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);
+       out_err0:
+       ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
+       mutex_lock(&dev->struct_mutex);
+       i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
+       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+       drm_bo_read_unlock(&dev->bm.bm_lock);
+       return ret;
+ }
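
For reference, the relocation stream that i915_process_relocs() and i915_post_relocs() parse above looks roughly like this from user space (a sketch: the struct name is illustrative, and the next pointer occupies two header dwords, read as an unsigned long on the kernel side):

struct example_i915_reloc_list {
	uint32_t count;	/* entries that follow the header               */
	uint32_t type;	/* 0 = apply during validate, 1 = post-validate */
	uint64_t next;	/* user address of the next list, 0 terminates  */
	/*
	 * Then `count' entries of I915_RELOC0_STRIDE dwords each:
	 *   [0] byte offset of the dword to patch
	 *   [1] delta added to the referenced buffer's final offset
	 *   [2] referenced buffer: hash key (type 0) or list index (type 1)
	 *   [3] destination buffer list index (type 1 only)
	 */
};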
Simple merge
@@@ -555,44 -555,10 +555,44 @@@ union drm_wait_vblank 
        struct drm_wait_vblank_reply reply;
  };
  
 +/* Handle monitor hotplug.
 + *
 + * May want to extend this later to pass reply information which
 + * details the outputs which generated the hotplug event.
 + * Some chipsets can't determine that though, and we'd need to leave
 + * it to the higher levels to determine exactly what changed.
 + */
 +enum drm_hotplug_seq_type {
 +      _DRM_HOTPLUG_SIGNAL = 0x00000001, /**< Send signal instead of blocking */
 +};
 +struct drm_wait_hotplug_request {
 +      enum drm_hotplug_seq_type type;
 +      unsigned long signal;
 +};
 +
 +struct drm_wait_hotplug_reply {
 +      enum drm_hotplug_seq_type type;
 +      unsigned int counter;
 +      long tval_sec;
 +      long tval_usec;
 +};
 +
 +/**
 + * DRM_IOCTL_WAIT_HOTPLUG ioctl argument type.
 + *
 + * \sa drmWaitHotplug().
 + */
 +union drm_wait_hotplug {
 +      struct drm_wait_hotplug_request request;
 +      struct drm_wait_hotplug_reply reply;
 +};
 +
 +enum drm_modeset_ctl_cmd {
 +      _DRM_PRE_MODESET = 1,
 +      _DRM_POST_MODESET = 2,
 +};
  
 -#define _DRM_PRE_MODESET 1
 -#define _DRM_POST_MODESET 2
  /**
   * DRM_IOCTL_MODESET_CTL ioctl argument type
   *
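
A hedged user-space sketch of the new hotplug wait (DRM_IOCTL_WAIT_HOTPLUG itself is defined elsewhere in this tree; error handling trimmed):

#include <stdio.h>
#include <sys/ioctl.h>
#include "drm.h"

static void example_wait_for_hotplug(int drm_fd)
{
	union drm_wait_hotplug hp = {0};  /* blocking: no _DRM_HOTPLUG_SIGNAL */

	if (ioctl(drm_fd, DRM_IOCTL_WAIT_HOTPLUG, &hp) == 0)
		printf("hotplug #%u at %ld.%06ld\n", hp.reply.counter,
		       hp.reply.tval_sec, hp.reply.tval_usec);
}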
Simple merge
@@@ -578,10 -583,10 +578,10 @@@ static int i915_dispatch_cmdbuffer(stru
        return 0;
  }
  
- static int i915_dispatch_batchbuffer(struct drm_device * dev,
-                                    drm_i915_batchbuffer_t * batch)
+ int i915_dispatch_batchbuffer(struct drm_device * dev,
+                             drm_i915_batchbuffer_t * batch)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_clip_rect __user *boxes = batch->cliprects;
        int nbox = batch->num_cliprects;
        int i = 0, count;
@@@ -708,9 -711,9 +708,9 @@@ void i915_dispatch_flip(struct drm_devi
  #endif
  }
  
- static int i915_quiescent(struct drm_device *dev)
+ int i915_quiescent(struct drm_device *dev)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
  
        i915_kernel_lost_context(dev);
        return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
@@@ -795,575 -796,17 +795,17 @@@ static int i915_cmdbuffer(struct drm_de
  #define DRM_DEBUG_RELOCATION  0
  #endif
  
- #ifdef I915_HAVE_BUFFER
- struct i915_relocatee_info {
-       struct drm_buffer_object *buf;
-       unsigned long offset;
-       u32 *data_page;
-       unsigned page_offset;
-       struct drm_bo_kmap_obj kmap;
-       int is_iomem;
-       int idle;
-       int evicted;
- };
- struct drm_i915_validate_buffer {
-       struct drm_buffer_object *buffer;
-       struct drm_bo_info_rep rep;
-       int presumed_offset_correct;
-       void __user *data;
-       int ret;
- };
- static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer *buffers,
-                                           unsigned num_buffers)
- {
-       while (num_buffers--)
-               drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
- }
- int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
-                    struct drm_i915_validate_buffer *buffers,
-                    struct i915_relocatee_info *relocatee,
-                    uint32_t *reloc)
- {
-       unsigned index;
-       unsigned long new_cmd_offset;
-       u32 val;
-       int ret, i;
-       int buf_index = -1;
-       /*
-        * FIXME: O(relocs * buffers) complexity.
-        */
-       for (i = 0; i <= num_buffers; i++)
-               if (buffers[i].buffer)
-                       if (reloc[2] == buffers[i].buffer->base.hash.key)
-                               buf_index = i;
-       if (buf_index == -1) {
-               DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
-               return -EINVAL;
-       }
-       /*
-        * Short-circuit relocations that were correctly
-        * guessed by the client
-        */
-       if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
-               return 0;
-       new_cmd_offset = reloc[0];
-       if (!relocatee->data_page ||
-           !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
-               drm_bo_kunmap(&relocatee->kmap);
-               relocatee->data_page = NULL;
-               relocatee->offset = new_cmd_offset;
-               
-               if (unlikely(!relocatee->idle)) {
-                       ret = drm_bo_wait(relocatee->buf, 0, 0, 0);
-                       if (ret)
-                               return ret;
-                       relocatee->idle = 1;
-               }
-               ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
-                                 1, &relocatee->kmap);
-               if (ret) {
-                       DRM_ERROR("Could not map command buffer to apply relocs\n %08lx", new_cmd_offset);
-                       return ret;
-               }
-               relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
-                                                      &relocatee->is_iomem);
-               relocatee->page_offset = (relocatee->offset & PAGE_MASK);
-               
-               if (!relocatee->evicted && 
-                   relocatee->buf->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
-                   drm_bo_evict_cached(relocatee->buf);
-                   relocatee->evicted = 1;
-               }
-       }
-       val = buffers[buf_index].buffer->offset;
-       index = (reloc[0] - relocatee->page_offset) >> 2;
-       /* add in validate */
-       val = val + reloc[1];
-       if (DRM_DEBUG_RELOCATION) {
-               if (buffers[buf_index].presumed_offset_correct &&
-                   relocatee->data_page[index] != val) {
-                       DRM_DEBUG ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
-                                  reloc[0], reloc[1], buf_index, relocatee->data_page[index], val);
-               }
-       }
-       if (relocatee->is_iomem)
-               iowrite32(val, relocatee->data_page + index);
-       else
-               relocatee->data_page[index] = val;
-       return 0;
- }
- int i915_process_relocs(struct drm_file *file_priv,
-                       uint32_t buf_handle,
-                       uint32_t __user **reloc_user_ptr,
-                       struct i915_relocatee_info *relocatee,
-                       struct drm_i915_validate_buffer *buffers,
-                       uint32_t num_buffers)
- {
-       int ret, reloc_stride;
-       uint32_t cur_offset;
-       uint32_t reloc_count;
-       uint32_t reloc_type;
-       uint32_t reloc_buf_size;
-       uint32_t *reloc_buf = NULL;
-       int i;
-       /* do a copy from user from the user ptr */
-       ret = get_user(reloc_count, *reloc_user_ptr);
-       if (ret) {
-               DRM_ERROR("Could not map relocation buffer.\n");
-               goto out;
-       }
-       ret = get_user(reloc_type, (*reloc_user_ptr)+1);
-       if (ret) {
-               DRM_ERROR("Could not map relocation buffer.\n");
-               goto out;
-       }
-       if (reloc_type != 0) {
-               DRM_ERROR("Unsupported relocation type requested\n");
-               ret = -EINVAL;
-               goto out;
-       }
-       reloc_buf_size = (I915_RELOC_HEADER + (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
-       reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
-       if (!reloc_buf) {
-               DRM_ERROR("Out of memory for reloc buffer\n");
-               ret = -ENOMEM;
-               goto out;
-       }
-       if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
-               ret = -EFAULT;
-               goto out;
-       }
-       /* get next relocate buffer handle */
-       *reloc_user_ptr = (uint32_t *)*(unsigned long *)&reloc_buf[2];
-       reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */
-       DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count, *reloc_user_ptr);
-       for (i = 0; i < reloc_count; i++) {
-               cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);
-                 
-               ret = i915_apply_reloc(file_priv, num_buffers, buffers,
-                                      relocatee, reloc_buf + cur_offset);
-               if (ret)
-                       goto out;
-       }
- out:
-       if (reloc_buf)
-               kfree(reloc_buf);
-       if (relocatee->data_page) {             
-               drm_bo_kunmap(&relocatee->kmap);
-               relocatee->data_page = NULL;
-       }
-       return ret;
- }
- static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
-                          uint32_t __user *reloc_user_ptr,
-                          struct drm_i915_validate_buffer *buffers,
-                          uint32_t buf_count)
- {
-       struct drm_device *dev = file_priv->minor->dev;
-       struct i915_relocatee_info relocatee;
-       int ret = 0;
-       int b;
-       /*
-        * Short circuit relocations when all previous
-        * buffers offsets were correctly guessed by
-        * the client
-        */
-       if (!DRM_DEBUG_RELOCATION) {
-               for (b = 0; b < buf_count; b++)
-                       if (!buffers[b].presumed_offset_correct)
-                               break;
-       
-               if (b == buf_count)
-                       return 0;
-       }
-       memset(&relocatee, 0, sizeof(relocatee));
-       mutex_lock(&dev->struct_mutex);
-       relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
-       mutex_unlock(&dev->struct_mutex);
-       if (!relocatee.buf) {
-               DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
-               ret = -EINVAL;
-               goto out_err;
-       }
-       mutex_lock (&relocatee.buf->mutex);
-       while (reloc_user_ptr) {
-               ret = i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, &relocatee, buffers, buf_count);
-               if (ret) {
-                       DRM_ERROR("process relocs failed\n");
-                       goto out_err1;
-               }
-       }
- out_err1:
-       mutex_unlock (&relocatee.buf->mutex);
-       drm_bo_usage_deref_unlocked(&relocatee.buf);
- out_err:
-       return ret;
- }
- static int i915_check_presumed(struct drm_i915_op_arg *arg,
-                              struct drm_buffer_object *bo,
-                              uint32_t __user *data,
-                              int *presumed_ok)
- {
-       struct drm_bo_op_req *req = &arg->d.req;
-       uint32_t hint_offset;
-       uint32_t hint = req->bo_req.hint;
-       *presumed_ok = 0;
-       if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
-               return 0;
-       if (bo->offset == req->bo_req.presumed_offset) {
-               *presumed_ok = 1;
-               return 0;
-       }
-       /*
-        * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
-        * the user-space IOCTL argument list, since the buffer has moved,
-        * we're about to apply relocations and we might subsequently
-        * hit an -EAGAIN. In that case the argument list will be reused by
-        * user-space, but the presumed offset is no longer valid.
-        *
-        * Needless to say, this is a bit ugly.
-        */
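-       /* dword index of the hint field within the user-space arg */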
-       hint_offset = (uint32_t *)&req->bo_req.hint - (uint32_t *)arg;
-       hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
-       return __put_user(hint, data + hint_offset);
- }
- /*
-  * Validate, add fence and relocate a block of bos from a userspace list
-  */
- int i915_validate_buffer_list(struct drm_file *file_priv,
-                             unsigned int fence_class, uint64_t data,
-                             struct drm_i915_validate_buffer *buffers,
-                             uint32_t *num_buffers)
- {
-       struct drm_i915_op_arg arg;
-       struct drm_bo_op_req *req = &arg.d.req;
-       int ret = 0;
-       unsigned buf_count = 0;
-       uint32_t buf_handle;
-       uint32_t __user *reloc_user_ptr;
-       struct drm_i915_validate_buffer *item = buffers;
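-
-       /*
-        * The ops list is a user-space linked list: each drm_i915_op_arg
-        * holds the user address of the next entry in arg.next; a zero
-        * next pointer terminates the chain.
-        */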
-       do {
-               if (buf_count >= *num_buffers) {
-                       DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
-                       ret = -EINVAL;
-                       goto out_err;
-               }
-               item = buffers + buf_count;
-               item->buffer = NULL;
-               item->presumed_offset_correct = 0;
-               if (copy_from_user(&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
-                       ret = -EFAULT;
-                       goto out_err;
-               }
-               ret = 0;
-               if (req->op != drm_bo_validate) {
-                       DRM_ERROR
-                           ("Buffer object operation wasn't \"validate\".\n");
-                       ret = -EINVAL;
-                       goto out_err;
-               }
-               item->ret = 0;
-               item->data = (void __user *) (unsigned long) data;
-               buf_handle = req->bo_req.handle;
-               reloc_user_ptr = (uint32_t *)(unsigned long)arg.reloc_ptr;
-               if (reloc_user_ptr) {
-                       ret = i915_exec_reloc(file_priv, buf_handle, reloc_user_ptr, buffers, buf_count);
-                       if (ret)
-                               goto out_err;
-                       DRM_MEMORYBARRIER();
-               }
-               ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
-                                            req->bo_req.flags, req->bo_req.mask,
-                                            req->bo_req.hint,
-                                            req->bo_req.fence_class, 0,
-                                            &item->rep,
-                                            &item->buffer);
-               if (ret) {
-                       DRM_ERROR("error on handle validate %d\n", ret);
-                       goto out_err;
-               }
-               buf_count++;
-               ret = i915_check_presumed(&arg, item->buffer,
-                                         (uint32_t __user *)
-                                         (unsigned long) data,
-                                         &item->presumed_offset_correct);
-               if (ret)
-                       goto out_err;
-               data = arg.next;
-       } while (data != 0);
- out_err:
-       *num_buffers = buf_count;
-       item->ret = (ret != -EAGAIN) ? ret : 0;
-       return ret;
- }
- /*
-  * Remove all buffers from the unfenced list.
-  * If the execbuffer operation was aborted, for example due to a signal,
-  * this also makes sure that the buffers retain their original state and
-  * fence pointers.
-  * Copy buffer information back to user-space unless we were interrupted
-  * by a signal, in which case the IOCTL must be rerun.
-  */
- static int i915_handle_copyback(struct drm_device *dev,
-                               struct drm_i915_validate_buffer *buffers,
-                               unsigned int num_buffers, int ret)
- {
-       int err = ret;
-       int i;
-       struct drm_i915_op_arg arg;
-       if (ret)
-               drm_putback_buffer_objects(dev);
-       if (ret != -EAGAIN) {
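-               /*
-                * Write per-buffer status back into each user-space
-                * drm_i915_op_arg so the caller can inspect the results.
-                */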
-               for (i = 0; i < num_buffers; ++i) {
-                       arg.handled = 1;
-                       arg.d.rep.ret = buffers->ret;
-                       arg.d.rep.bo_info = buffers->rep;
-                       if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
-                               err = -EFAULT;
-                       buffers++;
-               }
-       }
-       return err;
- }
- /*
-  * Create a fence object, and if that fails, pretend that everything is
-  * OK and just idle the GPU.
-  */
- void i915_fence_or_sync(struct drm_file *file_priv,
-                       uint32_t fence_flags,
-                       struct drm_fence_arg *fence_arg,
-                       struct drm_fence_object **fence_p)
- {
-       struct drm_device *dev = file_priv->minor->dev;
-       int ret;
-       struct drm_fence_object *fence;
-       ret = drm_fence_buffer_objects(dev, NULL, fence_flags,
-                        NULL, &fence);
-       if (ret) {
-               /*
-                * Fence creation failed.
-                * Fall back to synchronous operation and idle the engine.
-                */
-               (void) i915_emit_mi_flush(dev, MI_READ_FLUSH);
-               (void) i915_quiescent(dev);
-               if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-                       /*
-                        * Communicate to user-space that
-                        * fence creation has failed and that
-                        * the engine is idle.
-                        */
-                       fence_arg->handle = ~0;
-                       fence_arg->error = ret;
-               }
-               drm_putback_buffer_objects(dev);
-               if (fence_p)
-                   *fence_p = NULL;
-               return;
-       }
-       if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-               ret = drm_fence_add_user_object(file_priv, fence,
-                                               fence_flags &
-                                               DRM_FENCE_FLAG_SHAREABLE);
-               if (!ret)
-                       drm_fence_fill_arg(fence, fence_arg);
-               else {
-                       /*
-                        * Fence user object creation failed.
-                        * We must idle the engine here as well, as user-
-                        * space expects a fence object to wait on. Since we
-                        * do have a fence object, we wait for it to signal,
-                        * which indicates the engine is "sufficiently" idle.
-                        */
-                       (void) drm_fence_object_wait(fence, 0, 1,
-                                                    fence->type);
-                       drm_fence_usage_deref_unlocked(&fence);
-                       fence_arg->handle = ~0;
-                       fence_arg->error = ret;
-               }
-       }
-       if (fence_p)
-               *fence_p = fence;
-       else if (fence)
-               drm_fence_usage_deref_unlocked(&fence);
- }
- static int i915_execbuffer(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv)
- {
-       struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
-       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-       drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
-               master_priv->sarea_priv;
-       struct drm_i915_execbuffer *exec_buf = data;
-       struct drm_i915_batchbuffer *batch = &exec_buf->batch;
-       struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
-       int num_buffers;
-       int ret;
-       if (!dev_priv->allow_batchbuffer) {
-               DRM_ERROR("Batchbuffer ioctl disabled\n");
-               return -EINVAL;
-       }
-       if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
-                                                       batch->num_cliprects *
-                                                       sizeof(struct drm_clip_rect)))
-               return -EFAULT;
-       if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
-               return -EINVAL;
-       ret = drm_bo_read_lock(&dev->bm.bm_lock);
-       if (ret)
-               return ret;
-       /*
-        * The cmdbuf_mutex makes sure the validate-submit-fence
-        * operation is atomic.
-        */
-       ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
-       if (ret) {
-               drm_bo_read_unlock(&dev->bm.bm_lock);
-               return -EAGAIN;
-       }
-       num_buffers = exec_buf->num_buffers;
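-       /*
-        * val_bufs is allocated lazily on first use and then reused by
-        * later execbuffer calls; access is serialized by cmdbuf_mutex.
-        */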
-       if (!dev_priv->val_bufs) {
-               dev_priv->val_bufs =
-                       vmalloc(sizeof(struct drm_i915_validate_buffer)*
-                               dev_priv->max_validate_buffers);
-       }
-       if (!dev_priv->val_bufs) {
-               drm_bo_read_unlock(&dev->bm.bm_lock);
-               mutex_unlock(&dev_priv->cmdbuf_mutex);
-               return -ENOMEM;
-       }
-       /* validate buffer list + fixup relocations */
-       ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
-                                       dev_priv->val_bufs, &num_buffers);
-       if (ret)
-               goto out_err0;
-       /* make sure all previous memory operations have passed */
-       DRM_MEMORYBARRIER();
-       drm_agp_chipset_flush(dev);
-       /* submit buffer */
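-       /*
-        * The batch buffer is expected to be the last entry of the
-        * validate list; its GPU offset becomes the batch start address.
-        */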
-       batch->start = dev_priv->val_bufs[num_buffers-1].buffer->offset;
-       DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
-                 batch->start, batch->used, batch->num_cliprects);
-       ret = i915_dispatch_batchbuffer(dev, batch);
-       if (ret)
-               goto out_err0;
-       if (sarea_priv)
-               sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-       i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);
- out_err0:
-       /* handle errors */
-       ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
-       mutex_lock(&dev->struct_mutex);
-       i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
-       mutex_unlock(&dev->struct_mutex);
-       mutex_unlock(&dev_priv->cmdbuf_mutex);
-       drm_bo_read_unlock(&dev->bm.bm_lock);
-       return ret;
- }
- #endif
 -static int i915_do_cleanup_pageflip(struct drm_device * dev)
 +int i915_do_cleanup_pageflip(struct drm_device * dev)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
 +      struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 +      int i, planes, num_pages;
  
        DRM_DEBUG("\n");
 -
 -      for (i = 0, planes = 0; i < 2; i++)
 -              if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
 -                      dev_priv->sarea_priv->pf_current_page =
 -                              (dev_priv->sarea_priv->pf_current_page &
 +      num_pages = master_priv->sarea_priv->third_handle ? 3 : 2;
 +      for (i = 0, planes = 0; i < 2; i++) {
 +              if (master_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
 +                      master_priv->sarea_priv->pf_current_page =
 +                              (master_priv->sarea_priv->pf_current_page &
                                 ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));
  
                        planes |= 1 << i;
@@@ -1545,9 -967,12 +987,12 @@@ static int i915_mmio(struct drm_device 
  static int i915_set_status_page(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
  
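+       /* Setting a status page address is only meaningful on chips
+        * that keep the hardware status page in graphics memory.
+        */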
+       if (!I915_NEED_GFX_HWS(dev))
+               return -EINVAL;
        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
Simple merge
@@@ -285,8 -267,9 +287,11 @@@ extern void i915_emit_breadcrumb(struc
  extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync);
  extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush);
  extern int i915_driver_firstopen(struct drm_device *dev);
 +extern int i915_do_cleanup_pageflip(struct drm_device *dev);
 +extern int i915_dma_cleanup(struct drm_device *dev);
+ extern int i915_dispatch_batchbuffer(struct drm_device * dev,
+                                    drm_i915_batchbuffer_t * batch);
+ extern int i915_quiescent(struct drm_device *dev);
  
  /* i915_irq.c */
  extern int i915_irq_emit(struct drm_device *dev, void *data,
@@@ -875,15 -733,12 +885,17 @@@ extern int i915_wait_ring(struct drm_de
  
  #define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
  #define XY_SRC_COPY_BLT_CMD           ((2<<29)|(0x53<<22)|6)
 +#define XY_MONO_SRC_COPY_IMM_BLT      ((2<<29)|(0x71<<22)|5)
  #define XY_SRC_COPY_BLT_WRITE_ALPHA   (1<<21)
  #define XY_SRC_COPY_BLT_WRITE_RGB     (1<<20)
 +#define   BLT_DEPTH_8                 (0<<24)
 +#define   BLT_DEPTH_16_565            (1<<24)
 +#define   BLT_DEPTH_16_1555           (2<<24)
 +#define   BLT_DEPTH_32                        (3<<24)
 +#define   BLT_ROP_GXCOPY              (0xcc<<16)
+ #define XY_SRC_COPY_BLT_SRC_TILED     (1<<15)
+ #define XY_SRC_COPY_BLT_DST_TILED     (1<<11)
  
 -
  #define MI_BATCH_BUFFER               ((0x30<<23)|1)
  #define MI_BATCH_BUFFER_START (0x31<<23)
  #define MI_BATCH_BUFFER_END   (0xA<<23)
@@@ -187,12 -196,6 +198,9 @@@ static void i915_vblank_tasklet(struct 
                if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
                        continue;
  
-               pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
-                       (cpp << 23) | (1 << 24);
 +              master_priv = vbl_swap->minor->master->driver_priv;
 +              sarea_priv = master_priv->sarea_priv;
 +
                list_del(list);
                dev_priv->swaps_pending--;
                drm_vblank_put(dev, pipe);
Simple merge
index fc359d4,0000000..bda9a84
mode 100644,000000..100644
--- /dev/null
@@@ -1,395 -1,0 +1,404 @@@
-                                    0,
 +/*
 + * Copyright 2007 Jérôme Glisse
 + * All Rights Reserved.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the next
 + * paragraph) shall be included in all copies or substantial portions of the
 + * Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + *
 + * Authors:
 + *    Jerome Glisse <glisse@freedesktop.org>
 + */
 +#include "radeon_ms.h"
 +#include "amd.h"
 +
 +static inline void amd_cmd_bo_cleanup(struct drm_device *dev,
 +                                    struct amd_cmd *cmd)
 +{
 +      struct amd_cmd_bo *bo;
 +
 +      mutex_lock(&dev->struct_mutex);
 +      list_for_each_entry(bo, &cmd->bo_unused.list, list) {
 +              drm_bo_usage_deref_locked(&bo->bo);
 +      }
 +      list_for_each_entry(bo, &cmd->bo_used.list, list) {
 +              drm_bo_usage_deref_locked(&bo->bo);
 +      }
 +      mutex_unlock(&dev->struct_mutex);
 +}
 +
 +static inline int amd_cmd_bo_validate(struct drm_device *dev,
 +                                    struct drm_file *file,
 +                                    struct amd_cmd_bo *cmd_bo,
 +                                    struct drm_amd_cmd_bo *bo,
 +                                    uint64_t data)
 +{
 +      int ret;
 +
 +      /* validate only cmd indirect, data or cmd ring bos */
 +      switch (bo->type) {
 +      case DRM_AMD_CMD_BO_TYPE_CMD_INDIRECT:
 +      case DRM_AMD_CMD_BO_TYPE_DATA:
 +      case DRM_AMD_CMD_BO_TYPE_CMD_RING:
 +              /* FIXME: make sure userspace can no longer map the bo */
 +              break;
 +      default:
 +              return 0;
 +      }
 +      /* check that the requested buffer operation is validate */
 +      if (bo->op_req.op != drm_bo_validate) {
 +              DRM_ERROR("buffer object 0x%x operation is not validate.\n",
 +                        cmd_bo->handle);
 +              return -EINVAL;
 +      }
 +      /* validate buffer */
 +      memset(&bo->op_rep, 0, sizeof(struct drm_bo_arg_rep));
 +      ret = drm_bo_handle_validate(file,
 +                                   bo->op_req.bo_req.handle,
 +                                   bo->op_req.bo_req.flags,
 +                                   bo->op_req.bo_req.mask,
 +                                   bo->op_req.bo_req.hint,
 +                                   bo->op_req.bo_req.fence_class,
-       /* FIXME: Lock buffer manager, is this really needed ? */
-       ret = drm_bo_read_lock(&dev->bm.bm_lock);
 +                                   &bo->op_rep.bo_info,
 +                                   &cmd_bo->bo);
 +      if (ret) {
 +              DRM_ERROR("validate error %d for 0x%08x\n",
 +                        ret, cmd_bo->handle);
 +              return ret;
 +      }
 +      if (copy_to_user((void __user *)(unsigned long)data, bo,
 +                       sizeof(struct drm_amd_cmd_bo))) {
 +              DRM_ERROR("failed to copy to user validate result of 0x%08x\n",
 +                        cmd_bo->handle);
 +              return -EFAULT;
 +      }
 +      return 0;
 +}
 +
 +static int amd_cmd_parse_cmd_bo(struct drm_device *dev,
 +                              struct drm_file *file,
 +                              struct drm_amd_cmd *drm_amd_cmd,
 +                              struct amd_cmd *cmd)
 +{
 +      struct drm_amd_cmd_bo drm_amd_cmd_bo;
 +      struct amd_cmd_bo *cmd_bo;
 +      uint32_t bo_count = 0;
 +      uint64_t data = drm_amd_cmd->bo;
 +      int ret = 0;
 +
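 +      /*
 +       * The bo list is a user-space linked list: each drm_amd_cmd_bo
 +       * holds the user address of the next entry in .next; a zero
 +       * next pointer terminates the chain.
 +       */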
 +      do {
 +              /* check we don't have more buffers than announced */
 +              if (bo_count >= drm_amd_cmd->bo_count) {
 +                      DRM_ERROR("cmd bo count exceeded, got %d expected %d.\n",
 +                                bo_count, drm_amd_cmd->bo_count);
 +                      return -EINVAL;
 +              }
 +              /* initialize amd_cmd_bo */
 +              cmd_bo = &cmd->bo[bo_count];
 +              INIT_LIST_HEAD(&cmd_bo->list);
 +              cmd_bo->bo = NULL;
 +              /* copy from userspace */
 +              if (copy_from_user(&drm_amd_cmd_bo,
 +                                 (void __user *)(unsigned long)data,
 +                                 sizeof(struct drm_amd_cmd_bo))) {
 +                      return -EFAULT;
 +              }
 +              /* collect information */
 +              cmd_bo->type = drm_amd_cmd_bo.type;
 +              cmd_bo->mask = drm_amd_cmd_bo.op_req.bo_req.mask;
 +              cmd_bo->flags = drm_amd_cmd_bo.op_req.bo_req.flags;
 +              cmd_bo->handle = drm_amd_cmd_bo.op_req.arg_handle;
 +              /* get bo objects */
 +              mutex_lock(&dev->struct_mutex);
 +              cmd_bo->bo = drm_lookup_buffer_object(file, cmd_bo->handle, 1);
 +              mutex_unlock(&dev->struct_mutex);
 +              if (cmd_bo->bo == NULL) {
 +                      DRM_ERROR("unknown bo handle 0x%x\n", cmd_bo->handle);
 +                      return -EINVAL;
 +              }
 +              /* validate buffer if necessary */
 +              ret = amd_cmd_bo_validate(dev, file, cmd_bo,
 +                                        &drm_amd_cmd_bo, data);
 +              if (ret) {
 +                      mutex_lock(&dev->struct_mutex);
 +                      drm_bo_usage_deref_locked(&cmd_bo->bo);
 +                      mutex_unlock(&dev->struct_mutex);
 +                      return ret;
 +              }
 +              /* inspect bo type */
 +              switch (cmd_bo->type) {
 +              case DRM_AMD_CMD_BO_TYPE_CMD_INDIRECT:
 +                      /* add it so we properly unreference in case of error */
 +                      list_add_tail(&cmd_bo->list, &cmd->bo_used.list);
 +                      return -EINVAL;
 +              case DRM_AMD_CMD_BO_TYPE_DATA:
 +                      /* add to unused list */
 +                      list_add_tail(&cmd_bo->list, &cmd->bo_unused.list);
 +                      break;
 +              case DRM_AMD_CMD_BO_TYPE_CMD_RING:
 +                      /* set cdw_bo */
 +                      list_add_tail(&cmd_bo->list, &cmd->bo_used.list);
 +                      cmd->cdw_bo = cmd_bo;
 +                      break;
 +              default:
 +                      mutex_lock(&dev->struct_mutex);
 +                      drm_bo_usage_deref_locked(&cmd_bo->bo);
 +                      mutex_unlock(&dev->struct_mutex);
 +                      DRM_ERROR("unknow bo 0x%x unknown type 0x%x in cmd\n",
 +                                cmd_bo->handle, cmd_bo->type);
 +                      return -EINVAL;
 +              }
 +              /* ok next bo */
 +              data = drm_amd_cmd_bo.next;
 +              bo_count++;
 +      } while (data != 0);
 +      if (bo_count != drm_amd_cmd->bo_count) {
 +              DRM_ERROR("not enought buffer got %d expected %d\n.",
 +                        bo_count, drm_amd_cmd->bo_count);
 +              return -EINVAL;
 +      }
 +      return 0;
 +}
 +
 +static int amd_cmd_packet0_check(struct drm_device *dev,
 +                               struct amd_cmd *cmd,
 +                               int *cdw_id)
 +{
 +      struct drm_radeon_private *dev_priv = dev->dev_private;
 +      uint32_t reg, count, r, i;
 +      int ret;
 +
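 +      /*
 +       * A packet0 header encodes the first register offset (in dword
 +       * units) and a count; count + 1 data dwords follow the header,
 +       * one per consecutive register.
 +       */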
 +      reg = cmd->cdw[*cdw_id] & PACKET0_REG_MASK;
 +      count = (cmd->cdw[*cdw_id] & PACKET0_COUNT_MASK) >> PACKET0_COUNT_SHIFT;
 +      if (reg + count > dev_priv->cmd_module.numof_p0_checkers) {
 +              DRM_ERROR("0x%08X registers is above last accepted registers\n",
 +                        reg << 2);
 +              return -EINVAL;
 +      }
 +      for (r = reg, i = 0; i <= count; i++, r++) {
 +              if (dev_priv->cmd_module.check_p0[r] == NULL) {
 +                      continue;
 +              }
 +              if (dev_priv->cmd_module.check_p0[r] == (void *)-1) {
 +                      DRM_ERROR("register 0x%08X (at %d) is forbidden\n",
 +                               r << 2, (*cdw_id) + i + 1);
 +                      return -EINVAL;
 +              }
 +              ret = dev_priv->cmd_module.check_p0[r](dev, cmd,
 +                                                     (*cdw_id) + i + 1, r);
 +              if (ret) {
 +                      return ret;
 +              }
 +      }
 +      /* the header plus count + 1 data dwords passed the test */
 +      (*cdw_id) += count + 2;
 +      return 0;
 +}
 +
 +static int amd_cmd_packet3_check(struct drm_device *dev,
 +                               struct amd_cmd *cmd,
 +                               int *cdw_id)
 +{
 +      struct drm_radeon_private *dev_priv = dev->dev_private;
 +      uint32_t opcode, count;
 +      int ret;
 +
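 +      /*
 +       * A packet3 header encodes an opcode and a count; the opcode
 +       * selects the checker that validates the packet data dwords.
 +       */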
 +      opcode = (cmd->cdw[*cdw_id] & PACKET3_OPCODE_MASK) >>
 +               PACKET3_OPCODE_SHIFT;
 +      if (opcode > dev_priv->cmd_module.numof_p3_checkers) {
 +              DRM_ERROR("0x%08X opcode is above last accepted opcodes\n",
 +                        opcode);
 +              return -EINVAL;
 +      }
 +      count = (cmd->cdw[*cdw_id] & PACKET3_COUNT_MASK) >> PACKET3_COUNT_SHIFT;
 +      if (dev_priv->cmd_module.check_p3[opcode] == NULL) {
 +              DRM_ERROR("0x%08X opcode is forbidden\n", opcode);
 +              return -EINVAL;
 +      }
 +      ret = dev_priv->cmd_module.check_p3[opcode](dev, cmd,
 +                                                  (*cdw_id) + 1, opcode,
 +                                                  count);
 +      if (ret) {
 +              return ret;
 +      }
 +      /* the header plus count + 1 data dwords passed the test */
 +      (*cdw_id) += count + 2;
 +      return 0;
 +}
 +
 +int amd_cmd_check(struct drm_device *dev, struct amd_cmd *cmd)
 +{
 +      uint32_t i;
 +      int ret;
 +
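 +      /*
 +       * Walk the command stream packet by packet; each checker
 +       * advances i past the packet it has validated.
 +       */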
 +      for (i = 0; i < cmd->cdw_count;) {
 +              switch (PACKET_HEADER_GET(cmd->cdw[i])) {
 +              case 0:
 +                      ret = amd_cmd_packet0_check(dev, cmd, &i);
 +                      if (ret) {
 +                              return ret;
 +                      }
 +                      break;
 +              case 1:
 +                      /* we don't accept packet 1 */
 +                      return -EINVAL;
 +              case 2:
 +                      /* FIXME: accept packet 2 */
 +                      return -EINVAL;
 +              case 3:
 +                      ret = amd_cmd_packet3_check(dev, cmd, &i);
 +                      if (ret) {
 +                              return ret;
 +                      }
 +                      break;
 +              }
 +      }
 +      return 0;
 +}
 +
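 +/*
 + * Common exit path for amd_ioctl_cmd: releases whatever the ioctl
 + * acquired so far (the command buffer mapping, the bo references
 + * taken at lookup time, the bo array and the bm read lock) and
 + * returns r unchanged.
 + */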
 +static int amd_ioctl_cmd_cleanup(struct drm_device *dev,
 +                               struct drm_file *file,
 +                               struct amd_cmd *cmd,
 +                               int r)
 +{
 +      /* check if we need to unfence the objects */
 +      if (r && (!list_empty(&cmd->bo_unused.list) ||
 +                !list_empty(&cmd->bo_used.list))) {
 +              drm_putback_buffer_objects(dev);
 +      }
 +      if (cmd->cdw) {
 +              drm_bo_kunmap(&cmd->cdw_kmap);
 +              cmd->cdw = NULL;
 +      }
 +      /* dereference buffers, since the lookup referenced them */
 +      amd_cmd_bo_cleanup(dev, cmd);
 +      if (cmd->bo) {
 +              drm_free(cmd->bo,
 +                       cmd->bo_count * sizeof(struct amd_cmd_bo),
 +                       DRM_MEM_DRIVER);
 +              cmd->bo = NULL;
 +      }
 +      drm_bo_read_unlock(&dev->bm.bm_lock);
 +      return r;
 +}
 +
 +int amd_ioctl_cmd(struct drm_device *dev, void *data, struct drm_file *file)
 +{
 +      struct drm_radeon_private *dev_priv = dev->dev_private;
 +      struct drm_amd_cmd *drm_amd_cmd = data;
 +      struct drm_fence_arg *fence_arg = &drm_amd_cmd->fence_arg;
 +      struct drm_fence_object *fence;
 +      struct amd_cmd cmd;
 +      int tmp;
 +      int ret;
 +
 +      /* check that we have a command checker */
 +      if (dev_priv->cmd_module.check == NULL) {
 +              DRM_ERROR("invalid command checker module.\n");
 +              return -EFAULT;
 +      }
 +      /* command dword count must be > 0 */
 +      if (drm_amd_cmd->cdw_count == 0) {
 +              DRM_ERROR("command dword count is 0.\n");
 +              return -EINVAL;
 +      }
++
++      /* FIXME: Lock the buffer manager. This is needed so the X server
++       * can block DRI clients while VT-switched; the X server then
++       * takes the lock in write mode.
++       */
++
++      ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
 +      if (ret) {
++
++              /* FIXME: ret can be -EAGAIN here,
++               * which really isn't an error.
++               */
++
 +              DRM_ERROR("bo read locking failed.\n");
 +              return ret;
 +      }
 +      /* cleanup & initialize amd cmd structure */
 +      memset(&cmd, 0, sizeof(struct amd_cmd));
 +      cmd.bo_count = drm_amd_cmd->bo_count;
 +      INIT_LIST_HEAD(&cmd.bo_unused.list);
 +      INIT_LIST_HEAD(&cmd.bo_used.list);
 +      /* allocate structure for bo parsing */
 +      cmd.bo = drm_calloc(cmd.bo_count, sizeof(struct amd_cmd_bo),
 +                          DRM_MEM_DRIVER);
 +      if (cmd.bo == NULL) {
 +              return amd_ioctl_cmd_cleanup(dev, file, &cmd, -ENOMEM);
 +      }
 +      /* parse cmd bo */
 +      ret = amd_cmd_parse_cmd_bo(dev, file, drm_amd_cmd, &cmd);
 +      if (ret) {
 +              return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
 +      }
 +      /* check that a command buffer has been found */
 +      if (cmd.cdw_bo == NULL) {
 +              DRM_ERROR("no command buffer submitted in cmd ioctl\n");
 +              return amd_ioctl_cmd_cleanup(dev, file, &cmd, -EINVAL);
 +      }
 +      /* map command buffer */
 +      cmd.cdw_count = drm_amd_cmd->cdw_count;
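 +      /* size of the command bo in dwords, hence the >> 2 */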
 +      cmd.cdw_size = (cmd.cdw_bo->bo->mem.num_pages * PAGE_SIZE) >> 2;
 +      if (cmd.cdw_size < cmd.cdw_count) {
 +              DRM_ERROR("command buffer (%d) is smaller than expected (%d)\n",
 +                        cmd.cdw_size, cmd.cdw_count);
 +              return amd_ioctl_cmd_cleanup(dev, file, &cmd, -EINVAL);
 +      }
 +      memset(&cmd.cdw_kmap, 0, sizeof(struct drm_bo_kmap_obj));
 +      ret = drm_bo_kmap(cmd.cdw_bo->bo, 0,
 +                        cmd.cdw_bo->bo->mem.num_pages, &cmd.cdw_kmap);
 +      if (ret) {
 +              DRM_ERROR("error mapping command buffer\n");
 +              return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
 +      }
 +      cmd.cdw = drm_bmo_virtual(&cmd.cdw_kmap, &tmp);
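 +      /* tmp only receives whether the mapping is in io memory */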
 +      /* do command checking */
 +      ret = dev_priv->cmd_module.check(dev, &cmd);
 +      if (ret) {
 +              return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
 +      }
 +      /* copy command to ring */
 +      ret = radeon_ms_ring_emit(dev, cmd.cdw, cmd.cdw_count);
 +      if (ret) {
 +              return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
 +      }
 +      /* fence */
 +      ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 +      if (ret) {
 +              return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
 +      }
 +      if (!(fence_arg->flags & DRM_FENCE_FLAG_NO_USER)) {
 +              ret = drm_fence_add_user_object(file, fence,
 +                                              fence_arg->flags &
 +                                              DRM_FENCE_FLAG_SHAREABLE);
 +              if (!ret) {
 +                      fence_arg->handle = fence->base.hash.key;
 +                      fence_arg->fence_class = fence->fence_class;
 +                      fence_arg->type = fence->type;
 +                      fence_arg->signaled = fence->signaled_types;
 +                      fence_arg->sequence = fence->sequence;
 +              } else {
 +                      DRM_ERROR("error add object fence, expect oddity !\n");
 +              }
 +      }
 +      drm_fence_usage_deref_unlocked(&fence);
 +      return amd_ioctl_cmd_cleanup(dev, file, &cmd, 0);
 +}