
Merge branch 'radeon-gem-cs' into modesetting-gem
author Dave Airlie <airlied@redhat.com>
Wed, 13 Aug 2008 23:36:34 +0000 (09:36 +1000)
committer Dave Airlie <airlied@redhat.com>
Wed, 13 Aug 2008 23:36:34 +0000 (09:36 +1000)
Conflicts:

libdrm/xf86drm.c
linux-core/Makefile.kernel
linux-core/drmP.h
linux-core/drm_compat.h
linux-core/drm_drv.c
linux-core/drm_stub.c
linux-core/drm_vm.c
shared-core/i915_dma.c
shared-core/r300_cmdbuf.c
shared-core/radeon_drv.h

20 files changed:
libdrm/intel/intel_bufmgr_fake.c
linux-core/Makefile.kernel
linux-core/drmP.h
linux-core/drm_bo.c
linux-core/drm_drv.c
linux-core/drm_memory.c
linux-core/drm_objects.h
linux-core/drm_stub.c
linux-core/drm_vm.c
linux-core/radeon_buffer.c
linux-core/radeon_reg.h
shared-core/drm.h
shared-core/i915_dma.c
shared-core/i915_drm.h
shared-core/i915_init.c
shared-core/r300_cmdbuf.c
shared-core/r300_reg.h
shared-core/radeon_cp.c
shared-core/radeon_cs.c
shared-core/radeon_drv.h

@@@ -42,9 -42,6 +42,9 @@@
  #include "drm.h"
  #include "i915_drm.h"
  #include "mm.h"
 +#include "libdrm_lists.h"
 +
 +#define ALIGN(value, alignment)  ((value + alignment - 1) & ~(alignment - 1))
  
  #define DBG(...) do {                                 \
     if (bufmgr_fake->bufmgr.debug)                     \
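
A quick illustration of the ALIGN macro introduced in this hunk (a minimal sketch with hypothetical values; note the arguments are substituted unparenthesized, so expressions built from low-precedence operators such as shifts would misexpand):

    #include <assert.h>

    #define ALIGN(value, alignment)  ((value + alignment - 1) & ~(alignment - 1))

    int main(void)
    {
        assert(ALIGN(100, 64) == 128);   /* rounds up to the next multiple */
        assert(ALIGN(128, 64) == 128);   /* already aligned: unchanged */
        assert(ALIGN(1, 4096) == 4096);  /* page-aligning a 1-byte size */
        return 0;
    }
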
@@@ -150,6 -147,9 +150,6 @@@ typedef struct _bufmgr_fake 
     int debug;
  
     int performed_rendering;
 -
 -   /* keep track of the current total size of objects we have relocs for */
 -   unsigned long current_total_size;
  } dri_bufmgr_fake;
  
  typedef struct _dri_bo_fake {
     const char *name;
  
     unsigned dirty:1;
 -   unsigned size_accounted:1; /*this buffers size has been accounted against the aperture */
 -   unsigned card_dirty:1; /* has the card written to this buffer - we may need to copy it back */
 +   /** has the card written to this buffer - we may need to copy it back */
 +   unsigned card_dirty:1;
     unsigned int refcount;
     /* Flags may consist of any of the DRM_BO flags, plus
      * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two
     /** relocation list */
     struct fake_buffer_reloc *relocs;
     int nr_relocs;
 +   /**
 +    * Total size of the target_bos of this buffer.
 +    *
 +    * Used for estimation in check_aperture.
 +    */
 +   unsigned int child_size;
  
     struct block *block;
     void *backing_store;
  static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
                        unsigned int fence_cookie);
  
 -static int dri_fake_check_aperture_space(dri_bo *bo);
 -
  #define MAXFENCE 0x7fffffff
  
  static int FENCE_LTE( unsigned a, unsigned b )
@@@ -653,7 -649,7 +653,7 @@@ intel_bo_fake_alloc_static(dri_bufmgr *
     bo_fake->refcount = 1;
     bo_fake->id = ++bufmgr_fake->buf_nr;
     bo_fake->name = name;
-    bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE;
+    bo_fake->flags = BM_PINNED;
     bo_fake->is_static = 1;
  
     DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
@@@ -859,6 -855,9 +859,6 @@@ dri_fake_bo_validate(dri_bo *bo
        return 0;
     }
  
 -   /* reset size accounted */
 -   bo_fake->size_accounted = 0;
 -
     /* Allocate the card memory */
     if (!bo_fake->block && !evict_and_alloc_block(bo)) {
        bufmgr_fake->fail = 1;
@@@ -942,6 -941,8 +942,6 @@@ dri_fake_emit_reloc(dri_bo *reloc_buf
     assert(reloc_buf);
     assert(target_buf);
  
 -   assert(target_fake->is_static || target_fake->size_accounted);
 -
     if (reloc_fake->relocs == NULL) {
        reloc_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) *
                                  MAX_RELOCS);
  
     dri_bo_reference(target_buf);
  
 +   if (!target_fake->is_static)
 +      reloc_fake->child_size += ALIGN(target_buf->size, target_fake->alignment);
 +
     r->target_buf = target_buf;
     r->offset = offset;
     r->last_target_offset = target_buf->offset;
@@@ -1081,6 -1079,7 +1081,6 @@@ dri_fake_process_relocs(dri_bo *batch_b
  
     assert(ret == 0);
  
 -   bufmgr_fake->current_total_size = 0;
     return NULL;
  }
  
@@@ -1118,39 -1117,26 +1118,39 @@@ dri_fake_post_submit(dri_bo *batch_buf
     dri_bo_fake_post_submit(batch_buf);
  }
  
 +/**
 + * Return an error if the list of BOs will exceed the aperture size.
 + *
 + * This is a rough guess and likely to fail, as during the validate sequence we
 + * may place a buffer in an inopportune spot early on and then fail to fit
 + * a set smaller than the aperture.
 + */
  static int
 -dri_fake_check_aperture_space(dri_bo *bo)
 +dri_fake_check_aperture_space(dri_bo **bo_array, int count)
  {
 -   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
 -   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
 -   unsigned int sz;
 +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo_array[0]->bufmgr;
 +   unsigned int sz = 0;
 +   int i;
  
 -   sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
 +   for (i = 0; i < count; i++) {
 +      dri_bo_fake *bo_fake = (dri_bo_fake *)bo_array[i];
  
 -   if (bo_fake->size_accounted || bo_fake->is_static)
 -      return 0;
 +      if (bo_fake == NULL)
 +       continue;
 +
 +      if (!bo_fake->is_static)
 +       sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
 +      sz += bo_fake->child_size;
 +   }
  
 -   if (bufmgr_fake->current_total_size + sz > bufmgr_fake->size) {
 -     DBG("check_space: %s bo %d %d overflowed bufmgr size %d\n", bo_fake->name, bo_fake->id, sz, bufmgr_fake->size);
 +   if (sz > bufmgr_fake->size) {
 +      DBG("check_space: overflowed bufmgr size, %dkb vs %dkb\n",
 +        sz / 1024, bufmgr_fake->size / 1024);
        return -1;
     }
  
 -   bufmgr_fake->current_total_size += sz;
 -   bo_fake->size_accounted = 1;
 -   DBG("drm_check_space: buf %d, %s %d %d\n", bo_fake->id, bo_fake->name, bo->size, bufmgr_fake->current_total_size);
 +   DBG("drm_check_space: sz %dkb vs bufgr %dkb\n", sz / 1024 ,
 +       bufmgr_fake->size / 1024);
     return 0;
  }
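
The reworked check sums the aligned size of every buffer in the batch, plus each buffer's child_size of relocation targets, instead of keeping a running per-buffer total. A hedged caller sketch with assumed buffer names (the function is static to intel_bufmgr_fake.c, so real callers would go through the bufmgr's check-aperture hook):

    /* vertex_bo and texture_bo are hypothetical, already-allocated buffers. */
    dri_bo *bo_array[2] = { vertex_bo, texture_bo };

    /* Returns -1 when the summed, aligned sizes would overflow the
     * managed aperture; NULL entries in the array are skipped. */
    if (dri_fake_check_aperture_space(bo_array, 2) != 0) {
            /* flush the current batch and retry with a smaller set */
    }
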
  
@@@ -12,21 -12,18 +12,20 @@@ drm-objs    := drm_auth.o drm_bufs.o dr
                drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
                drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
                drm_memory_debug.o ati_pcigart.o drm_sman.o \
-               drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
-               drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
 -              drm_hashtab.o drm_memrange.o drm_compat.o \
++              drm_hashtab.o drm_mm.o drm_compat.o \
+               drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o \
                drm_crtc.o drm_edid.o drm_modes.o drm_crtc_helper.o \
                drm_regman.o drm_vm_nopage_compat.o drm_gem.o
  tdfx-objs   := tdfx_drv.o
  r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
  mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
  i810-objs   := i810_drv.o i810_dma.o
- i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
-               i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
-               i915_opregion.o \
 -i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_gem.o \
++i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
++              i915_compat.o i915_suspend.o i915_opregion.o \
 +              i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o \
                intel_display.o intel_crt.o intel_lvds.o intel_bios.o \
                intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
 -              intel_tv.o i915_compat.o intel_dvo.o dvo_ch7xxx.o \
 +              intel_tv.o intel_dvo.o dvo_ch7xxx.o \
                dvo_ch7017.o dvo_ivch.o dvo_tfp410.o dvo_sil164.o
  nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
                nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
@@@ -43,8 -40,9 +42,9 @@@
                nv50_kms_wrapper.o \
                nv50_fbcon.o
  radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o radeon_gem.o \
-        radeon_buffer.o radeon_fence.o atom.o radeon_display.o radeon_atombios.o radeon_i2c.o radeon_connectors.o \
-       atombios_crtc.o radeon_encoders.o radeon_fb.o radeon_combios.o
+        radeon_buffer.o radeon_fence.o atom.o radeon_display.o radeon_atombios.o radeon_i2c.o radeon_connectors.o radeon_cs.o \
+       atombios_crtc.o radeon_encoders.o radeon_fb.o radeon_combios.o radeon_legacy_crtc.o radeon_legacy_encoders.o \
+       radeon_cursor.o
  sis-objs    := sis_drv.o sis_mm.o
  ffb-objs    := ffb_drv.o ffb_context.o
  savage-objs := savage_drv.o savage_bci.o savage_state.o
diff --combined linux-core/drmP.h
@@@ -166,7 -166,6 +166,6 @@@ typedef unsigned long uintptr_t
  #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
  #define DRM_MAP_HASH_OFFSET 0x10000000
  #define DRM_MAP_HASH_ORDER 12
- #define DRM_OBJECT_HASH_ORDER 12
  #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
  #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
  /*
@@@ -405,14 -404,6 +404,6 @@@ struct drm_buf_entry 
        struct drm_freelist freelist;
  };
  
- enum drm_ref_type {
-       _DRM_REF_USE = 0,
-       _DRM_REF_TYPE1,
-       _DRM_NO_REF_TYPES
- };
  /** File private data */
  struct drm_file {
        int authenticated;
        struct drm_minor *minor;
        unsigned long lock_count;
  
-       /*
-        * The user object hash table is global and resides in the
-        * drm_device structure. We protect the lists and hash tables with the
-        * device struct_mutex. A bit coarse-grained but probably the best
-        * option.
-        */
-       struct list_head refd_objects;
        /** Mapping of mm object handles to object pointers. */
        struct idr object_idr;
        /** Lock for synchronization of access to object_idr. */
        spinlock_t table_lock;
  
-       struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
        struct file *filp;
        void *driver_priv;
  
@@@ -480,6 -461,11 +461,6 @@@ struct drm_lock_data 
        uint32_t kernel_waiters;
        uint32_t user_waiters;
        int idle_has_lock;
 -      /**
 -       * Boolean signaling that the lock is held on behalf of the
 -       * file_priv client by the kernel in an ioctl handler.
 -       */
 -      int kernel_held;
  };
  
  /**
@@@ -555,17 -541,17 +536,17 @@@ struct drm_sigdata 
   * Generic memory manager structs
   */
  
 -struct drm_memrange_node {
 +struct drm_mm_node {
        struct list_head fl_entry;
        struct list_head ml_entry;
        int free;
        unsigned long start;
        unsigned long size;
 -      struct drm_memrange *mm;
 +      struct drm_mm *mm;
        void *private;
  };
  
 -struct drm_memrange {
 +struct drm_mm {
        struct list_head fl_entry;
        struct list_head ml_entry;
  };
@@@ -581,7 -567,7 +562,7 @@@ struct drm_map_list 
        uint64_t user_token;
        struct drm_master *master; /** if this map is associated with a specific
                                       master */
 -      struct drm_memrange_node *file_offset_node;
 +      struct drm_mm_node *file_offset_node;
  };
  
  typedef struct drm_map drm_local_map_t;
@@@ -684,7 -670,9 +665,9 @@@ struct drm_gem_object 
  
  /* per-master structure */
  struct drm_master {
-       
+       struct kref refcount; /* refcount for this master */
        struct list_head head; /**< each minor contains a list of masters */
        struct drm_minor *minor; /**< link back to minor we are a master for */
  
@@@ -900,8 -888,7 +883,7 @@@ struct drm_device 
        struct list_head maplist;       /**< Linked list of regions */
        int map_count;                  /**< Number of mappable regions */
        struct drm_open_hash map_hash;       /**< User token hash table for maps */
 -      struct drm_memrange offset_manager;  /**< User token manager */
 +      struct drm_mm offset_manager;        /**< User token manager */
-       struct drm_open_hash object_hash;    /**< User token hash table for objects */
        struct address_space *dev_mapping;  /**< For unmap_mapping_range() */
        struct page *ttm_dummy_page;
  
        /** \name VBLANK IRQ support */
        /*@{ */
  
 +      /*
 +       * At load time, disabling the vblank interrupt won't be allowed since
 +       * old clients may not call the modeset ioctl and therefore misbehave.
 +       * Once the modeset ioctl *has* been called though, we can safely
 +       * disable them when unused.
 +       */
 +      int vblank_disable_allowed;
 +
        wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
        atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
        spinlock_t vbl_lock;
        atomic_t *vblank_refcount;      /* number of users of vblank interrupts per crtc */
        u32 *last_vblank;               /* protected by dev->vbl_lock, used */
                                        /* for wraparound handling */
 -      u32 *vblank_offset;             /* used to track how many vblanks */
        int *vblank_enabled;            /* so we don't call enable more than
                                           once per disable */
 -      u32 *vblank_premodeset;         /*  were lost during modeset */
 +      int *vblank_inmodeset;          /* Display driver is setting mode */
        struct timer_list vblank_disable_timer;
  
 -      unsigned long max_vblank_count; /**< size of vblank counter register */
 +      u32 max_vblank_count;           /**< size of vblank counter register */
        spinlock_t tasklet_lock;        /**< For drm_locked_tasklet */
        void (*locked_tasklet_func)(struct drm_device *dev);
  
@@@ -1316,6 -1296,7 +1298,6 @@@ extern int drm_wait_hotplug(struct drm_
  extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
  extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
  extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 -extern void drm_update_vblank_count(struct drm_device *dev, int crtc);
  extern void drm_handle_vblank(struct drm_device *dev, int crtc);
  extern void drm_handle_hotplug(struct drm_device *dev);
  extern int drm_vblank_get(struct drm_device *dev, int crtc);
@@@ -1366,12 -1347,13 +1348,13 @@@ extern int drm_setmaster_ioctl(struct d
                               struct drm_file *file_priv);
  extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
- extern struct drm_master *drm_get_master(struct drm_minor *minor);
- extern void drm_put_master(struct drm_master *master);
+ struct drm_master *drm_master_create(struct drm_minor *minor);
+ extern struct drm_master *drm_master_get(struct drm_master *master);
+ extern void drm_master_put(struct drm_master **master);
  extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
-                    struct drm_driver *driver);
+                      struct drm_driver *driver);
  extern int drm_put_dev(struct drm_device *dev);
- extern int drm_put_minor(struct drm_device *dev, struct drm_minor **p);
+ extern int drm_put_minor(struct drm_minor **minor_p);
  extern unsigned int drm_debug; /* 1 to enable debug output */
  
  extern struct class *drm_class;
@@@ -1417,22 -1399,26 +1400,22 @@@ extern int drm_sysfs_connector_add(stru
  extern void drm_sysfs_connector_remove(struct drm_connector *connector);
  
  /*
 - * Basic memory manager support (drm_memrange.c)
 + * Basic memory manager support (drm_mm.c)
   */
  
 -extern struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
 -                                                      unsigned long size,
 -                                                      unsigned alignment);
 -extern void drm_memrange_put_block(struct drm_memrange_node *cur);
 -extern struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange *mm,
 -                                                        unsigned long size,
 -                                                        unsigned alignment, int best_match);
 -extern int drm_memrange_init(struct drm_memrange *mm,
 -                           unsigned long start, unsigned long size);
 -extern void drm_memrange_takedown(struct drm_memrange *mm);
 -extern int drm_memrange_clean(struct drm_memrange *mm);
 -extern unsigned long drm_memrange_tail_space(struct drm_memrange *mm);
 -extern int drm_memrange_remove_space_from_tail(struct drm_memrange *mm,
 -                                             unsigned long size);
 -extern int drm_memrange_add_space_to_tail(struct drm_memrange *mm,
 -                                        unsigned long size);
 -static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block)
 +extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
 +                                             unsigned alignment);
 +extern void drm_mm_put_block(struct drm_mm_node *cur);
 +extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
 +                                              unsigned alignment, int best_match);
 +extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
 +extern void drm_mm_takedown(struct drm_mm *mm);
 +extern int drm_mm_clean(struct drm_mm *mm);
 +extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
 +extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
 +extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
 +
 +static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
  {
        return block->mm;
  }
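
With the drm_memrange names reverted to drm_mm, the allocator reads as declared above. A minimal usage sketch (range start and sizes hypothetical, in whatever units the caller manages, pages here):

    struct drm_mm mm;
    struct drm_mm_node *node;

    if (drm_mm_init(&mm, 0, 256))              /* manage 256 pages from offset 0 */
            return -ENOMEM;

    node = drm_mm_search_free(&mm, 16, 0, 0);  /* first fit, no alignment */
    if (node)
            node = drm_mm_get_block(node, 16, 0);  /* carve out the block */

    if (node)
            drm_mm_put_block(node);            /* give it back */

    if (drm_mm_clean(&mm))
            drm_mm_takedown(&mm);
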
@@@ -1502,7 -1488,6 +1485,7 @@@ void drm_gem_open(struct drm_device *de
  void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
  
  extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
 +extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
  extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
  
  static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
diff --combined linux-core/drm_bo.c
@@@ -417,14 -417,14 +417,14 @@@ static void drm_bo_cleanup_refs(struct 
        if (!bo->fence) {
                list_del_init(&bo->lru);
                if (bo->mem.mm_node) {
 -                      drm_memrange_put_block(bo->mem.mm_node);
 +                      drm_mm_put_block(bo->mem.mm_node);
                        if (bo->pinned_node == bo->mem.mm_node)
                                bo->pinned_node = NULL;
                        bo->mem.mm_node = NULL;
                }
                list_del_init(&bo->pinned_lru);
                if (bo->pinned_node) {
 -                      drm_memrange_put_block(bo->pinned_node);
 +                      drm_mm_put_block(bo->pinned_node);
                        bo->pinned_node = NULL;
                }
                list_del_init(&bo->ddestroy);
@@@ -565,18 -565,6 +565,6 @@@ void drm_bo_usage_deref_locked(struct d
  }
  EXPORT_SYMBOL(drm_bo_usage_deref_locked);
  
- static void drm_bo_base_deref_locked(struct drm_file *file_priv,
-                                    struct drm_user_object *uo)
- {
-       struct drm_buffer_object *bo =
-           drm_user_object_entry(uo, struct drm_buffer_object, base);
-       DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-       drm_bo_takedown_vm_locked(bo);
-       drm_bo_usage_deref_locked(&bo);
- }
  void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
  {
        struct drm_buffer_object *tmp_bo = *bo;
@@@ -790,7 -778,7 +778,7 @@@ out
        mutex_lock(&dev->struct_mutex);
        if (evict_mem.mm_node) {
                if (evict_mem.mm_node != bo->pinned_node)
 -                      drm_memrange_put_block(evict_mem.mm_node);
 +                      drm_mm_put_block(evict_mem.mm_node);
                evict_mem.mm_node = NULL;
        }
        drm_bo_add_to_lru(bo);
@@@ -809,7 -797,7 +797,7 @@@ static int drm_bo_mem_force_space(struc
                                  struct drm_bo_mem_reg *mem,
                                  uint32_t mem_type, int no_wait)
  {
 -      struct drm_memrange_node *node;
 +      struct drm_mm_node *node;
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_buffer_object *entry;
        struct drm_mem_type_manager *man = &bm->man[mem_type];
  
        mutex_lock(&dev->struct_mutex);
        do {
 -              node = drm_memrange_search_free(&man->manager, num_pages,
 +              node = drm_mm_search_free(&man->manager, num_pages,
                                          mem->page_alignment, 1);
                if (node)
                        break;
                return -ENOMEM;
        }
  
 -      node = drm_memrange_get_block(node, num_pages, mem->page_alignment);
 +      node = drm_mm_get_block(node, num_pages, mem->page_alignment);
        if (unlikely(!node)) {
                mutex_unlock(&dev->struct_mutex);
                return -ENOMEM;
@@@ -923,7 -911,7 +911,7 @@@ int drm_bo_mem_space(struct drm_buffer_
        int type_found = 0;
        int type_ok = 0;
        int has_eagain = 0;
 -      struct drm_memrange_node *node = NULL;
 +      struct drm_mm_node *node = NULL;
        int ret;
  
        mem->mm_node = NULL;
                mutex_lock(&dev->struct_mutex);
                if (man->has_type && man->use_type) {
                        type_found = 1;
 -                      node = drm_memrange_search_free(&man->manager, mem->num_pages,
 +                      node = drm_mm_search_free(&man->manager, mem->num_pages,
                                                  mem->page_alignment, 1);
                        if (node)
 -                              node = drm_memrange_get_block(node, mem->num_pages,
 +                              node = drm_mm_get_block(node, mem->num_pages,
                                                        mem->page_alignment);
                }
                mutex_unlock(&dev->struct_mutex);
@@@ -1068,40 -1056,12 +1056,12 @@@ static int drm_bo_modify_proposed_flag
  }
  
  /*
-  * Call dev->struct_mutex locked.
-  */
- struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
-                                             uint32_t handle, int check_owner)
- {
-       struct drm_user_object *uo;
-       struct drm_buffer_object *bo;
-       uo = drm_lookup_user_object(file_priv, handle);
-       if (!uo || (uo->type != drm_buffer_type)) {
-               DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
-               return NULL;
-       }
-       if (check_owner && file_priv != uo->owner) {
-               if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
-                       return NULL;
-       }
-       bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
-       atomic_inc(&bo->usage);
-       return bo;
- }
- EXPORT_SYMBOL(drm_lookup_buffer_object);
- /*
   * Call bo->mutex locked.
   * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
   * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
   */
  
- static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
+ int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
  {
        struct drm_fence_object *fence = bo->fence;
  
@@@ -1158,149 -1118,6 +1118,6 @@@ static int drm_bo_wait_unmapped(struct 
  }
  
  /*
-  * Fill in the ioctl reply argument with buffer info.
-  * Bo locked.
-  */
- void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
-                        struct drm_bo_info_rep *rep)
- {
-       if (!rep)
-               return;
-       rep->handle = bo->base.hash.key;
-       rep->flags = bo->mem.flags;
-       rep->size = bo->num_pages * PAGE_SIZE;
-       rep->offset = bo->offset;
-       /*
-        * drm_bo_type_device buffers have user-visible
-        * handles which can be used to share across
-        * processes. Hand that back to the application
-        */
-       if (bo->type == drm_bo_type_device)
-               rep->arg_handle = bo->map_list.user_token;
-       else
-               rep->arg_handle = 0;
-       rep->proposed_flags = bo->mem.proposed_flags;
-       rep->buffer_start = bo->buffer_start;
-       rep->fence_flags = bo->fence_type;
-       rep->rep_flags = 0;
-       rep->page_alignment = bo->mem.page_alignment;
-       if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
-               DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
-                               DRM_BO_REP_BUSY);
-       }
- }
- EXPORT_SYMBOL(drm_bo_fill_rep_arg);
- /*
-  * Wait for buffer idle and register that we've mapped the buffer.
-  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
-  * so that if the client dies, the mapping is automatically
-  * unregistered.
-  */
- static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
-                                uint32_t map_flags, unsigned hint,
-                                struct drm_bo_info_rep *rep)
- {
-       struct drm_buffer_object *bo;
-       struct drm_device *dev = file_priv->minor->dev;
-       int ret = 0;
-       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-       mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, handle, 1);
-       mutex_unlock(&dev->struct_mutex);
-       if (!bo)
-               return -EINVAL;
-       mutex_lock(&bo->mutex);
-       do {
-               bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
-               ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
-               if (unlikely(ret))
-                       goto out;
-               if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
-                       drm_bo_evict_cached(bo);
-       } while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
-       atomic_inc(&bo->mapped);
-       mutex_lock(&dev->struct_mutex);
-       ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
-       mutex_unlock(&dev->struct_mutex);
-       if (ret) {
-               if (atomic_dec_and_test(&bo->mapped))
-                       wake_up_all(&bo->event_queue);
-       } else
-               drm_bo_fill_rep_arg(bo, rep);
-  out:
-       mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(&bo);
-       return ret;
- }
- static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
- {
-       struct drm_device *dev = file_priv->minor->dev;
-       struct drm_buffer_object *bo;
-       struct drm_ref_object *ro;
-       int ret = 0;
-       mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, handle, 1);
-       if (!bo) {
-               ret = -EINVAL;
-               goto out;
-       }
-       ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
-       if (!ro) {
-               ret = -EINVAL;
-               goto out;
-       }
-       drm_remove_ref_object(file_priv, ro);
-       drm_bo_usage_deref_locked(&bo);
- out:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
- }
- /*
-  * Call struct-sem locked.
-  */
- static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
-                                        struct drm_user_object *uo,
-                                        enum drm_ref_type action)
- {
-       struct drm_buffer_object *bo =
-           drm_user_object_entry(uo, struct drm_buffer_object, base);
-       /*
-        * We DON'T want to take the bo->lock here, because we want to
-        * hold it when we wait for unmapped buffer.
-        */
-       BUG_ON(action != _DRM_REF_TYPE1);
-       if (atomic_dec_and_test(&bo->mapped))
-               wake_up_all(&bo->event_queue);
- }
- /*
   * bo->mutex locked.
   * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
   */
@@@ -1339,7 -1156,7 +1156,7 @@@ out_unlock
        if (ret || !move_unfenced) {
                if (mem.mm_node) {
                        if (mem.mm_node != bo->pinned_node)
 -                              drm_memrange_put_block(mem.mm_node);
 +                              drm_mm_put_block(mem.mm_node);
                        mem.mm_node = NULL;
                }
                drm_bo_add_to_lru(bo);
@@@ -1431,7 -1248,7 +1248,7 @@@ static int drm_buffer_object_validate(s
  
                if (bo->pinned_node != bo->mem.mm_node) {
                        if (bo->pinned_node != NULL)
 -                              drm_memrange_put_block(bo->pinned_node);
 +                              drm_mm_put_block(bo->pinned_node);
                        bo->pinned_node = bo->mem.mm_node;
                }
  
                mutex_lock(&dev->struct_mutex);
  
                if (bo->pinned_node != bo->mem.mm_node)
 -                      drm_memrange_put_block(bo->pinned_node);
 +                      drm_mm_put_block(bo->pinned_node);
  
                list_del_init(&bo->pinned_lru);
                bo->pinned_node = NULL;
@@@ -1594,8 -1411,7 +1411,7 @@@ static int drm_bo_prepare_for_validate(
  
  int drm_bo_do_validate(struct drm_buffer_object *bo,
                       uint64_t flags, uint64_t mask, uint32_t hint,
-                      uint32_t fence_class,
-                      struct drm_bo_info_rep *rep)
+                      uint32_t fence_class)
  {
        int ret;
        int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
  
        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
  out:
-       if (rep)
-               drm_bo_fill_rep_arg(bo, rep);
        mutex_unlock(&bo->mutex);
  
        return ret;
  }
  EXPORT_SYMBOL(drm_bo_do_validate);
  
- /**
-  * drm_bo_handle_validate
-  *
-  * @file_priv: the drm file private, used to get a handle to the user context
-  *
-  * @handle: the buffer object handle
-  *
-  * @flags: access rights, mapping parameters and cacheability. See
-  * the DRM_BO_FLAG_* values in drm.h
-  *
-  * @mask: Which flag values to change; this allows callers to modify
-  * things without knowing the current state of other flags.
-  *
-  * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_*
-  * values in drm.h.
-  *
-  * @fence_class: a driver-specific way of doing fences. Presumably,
-  * this would be used if the driver had more than one submission and
-  * fencing mechanism. At this point, there isn't any use of this
-  * from the user mode code.
-  *
-  * @rep: To be stuffed with the reply from validation
-  *
-  * @bp_rep: To be stuffed with the buffer object pointer
-  *
-  * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
-  * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
-  * This is a convenience wrapper only.
-  */
- int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
-                          uint64_t flags, uint64_t mask,
-                          uint32_t hint,
-                          uint32_t fence_class,
-                          struct drm_bo_info_rep *rep,
-                          struct drm_buffer_object **bo_rep)
- {
-       struct drm_device *dev = file_priv->minor->dev;
-       struct drm_buffer_object *bo;
-       int ret;
-       mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, handle, 1);
-       mutex_unlock(&dev->struct_mutex);
-       if (!bo)
-               return -EINVAL;
-       if (bo->base.owner != file_priv)
-               mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
-       ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
-       if (!ret && bo_rep)
-               *bo_rep = bo;
-       else
-               drm_bo_usage_deref_unlocked(&bo);
-       return ret;
- }
- EXPORT_SYMBOL(drm_bo_handle_validate);
- static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
-                             struct drm_bo_info_rep *rep)
- {
-       struct drm_device *dev = file_priv->minor->dev;
-       struct drm_buffer_object *bo;
-       mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, handle, 1);
-       mutex_unlock(&dev->struct_mutex);
-       if (!bo)
-               return -EINVAL;
-       mutex_lock(&bo->mutex);
-       /*
-        * FIXME: Quick busy here?
-        */
-       drm_bo_busy(bo, 1);
-       drm_bo_fill_rep_arg(bo, rep);
-       mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(&bo);
-       return 0;
- }
- static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
-                             uint32_t hint,
-                             struct drm_bo_info_rep *rep)
- {
-       struct drm_device *dev = file_priv->minor->dev;
-       struct drm_buffer_object *bo;
-       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-       int ret;
-       mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, handle, 1);
-       mutex_unlock(&dev->struct_mutex);
-       if (!bo)
-               return -EINVAL;
-       mutex_lock(&bo->mutex);
-       ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
-       if (ret)
-               goto out;
-       drm_bo_fill_rep_arg(bo, rep);
- out:
-       mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(&bo);
-       return ret;
- }
  int drm_buffer_object_create(struct drm_device *dev,
                             unsigned long size,
                             enum drm_bo_type type,
  
        mutex_unlock(&bo->mutex);
        ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
-                                0, NULL);
+                                0);
        if (ret)
                goto out_err_unlocked;
  
@@@ -1837,230 -1533,6 +1533,6 @@@ out_err_unlocked
  }
  EXPORT_SYMBOL(drm_buffer_object_create);
  
- int drm_bo_add_user_object(struct drm_file *file_priv,
-                          struct drm_buffer_object *bo, int shareable)
- {
-       struct drm_device *dev = file_priv->minor->dev;
-       int ret;
-       mutex_lock(&dev->struct_mutex);
-       ret = drm_add_user_object(file_priv, &bo->base, shareable);
-       if (ret)
-               goto out;
-       bo->base.remove = drm_bo_base_deref_locked;
-       bo->base.type = drm_buffer_type;
-       bo->base.ref_struct_locked = NULL;
-       bo->base.unref = drm_buffer_user_object_unmap;
- out:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
- }
- EXPORT_SYMBOL(drm_bo_add_user_object);
- int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
-       struct drm_bo_create_arg *arg = data;
-       struct drm_bo_create_req *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       struct drm_buffer_object *entry;
-       enum drm_bo_type bo_type;
-       int ret = 0;
-       DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
-           (int)(req->size / 1024), req->page_alignment * 4);
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-       /*
-        * If the buffer creation request comes in with a starting address,
-        * that points at the desired user pages to map. Otherwise, create
-        * a drm_bo_type_device buffer, which uses pages allocated from the kernel
-        */
-       bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
-       /*
-        * User buffers cannot be shared
-        */
-       if (bo_type == drm_bo_type_user)
-               req->flags &= ~DRM_BO_FLAG_SHAREABLE;
-       ret = drm_buffer_object_create(file_priv->minor->dev,
-                                      req->size, bo_type, req->flags,
-                                      req->hint, req->page_alignment,
-                                      req->buffer_start, &entry);
-       if (ret)
-               goto out;
-       ret = drm_bo_add_user_object(file_priv, entry,
-                                    req->flags & DRM_BO_FLAG_SHAREABLE);
-       if (ret) {
-               drm_bo_usage_deref_unlocked(&entry);
-               goto out;
-       }
-       mutex_lock(&entry->mutex);
-       drm_bo_fill_rep_arg(entry, rep);
-       mutex_unlock(&entry->mutex);
- out:
-       return ret;
- }
- int drm_bo_setstatus_ioctl(struct drm_device *dev,
-                          void *data, struct drm_file *file_priv)
- {
-       struct drm_bo_map_wait_idle_arg *arg = data;
-       struct drm_bo_info_req *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       struct drm_buffer_object *bo;
-       int ret;
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-       ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
-       if (ret)
-               return ret;
-       mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
-       mutex_unlock(&dev->struct_mutex);
-       if (!bo)
-               return -EINVAL;
-       if (bo->base.owner != file_priv)
-               req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
-       ret = drm_bo_do_validate(bo, req->flags, req->mask,
-                                req->hint | DRM_BO_HINT_DONT_FENCE,
-                                bo->fence_class, rep);
-       drm_bo_usage_deref_unlocked(&bo);
-       (void) drm_bo_read_unlock(&dev->bm.bm_lock);
-       return ret;
- }
- int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
-       struct drm_bo_map_wait_idle_arg *arg = data;
-       struct drm_bo_info_req *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       int ret;
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-       ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
-                                   req->hint, rep);
-       if (ret)
-               return ret;
-       return 0;
- }
- int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
-       struct drm_bo_handle_arg *arg = data;
-       int ret;
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-       ret = drm_buffer_object_unmap(file_priv, arg->handle);
-       return ret;
- }
- int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
-       struct drm_bo_reference_info_arg *arg = data;
-       struct drm_bo_handle_arg *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       struct drm_user_object *uo;
-       int ret;
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-       ret = drm_user_object_ref(file_priv, req->handle,
-                                 drm_buffer_type, &uo);
-       if (ret)
-               return ret;
-       ret = drm_bo_handle_info(file_priv, req->handle, rep);
-       if (ret)
-               return ret;
-       return 0;
- }
- int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
-       struct drm_bo_handle_arg *arg = data;
-       int ret = 0;
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-       ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
-       return ret;
- }
- int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
-       struct drm_bo_reference_info_arg *arg = data;
-       struct drm_bo_handle_arg *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       int ret;
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-       ret = drm_bo_handle_info(file_priv, req->handle, rep);
-       if (ret)
-               return ret;
-       return 0;
- }
- int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
-       struct drm_bo_map_wait_idle_arg *arg = data;
-       struct drm_bo_info_req *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       int ret;
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-       ret = drm_bo_handle_wait(file_priv, req->handle,
-                                req->hint, rep);
-       if (ret)
-               return ret;
-       return 0;
- }
  static int drm_bo_leave_list(struct drm_buffer_object *bo,
                             uint32_t mem_type,
                             int free_pinned,
                if (bo->pinned_node == bo->mem.mm_node)
                        bo->pinned_node = NULL;
                if (bo->pinned_node != NULL) {
 -                      drm_memrange_put_block(bo->pinned_node);
 +                      drm_mm_put_block(bo->pinned_node);
                        bo->pinned_node = NULL;
                }
                mutex_unlock(&dev->struct_mutex);
@@@ -2223,8 -1695,8 +1695,8 @@@ int drm_bo_clean_mm(struct drm_device *
                drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
                drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
  
 -              if (drm_memrange_clean(&man->manager)) {
 -                      drm_memrange_takedown(&man->manager);
 +              if (drm_mm_clean(&man->manager)) {
 +                      drm_mm_takedown(&man->manager);
                } else {
                        ret = -EBUSY;
                }
@@@ -2240,7 -1712,7 +1712,7 @@@ EXPORT_SYMBOL(drm_bo_clean_mm)
   *point since we have the hardware lock.
   */
  
- static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
+ int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
  {
        int ret;
        struct drm_buffer_manager *bm = &dev->bm;
@@@ -2295,7 -1767,7 +1767,7 @@@ int drm_bo_init_mm(struct drm_device *d
                        DRM_ERROR("Zero size memory manager type %d\n", type);
                        return ret;
                }
 -              ret = drm_memrange_init(&man->manager, p_offset, p_size);
 +              ret = drm_mm_init(&man->manager, p_offset, p_size);
                if (ret)
                        return ret;
        }
@@@ -2389,7 -1861,6 +1861,6 @@@ int drm_bo_driver_init(struct drm_devic
        int ret = -EINVAL;
  
        bm->dummy_read_page = NULL;
-       drm_bo_init_lock(&bm->bm_lock);
        mutex_lock(&dev->struct_mutex);
        if (!driver)
                goto out_unlock;
@@@ -2435,191 -1906,6 +1906,6 @@@ out_unlock
  }
  EXPORT_SYMBOL(drm_bo_driver_init);
  
- int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
-       struct drm_mm_init_arg *arg = data;
-       struct drm_buffer_manager *bm = &dev->bm;
-       struct drm_bo_driver *driver = dev->driver->bo_driver;
-       int ret;
-       if (!driver) {
-               DRM_ERROR("Buffer objects are not supported by this driver\n");
-               return -EINVAL;
-       }
-       ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
-       if (ret)
-               return ret;
-       ret = -EINVAL;
-       if (arg->magic != DRM_BO_INIT_MAGIC) {
-               DRM_ERROR("You are using an old libdrm that is not compatible with\n"
-                         "\tthe kernel DRM module. Please upgrade your libdrm.\n");
-               return -EINVAL;
-       }
-       if (arg->major != DRM_BO_INIT_MAJOR) {
-               DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
-                         "\tversion don't match. Got %d, expected %d.\n",
-                         arg->major, DRM_BO_INIT_MAJOR);
-               return -EINVAL;
-       }
-       mutex_lock(&dev->struct_mutex);
-       if (!bm->initialized) {
-               DRM_ERROR("DRM memory manager was not initialized.\n");
-               goto out;
-       }
-       if (arg->mem_type == 0) {
-               DRM_ERROR("System memory buffers already initialized.\n");
-               goto out;
-       }
-       ret = drm_bo_init_mm(dev, arg->mem_type,
-                            arg->p_offset, arg->p_size, 0);
- out:
-       mutex_unlock(&dev->struct_mutex);
-       (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-       if (ret)
-               return ret;
-       return 0;
- }
- int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
-       struct drm_mm_type_arg *arg = data;
-       struct drm_buffer_manager *bm = &dev->bm;
-       struct drm_bo_driver *driver = dev->driver->bo_driver;
-       int ret;
-       if (!driver) {
-               DRM_ERROR("Buffer objects are not supported by this driver\n");
-               return -EINVAL;
-       }
-       ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
-       if (ret)
-               return ret;
-       mutex_lock(&dev->struct_mutex);
-       ret = -EINVAL;
-       if (!bm->initialized) {
-               DRM_ERROR("DRM memory manager was not initialized\n");
-               goto out;
-       }
-       if (arg->mem_type == 0) {
-               DRM_ERROR("No takedown for System memory buffers.\n");
-               goto out;
-       }
-       ret = 0;
-       if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
-               if (ret == -EINVAL)
-                       DRM_ERROR("Memory manager type %d not clean. "
-                                 "Delaying takedown\n", arg->mem_type);
-               ret = 0;
-       }
- out:
-       mutex_unlock(&dev->struct_mutex);
-       (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-       if (ret)
-               return ret;
-       return 0;
- }
- int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
-       struct drm_mm_type_arg *arg = data;
-       struct drm_bo_driver *driver = dev->driver->bo_driver;
-       int ret;
-       if (!driver) {
-               DRM_ERROR("Buffer objects are not supported by this driver\n");
-               return -EINVAL;
-       }
-       if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
-               DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
-               return -EINVAL;
-       }
-       if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-               ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
-               if (ret)
-                       return ret;
-       }
-       mutex_lock(&dev->struct_mutex);
-       ret = drm_bo_lock_mm(dev, arg->mem_type);
-       mutex_unlock(&dev->struct_mutex);
-       if (ret) {
-               (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-               return ret;
-       }
-       return 0;
- }
- int drm_mm_unlock_ioctl(struct drm_device *dev,
-                       void *data,
-                       struct drm_file *file_priv)
- {
-       struct drm_mm_type_arg *arg = data;
-       struct drm_bo_driver *driver = dev->driver->bo_driver;
-       int ret;
-       if (!driver) {
-               DRM_ERROR("Buffer objects are not supported by this driver\n");
-               return -EINVAL;
-       }
-       if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-               ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-               if (ret)
-                       return ret;
-       }
-       return 0;
- }
- int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
-       struct drm_mm_info_arg *arg = data;
-       struct drm_buffer_manager *bm = &dev->bm;
-       struct drm_bo_driver *driver = dev->driver->bo_driver;
-       struct drm_mem_type_manager *man;
-       int ret = 0;
-       int mem_type = arg->mem_type;
-       if (!driver) {
-               DRM_ERROR("Buffer objects are not supported by this driver\n");
-               return -EINVAL;
-       }
-       if (mem_type >= DRM_BO_MEM_TYPES) {
-               DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
-               return -EINVAL;
-       }
-       mutex_lock(&dev->struct_mutex);
-       if (!bm->initialized) {
-               DRM_ERROR("DRM memory manager was not initialized\n");
-               ret = -EINVAL;
-               goto out;
-       }
-       man = &bm->man[arg->mem_type];
-       arg->p_size = man->size;
- out:
-       mutex_unlock(&dev->struct_mutex);
-      
-       return ret;
- }
  /*
   * buffer object vm functions.
   */
@@@ -2722,7 -2008,7 +2008,7 @@@ void drm_bo_takedown_vm_locked(struct d
                list->user_token = 0;
        }
        if (list->file_offset_node) {
 -              drm_memrange_put_block(list->file_offset_node);
 +              drm_mm_put_block(list->file_offset_node);
                list->file_offset_node = NULL;
        }
  
@@@ -2766,7 -2052,7 +2052,7 @@@ static int drm_bo_setup_vm_locked(struc
        atomic_inc(&bo->usage);
        map->handle = (void *)bo;
  
 -      list->file_offset_node = drm_memrange_search_free(&dev->offset_manager,
 +      list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
                                                    bo->mem.num_pages, 0, 0);
  
        if (unlikely(!list->file_offset_node)) {
                return -ENOMEM;
        }
  
 -      list->file_offset_node = drm_memrange_get_block(list->file_offset_node,
 +      list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                                                  bo->mem.num_pages, 0);
  
        if (unlikely(!list->file_offset_node)) {
  
        return 0;
  }
- int drm_bo_version_ioctl(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
- {
-       struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
-       arg->major = DRM_BO_INIT_MAJOR;
-       arg->minor = DRM_BO_INIT_MINOR;
-       arg->patchlevel = DRM_BO_INIT_PATCH;
-       return 0;
- }
diff --combined linux-core/drm_drv.c
@@@ -146,41 -146,9 +146,11 @@@ static struct drm_ioctl_desc drm_ioctls
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
  
-       DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
-                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
-                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
-                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
-                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
-       DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
 +#if OS_HAS_GEM
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
 +#endif
  };
  
  #define DRM_CORE_IOCTL_COUNT  ARRAY_SIZE( drm_ioctls )
@@@ -202,8 -170,6 +172,6 @@@ int drm_lastclose(struct drm_device * d
  
        DRM_DEBUG("\n");
  
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_bo_driver_finish(dev);
  
        /*
         * We can't do much about this function failing.
                dev->driver->lastclose(dev);
        DRM_DEBUG("driver lastclose completed\n");
  
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_bo_driver_finish(dev);
  
/*    if (dev->irq_enabled)
-               drm_irq_uninstall(dev); */
      if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_irq_uninstall(dev);
  
        /* Free drawable information memory */
        mutex_lock(&dev->struct_mutex);
        drm_drawable_free_all(dev);
        del_timer(&dev->timer);
  
-       if (dev->primary->master) {
-               drm_put_master(dev->primary->master);
-               dev->primary->master = NULL;
-       }
-       
        /* Clear AGP information */
-       if (drm_core_has_AGP(dev) && dev->agp) {
+       if (drm_core_has_AGP(dev) && dev->agp && !drm_core_check_feature(dev, DRIVER_MODESET)) {
                struct drm_agp_mem *entry, *tempe;
  
                /* Remove AGP resources, but leave dev->agp
        }
        dev->queue_count = 0;
  
-       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET))
                drm_dma_takedown(dev);
  
        dev->dev_mapping = NULL;
@@@ -430,12 -393,12 +395,12 @@@ static void drm_cleanup(struct drm_devi
  
        drm_ctxbitmap_cleanup(dev);
        drm_ht_remove(&dev->map_hash);
 -      drm_memrange_takedown(&dev->offset_manager);
 +      drm_mm_takedown(&dev->offset_manager);
-       drm_ht_remove(&dev->object_hash);
  
-       drm_put_minor(dev, &dev->primary);
        if (drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_put_minor(dev, &dev->control);
+               drm_put_minor(&dev->control);
+       drm_put_minor(&dev->primary);
  
        if (drm_put_dev(dev))
                DRM_ERROR("Cannot unload module\n");
@@@ -637,10 -600,9 +602,10 @@@ long drm_unlocked_ioctl(struct file *fi
        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
                && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
                ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
 -      else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
 +      else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
                ioctl = &drm_ioctls[nr];
 -      else {
 +              cmd = ioctl->cmd;
 +      } else {
                retcode = -EINVAL;
                goto err_i1;
        }
                goto err_i1;
        }
  #endif
 +
        func = ioctl->func;
        /* is there a local override? */
        if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
                retcode = func(dev, kdata, file_priv);
        }
  
 -      if ((retcode == 0) && (cmd & IOC_OUT)) {
 +      if (cmd & IOC_OUT) {
                if (copy_to_user((void __user *)arg, kdata,
                                 _IOC_SIZE(cmd)) != 0)
                        retcode = -EFAULT;
diff --combined linux-core/drm_memory.c
@@@ -188,6 -188,7 +188,7 @@@ void *drm_realloc(void *oldpt, size_t o
        }
        return pt;
  }
+ EXPORT_SYMBOL(drm_realloc);
  
  /**
   * Allocate pages.
@@@ -350,15 -351,6 +351,15 @@@ void drm_core_ioremap(struct drm_map *m
  }
  EXPORT_SYMBOL_GPL(drm_core_ioremap);
  
 +
 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
 +void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev)
 +{
 +      map->handle = ioremap_wc(map->offset, map->size);
 +}
 +EXPORT_SYMBOL_GPL(drm_core_ioremap_wc);
 +#endif
 +
  void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
  {
        if (!map->handle || !map->size)
diff --combined linux-core/drm_objects.h
  struct drm_device;
  struct drm_bo_mem_reg;
  
- /***************************************************
-  * User space objects. (drm_object.c)
-  */
- #define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
- enum drm_object_type {
-       drm_fence_type,
-       drm_buffer_type,
-       drm_lock_type,
-           /*
-            * Add other user space object types here.
-            */
-       drm_driver_type0 = 256,
-       drm_driver_type1,
-       drm_driver_type2,
-       drm_driver_type3,
-       drm_driver_type4
+ #define DRM_FENCE_FLAG_EMIT                0x00000001
+ #define DRM_FENCE_FLAG_SHAREABLE           0x00000002
+ /**
+  * On hardware with no interrupt events for operation completion,
+  * indicates that the kernel should sleep while waiting for any blocking
+  * operation to complete rather than spinning.
+  *
+  * Has no effect otherwise.
+  */
+ #define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
+ #define DRM_FENCE_FLAG_NO_USER             0x00000010
+ /* Reserved for driver use */
+ #define DRM_FENCE_MASK_DRIVER              0xFF000000
+ #define DRM_FENCE_TYPE_EXE                 0x00000001
+ struct drm_fence_arg {
+       unsigned int handle;
+       unsigned int fence_class;
+       unsigned int type;
+       unsigned int flags;
+       unsigned int signaled;
+       unsigned int error;
+       unsigned int sequence;
+       unsigned int pad64;
+       uint64_t expand_pad[2]; /*Future expansion */
  };
  
+ /* Buffer permissions, referring to how the GPU uses the buffers.
+  * These translate to fence types used for the buffers.
+  * Typically a texture buffer is read, a destination buffer is write, and
+  *  a command (batch-) buffer is exe. Can be or-ed together.
+  */
+ #define DRM_BO_FLAG_READ        (1ULL << 0)
+ #define DRM_BO_FLAG_WRITE       (1ULL << 1)
+ #define DRM_BO_FLAG_EXE         (1ULL << 2)
  /*
-  * A user object is a structure that helps the drm give out user handles
-  * to kernel internal objects and to keep track of these objects so that
-  * they can be destroyed, for example when the user space process exits.
-  * Designed to be accessible using a user space 32-bit handle.
-  */
- struct drm_user_object {
-       struct drm_hash_item hash;
-       struct list_head list;
-       enum drm_object_type type;
-       atomic_t refcount;
-       int shareable;
-       struct drm_file *owner;
-       void (*ref_struct_locked) (struct drm_file *priv,
-                                  struct drm_user_object *obj,
-                                  enum drm_ref_type ref_action);
-       void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
-                      enum drm_ref_type unref_action);
-       void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
- };
+  * All of the bits related to access mode
+  */
+ #define DRM_BO_MASK_ACCESS    (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
+ /*
+  * Status flags. Can be read to determine the actual state of a buffer.
+  * Can also be set in the buffer mask before validation.
+  */
  
  /*
-  * A ref object is a structure which is used to
-  * keep track of references to user objects and to keep track of these
-  * references so that they can be destroyed for example when the user space
-  * process exits. Designed to be accessible using a pointer to the _user_ object.
+  * Mask: Never evict this buffer. Not even with force. This type of buffer is only
+  * available to root and must be manually removed before buffer manager shutdown
+  * or lock.
+  * Flags: Acknowledge
   */
+ #define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)
  
- struct drm_ref_object {
-       struct drm_hash_item hash;
-       struct list_head list;
-       atomic_t refcount;
-       enum drm_ref_type unref_action;
- };
+ /*
+  * Mask: Require that the buffer is placed in mappable memory when validated.
+  *       If not set the buffer may or may not be in mappable memory when validated.
+  * Flags: If set, the buffer is in mappable memory.
+  */
+ #define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)
  
- /**
-  * Must be called with the struct_mutex held.
+ /* Mask: The buffer should be shareable with other processes.
+  * Flags: The buffer is shareable with other processes.
   */
+ #define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)
  
- extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
-                              int shareable);
- /**
-  * Must be called with the struct_mutex held.
+ /* Mask: If set, place the buffer in cache-coherent memory if available.
+  *       If clear, never place the buffer in cache-coherent memory if validated.
+  * Flags: The buffer is currently in cache-coherent memory.
+  */
+ #define DRM_BO_FLAG_CACHED      (1ULL << 7)
+ /* Mask: Make sure that every time this buffer is validated,
+  *       it ends up at the same location, provided that the memory mask is the same.
+  *       The buffer will also not be evicted when claiming space for
+  *       other buffers. Basically a pinned buffer, but it may be thrown out as
+  *       part of buffer manager shutdown or locking.
+  * Flags: Acknowledge.
   */
+ #define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)
+ /* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
+  * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
+  * with unsnooped PTEs instead of snooped, by using chipset-specific cache
+  * flushing at bind time.  A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
+  * as the eviction to local memory (TTM unbind) on map is just a side effect
+  * to prevent aggressive cache prefetch from the GPU disturbing the cache
+  * management that the DRM is doing.
+  *
+  * Flags: Acknowledge.
+  * Buffers allocated with this flag should not be used for suballocators.
+  * This type may have issues on CPUs with over-aggressive caching:
+  * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
+  */
+ #define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)
  
- extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
-                                                uint32_t key);
+ /* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
+  * Flags: Acknowledge.
+  */
+ #define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)
  
  /*
-  * Must be called with the struct_mutex held. May temporarily release it.
+  * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
+  * Flags: Acknowledge.
   */
+ #define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
+ #define DRM_BO_FLAG_TILE           (1ULL << 15)
  
- extern int drm_add_ref_object(struct drm_file *priv,
-                             struct drm_user_object *referenced_object,
-                             enum drm_ref_type ref_action);
+ /*
+  * Memory type flags that can be or'ed together in the mask, but only
+  * one appears in flags.
+  */
+ /* System memory */
+ #define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
+ /* Translation table memory */
+ #define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
+ /* Vram memory */
+ #define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
+ /* Up to the driver to define. */
+ #define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
+ #define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
+ #define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
+ #define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
+ #define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
+ /* We can add more of these now with a 64-bit flag type */
  
  /*
-  * Must be called with the struct_mutex held.
+  * This is a mask covering all of the memory type flags; easier to just
+  * use a single constant than a bunch of | values. It covers
+  * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
+  */
+ #define DRM_BO_MASK_MEM         0x00000000FF000000ULL
+ /*
+  * This adds all of the CPU-mapping options in with the memory
+  * type to label all bits which change how the page gets mapped
   */
+ #define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
+                                DRM_BO_FLAG_CACHED_MAPPED | \
+                                DRM_BO_FLAG_CACHED | \
+                                DRM_BO_FLAG_MAPPABLE)
+                                
+ /* Driver-private flags */
+ #define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL
  
- struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
-                                       struct drm_user_object *referenced_object,
-                                       enum drm_ref_type ref_action);
  /*
-  * Must be called with the struct_mutex held.
-  * If "item" has been obtained by a call to drm_lookup_ref_object. You may not
-  * release the struct_mutex before calling drm_remove_ref_object.
-  * This function may temporarily release the struct_mutex.
+  * Don't block on validate and map. Instead, return EBUSY.
+  */
+ #define DRM_BO_HINT_DONT_BLOCK  0x00000002
+ /*
+  * Don't place this buffer on the unfenced list. This means
+  * that the buffer will not end up having a fence associated
+  * with it as a result of this operation.
+  */
+ #define DRM_BO_HINT_DONT_FENCE  0x00000004
+ /**
+  * On hardware with no interrupt events for operation completion,
+  * indicates that the kernel should sleep while waiting for any blocking
+  * operation to complete rather than spinning.
+  *
+  * Has no effect otherwise.
+  */
+ #define DRM_BO_HINT_WAIT_LAZY   0x00000008
+ /*
+  * The client has computed relocations referring to this buffer using the
+  * offset in the presumed_offset field. If that offset ends up matching
+  * where this buffer lands, the kernel is free to skip executing those
+  * relocations.
   */
+ #define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
+ #define DRM_BO_MEM_LOCAL 0
+ #define DRM_BO_MEM_TT 1
+ #define DRM_BO_MEM_VRAM 2
+ #define DRM_BO_MEM_PRIV0 3
+ #define DRM_BO_MEM_PRIV1 4
+ #define DRM_BO_MEM_PRIV2 5
+ #define DRM_BO_MEM_PRIV3 6
+ #define DRM_BO_MEM_PRIV4 7
+ #define DRM_BO_MEM_TYPES 8 /* For now. */
+ #define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
+ #define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
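
Each DRM_BO flag serves double duty: in the mask it marks which bits the caller cares about, and in flags it gives the requested (or, on return, actual) state for those bits. A small illustrative request, asking for a read-only placement in GART memory that must stay CPU-mappable (the values here are an example, not taken from this tree):

    uint64_t flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ |
                     DRM_BO_FLAG_MAPPABLE;
    uint64_t mask  = DRM_BO_MASK_MEM | DRM_BO_MASK_ACCESS |
                     DRM_BO_FLAG_MAPPABLE;
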
  
- extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
- extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
-                              enum drm_object_type type,
-                              struct drm_user_object **object);
- extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
-                                enum drm_object_type type);
  
  /***************************************************
   * Fence objects. (drm_fence.c)
   */
  
  struct drm_fence_object {
-       struct drm_user_object base;
        struct drm_device *dev;
        atomic_t usage;
  
@@@ -417,7 -508,7 +508,7 @@@ extern int drm_ttm_destroy(struct drm_t
   */
  
  struct drm_bo_mem_reg {
 -      struct drm_memrange_node *mm_node;
 +      struct drm_mm_node *mm_node;
        unsigned long size;
        unsigned long num_pages;
        uint32_t page_alignment;
@@@ -470,7 -561,6 +561,6 @@@ enum drm_bo_type 
  
  struct drm_buffer_object {
        struct drm_device *dev;
-       struct drm_user_object base;
  
        /*
         * If there is a possibility that the usage variable is zero,
        unsigned long num_pages;
  
        /* For pinned buffers */
 -      struct drm_memrange_node *pinned_node;
 +      struct drm_mm_node *pinned_node;
        uint32_t pinned_mem_type;
        struct list_head pinned_lru;
  
@@@ -533,7 -623,7 +623,7 @@@ struct drm_mem_type_manager 
        int has_type;
        int use_type;
        int kern_init_type;
 -      struct drm_memrange manager;
 +      struct drm_mm manager;
        struct list_head lru;
        struct list_head pinned;
        uint32_t flags;
  };
  
  struct drm_bo_lock {
-       struct drm_user_object base;
+   //  struct drm_user_object base;
        wait_queue_head_t queue;
        atomic_t write_lock_pending;
        atomic_t readers;
@@@ -655,22 -745,10 +745,10 @@@ struct drm_bo_driver 
  /*
   * buffer objects (drm_bo.c)
   */
- extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+ int drm_bo_do_validate(struct drm_buffer_object *bo,
+                      uint64_t flags, uint64_t mask, uint32_t hint,
+                      uint32_t fence_class);
  extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
- extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
  extern int drm_bo_driver_finish(struct drm_device *dev);
  extern int drm_bo_driver_init(struct drm_device *dev);
  extern int drm_bo_pci_offset(struct drm_device *dev,
@@@ -707,18 -785,9 +785,9 @@@ extern int drm_bo_clean_mm(struct drm_d
  extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
                          unsigned long p_offset, unsigned long p_size,
                          int kern_init);
- extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
-                                 uint64_t flags, uint64_t mask, uint32_t hint,
-                                 uint32_t fence_class,
-                                 struct drm_bo_info_rep *rep,
-                                 struct drm_buffer_object **bo_rep);
  extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
                                                          uint32_t handle,
                                                          int check_owner);
- extern int drm_bo_do_validate(struct drm_buffer_object *bo,
-                             uint64_t flags, uint64_t mask, uint32_t hint,
-                             uint32_t fence_class,
-                             struct drm_bo_info_rep *rep);
  extern int drm_bo_evict_cached(struct drm_buffer_object *bo);
  
  extern void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
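
With the ioctl wrappers and the rep argument gone, drm_bo_do_validate() is now a purely in-kernel interface under the five-argument signature declared above. A sketch of a caller pinning a buffer into VRAM; pin_bo_in_vram() is an illustrative name and error handling is trimmed:

    static int pin_bo_in_vram(struct drm_buffer_object *bo)
    {
            return drm_bo_do_validate(bo,
                                      DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
                                      DRM_BO_MASK_MEM | DRM_BO_FLAG_NO_EVICT,
                                      DRM_BO_HINT_DONT_FENCE,
                                      0 /* fence_class */);
    }
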
@@@ -766,8 -835,6 +835,6 @@@ extern int drm_bo_pfn_prot(struct drm_b
                           unsigned long dst_offset,
                           unsigned long *pfn,
                           pgprot_t *prot);
- extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
-                               struct drm_bo_info_rep *rep);
  
  
  /*
@@@ -812,23 -879,6 +879,6 @@@ extern int drm_mem_reg_ioremap(struct d
                               void **virtual);
  extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
                                void *virtual);
- /*
-  * drm_bo_lock.c
-  * Simple replacement for the hardware lock on buffer manager init and clean.
-  */
- extern void drm_bo_init_lock(struct drm_bo_lock *lock);
- extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
- extern int drm_bo_read_lock(struct drm_bo_lock *lock,
-                           int interruptible);
- extern int drm_bo_write_lock(struct drm_bo_lock *lock,
-                            int interruptible,
-                            struct drm_file *file_priv);
- extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
-                              struct drm_file *file_priv);
  #ifdef CONFIG_DEBUG_MUTEXES
  #define DRM_ASSERT_LOCKED(_mutex)                                     \
        BUG_ON(!mutex_is_locked(_mutex) ||                              \
diff --combined linux-core/drm_stub.c
@@@ -88,30 -88,7 +88,7 @@@ again
        return new_id;
  }
  
- int drm_setmaster_ioctl(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv)
- {
-       if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
-               return -EINVAL;
-       if (!file_priv->master)
-               return -EINVAL;
-       if (!file_priv->minor->master && file_priv->minor->master != file_priv->master)
-               file_priv->minor->master = file_priv->master;
-       return 0;
- }
- int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
- {
-       if (!file_priv->master)
-               return -EINVAL;
-       file_priv->minor->master = NULL;
-       return 0;
- }
- struct drm_master *drm_get_master(struct drm_minor *minor)
+ struct drm_master *drm_master_create(struct drm_minor *minor)
  {
        struct drm_master *master;
  
        if (!master)
                return NULL;
  
+ //    INIT_LIST_HEAD(&master->filelist);
+       kref_init(&master->refcount);
        spin_lock_init(&master->lock.spinlock);
        init_waitqueue_head(&master->lock.lock_queue);
        drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
        return master;
  }
  
- void drm_put_master(struct drm_master *master)
+ struct drm_master *drm_master_get(struct drm_master *master)
  {
+       kref_get(&master->refcount);
+       return master;
+ }
+ static void drm_master_destroy(struct kref *kref)
+ {
+       struct drm_master *master = container_of(kref, struct drm_master, refcount);
        struct drm_magic_entry *pt, *next;
        struct drm_device *dev = master->minor->dev;
  
        drm_free(master, sizeof(*master), DRM_MEM_DRIVER);
  }
  
+ void drm_master_put(struct drm_master **master)
+ {
+       kref_put(&(*master)->refcount, drm_master_destroy);
+       *master = NULL;
+ }
+ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+ {
+       if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
+               return -EINVAL;
+       if (!file_priv->master)
+               return -EINVAL;
+       if (!file_priv->minor->master && file_priv->minor->master != file_priv->master) {
+               mutex_lock(&dev->struct_mutex);
+               file_priv->minor->master = drm_master_get(file_priv->master);
+               mutex_unlock(&dev->struct_mutex);
+       }
+       return 0;
+ }
+ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+ {
+       if (!file_priv->master)
+               return -EINVAL;
+       mutex_lock(&dev->struct_mutex);
+       drm_master_put(&file_priv->minor->master);
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+ }
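
Master lifetime is now kref-managed: drm_master_create() starts the count at one, every additional holder takes a reference through drm_master_get(), and drm_master_put() drops one and clears the caller's pointer, with the final put destroying the master via drm_master_destroy(). A sketch of the intended ownership pattern for code that caches a master pointer; cache_master() and release_master() are illustrative names, not functions in this tree:

    static void cache_master(struct drm_device *dev,
                             struct drm_file *file_priv,
                             struct drm_master **slot)
    {
            mutex_lock(&dev->struct_mutex);
            *slot = drm_master_get(file_priv->master);      /* +1 reference */
            mutex_unlock(&dev->struct_mutex);
    }

    static void release_master(struct drm_master **slot)
    {
            drm_master_put(slot);   /* -1 reference; *slot becomes NULL */
    }
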
  static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
                           const struct pci_device_id *ent,
                           struct drm_driver *driver)
  {
        int retcode;
  
+       INIT_LIST_HEAD(&dev->filelist);
        INIT_LIST_HEAD(&dev->ctxlist);
        INIT_LIST_HEAD(&dev->vmalist);
        INIT_LIST_HEAD(&dev->maplist);
-       INIT_LIST_HEAD(&dev->filelist);
  
        spin_lock_init(&dev->count_lock);
        spin_lock_init(&dev->drw_lock);
        spin_lock_init(&dev->tasklet_lock);
- //    spin_lock_init(&dev->lock.spinlock);
        init_timer(&dev->timer);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->ctxlist_mutex);
  
        if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER))
                return -ENOMEM;
 -
 -      if (drm_memrange_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
 -                            DRM_FILE_PAGE_OFFSET_SIZE)) {
 +      if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
 +                      DRM_FILE_PAGE_OFFSET_SIZE)) {
                drm_ht_remove(&dev->map_hash);
                return -ENOMEM;
        }
  
-       if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
-               drm_ht_remove(&dev->map_hash);
-               drm_mm_takedown(&dev->offset_manager);
-               return -ENOMEM;
-       }
        /* the DRM has 6 counters */
        dev->counters = 6;
        dev->types[0] = _DRM_STAT_LOCK;
@@@ -407,10 -421,10 +420,10 @@@ int drm_get_dev(struct pci_dev *pdev, c
  
        return 0;
  err_g5:
-       drm_put_minor(dev, &dev->primary);
+       drm_put_minor(&dev->primary);
  err_g4:
        if (drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_put_minor(dev, &dev->control);
+               drm_put_minor(&dev->control);
  err_g3:
        if (!drm_fb_loaded)
                pci_disable_device(pdev);
@@@ -461,14 -475,14 +474,14 @@@ int drm_put_dev(struct drm_device * dev
   * last minor released.
   *
   */
- int drm_put_minor(struct drm_device *dev, struct drm_minor **minor_p)
+ int drm_put_minor(struct drm_minor **minor_p)
  {
        struct drm_minor *minor = *minor_p;
        DRM_DEBUG("release secondary minor %d\n", minor->index);
  
        if (minor->type == DRM_MINOR_LEGACY) {
-               if (dev->driver->proc_cleanup)
-                       dev->driver->proc_cleanup(minor);
+               if (minor->dev->driver->proc_cleanup)
+                       minor->dev->driver->proc_cleanup(minor);
                drm_proc_cleanup(minor, drm_proc_root);
        }
        drm_sysfs_device_remove(minor);
diff --combined linux-core/drm_vm.c
@@@ -698,7 -698,7 +698,7 @@@ EXPORT_SYMBOL(drm_mmap)
   * protected by the bo->mutex lock.
   */
  
 -#ifdef DRM_FULL_MM_COMPAT
 +#if defined(DRM_FULL_MM_COMPAT) && !defined(DRM_NO_FAULT)
  static int drm_bo_vm_fault(struct vm_area_struct *vma,
                                     struct vm_fault *vmf)
  {
        unsigned long ret = VM_FAULT_NOPAGE;
  
        dev = bo->dev;
-       err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
-       if (err)
-               return VM_FAULT_NOPAGE;
 +
        err = mutex_lock_interruptible(&bo->mutex);
        if (err) {
-               drm_bo_read_unlock(&dev->bm.bm_lock);
                return VM_FAULT_NOPAGE;
        }
  
  out_unlock:
        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
        mutex_unlock(&bo->mutex);
-       drm_bo_read_unlock(&dev->bm.bm_lock);
        return ret;
  }
  #endif
diff --combined linux-core/radeon_buffer.c
@@@ -55,10 -55,14 +55,14 @@@ int radeon_invalidate_caches(struct drm
        drm_radeon_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;
  
+       if (!dev_priv->cp_running)
+               return 0;
        BEGIN_RING(4);
        RADEON_FLUSH_CACHE();
        RADEON_FLUSH_ZCACHE();
        ADVANCE_RING();
+       COMMIT_RING();
        return 0;
  }
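
Two fixes in one hunk: the cache flush bails out when the CP is not yet running (which can now happen during modesetting bring-up), and COMMIT_RING() actually pushes the write pointer to the hardware. The same guard/commit shape applies to any helper that emits ring commands; a sketch, with emit_flush_sketch() as an illustrative name:

    static int emit_flush_sketch(struct drm_device *dev)
    {
            drm_radeon_private_t *dev_priv = dev->dev_private;
            RING_LOCALS;

            if (!dev_priv->cp_running)      /* CP not up: nothing to flush */
                    return 0;

            BEGIN_RING(2);
            RADEON_FLUSH_CACHE();           /* two dwords */
            ADVANCE_RING();
            COMMIT_RING();                  /* kick the ring write pointer */
            return 0;
    }
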
  
@@@ -216,7 -220,7 +220,7 @@@ out_cleanup
        if (tmp_mem.mm_node) {
                mutex_lock(&dev->struct_mutex);
                if (tmp_mem.mm_node != bo->pinned_node)
 -                      drm_memrange_put_block(tmp_mem.mm_node);
 +                      drm_mm_put_block(tmp_mem.mm_node);
                tmp_mem.mm_node = NULL;
                mutex_unlock(&dev->struct_mutex);
        }
@@@ -261,6 -265,6 +265,6 @@@ uint64_t radeon_evict_flags(struct drm_
        case DRM_BO_MEM_TT:
                return DRM_BO_FLAG_MEM_LOCAL;
        default:
-               return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
+               return DRM_BO_FLAG_MEM_TT;
        }
  }
diff --combined linux-core/radeon_reg.h
  #       define RADEON_CRTC_ICON_EN          (1 << 15)
  #       define RADEON_CRTC_CUR_EN           (1 << 16)
  #       define RADEON_CRTC_CUR_MODE_MASK    (7 << 20)
+ #       define RADEON_CRTC_CUR_MODE_SHIFT   20
+ #       define RADEON_CRTC_CUR_MODE_MONO    0
+ #       define RADEON_CRTC_CUR_MODE_24BPP   2
  #       define RADEON_CRTC_EXT_DISP_EN      (1 << 24)
  #       define RADEON_CRTC_EN               (1 << 25)
  #       define RADEON_CRTC_DISP_REQ_EN_B    (1 << 26)
  #       define RADEON_DAC_PDWN_R            (1 << 16)
  #       define RADEON_DAC_PDWN_G            (1 << 17)
  #       define RADEON_DAC_PDWN_B            (1 << 18)
+ #define RADEON_DISP_PWR_MAN                 0x0d08
+ #       define RADEON_DISP_PWR_MAN_D3_CRTC_EN      (1 << 0)
+ #       define RADEON_DISP_PWR_MAN_D3_CRTC2_EN     (1 << 4)
+ #       define RADEON_DISP_PWR_MAN_DPMS_ON  (0 << 8)
+ #       define RADEON_DISP_PWR_MAN_DPMS_STANDBY    (1 << 8)
+ #       define RADEON_DISP_PWR_MAN_DPMS_SUSPEND    (2 << 8)
+ #       define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8)
+ #       define RADEON_DISP_D3_RST           (1 << 16)
+ #       define RADEON_DISP_D3_REG_RST       (1 << 17)
+ #       define RADEON_DISP_D3_GRPH_RST      (1 << 18)
+ #       define RADEON_DISP_D3_SUBPIC_RST    (1 << 19)
+ #       define RADEON_DISP_D3_OV0_RST       (1 << 20)
+ #       define RADEON_DISP_D1D2_GRPH_RST    (1 << 21)
+ #       define RADEON_DISP_D1D2_SUBPIC_RST  (1 << 22)
+ #       define RADEON_DISP_D1D2_OV0_RST     (1 << 23)
+ #       define RADEON_DIG_TMDS_ENABLE_RST   (1 << 24)
+ #       define RADEON_TV_ENABLE_RST         (1 << 25)
+ #       define RADEON_AUTO_PWRUP_EN         (1 << 26)
  #define RADEON_TV_DAC_CNTL                  0x088c
  #       define RADEON_TV_DAC_NBLANK         (1 << 0)
  #       define RADEON_TV_DAC_NHOLD          (1 << 1)
  #       define RADEON_LVDS_DISPLAY_DIS      (1   <<  1)
  #       define RADEON_LVDS_PANEL_TYPE       (1   <<  2)
  #       define RADEON_LVDS_PANEL_FORMAT     (1   <<  3)
+ #       define RADEON_LVDS_NO_FM            (0   <<  4)
+ #       define RADEON_LVDS_2_GREY           (1   <<  4)
+ #       define RADEON_LVDS_4_GREY           (2   <<  4)
  #       define RADEON_LVDS_RST_FM           (1   <<  6)
  #       define RADEON_LVDS_EN               (1   <<  7)
  #       define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8
  #       define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8)
  #       define RADEON_LVDS_BL_MOD_EN        (1   << 16)
+ #       define RADEON_LVDS_BL_CLK_SEL       (1   << 17)
  #       define RADEON_LVDS_DIGON            (1   << 18)
  #       define RADEON_LVDS_BLON             (1   << 19)
+ #       define RADEON_LVDS_FP_POL_LOW       (1   << 20)
+ #       define RADEON_LVDS_LP_POL_LOW       (1   << 21)
+ #       define RADEON_LVDS_DTM_POL_LOW      (1   << 22)
  #       define RADEON_LVDS_SEL_CRTC2        (1   << 23)
+ #       define RADEON_LVDS_FPDI_EN          (1   << 27)
+ #       define RADEON_LVDS_HSYNC_DELAY_SHIFT        28
  #define RADEON_LVDS_PLL_CNTL                0x02d4
  #       define RADEON_HSYNC_DELAY_SHIFT     28
  #       define RADEON_HSYNC_DELAY_MASK      (0xf << 28)
  #       define RADEON_STENCIL_ENABLE           (1  <<  7)
  #       define RADEON_Z_ENABLE                 (1  <<  8)
  #       define RADEON_DEPTH_XZ_OFFEST_ENABLE   (1  <<  9)
- #       define RADEON_COLOR_FORMAT_ARGB1555    (3  << 10)
- #       define RADEON_COLOR_FORMAT_RGB565      (4  << 10)
- #       define RADEON_COLOR_FORMAT_ARGB8888    (6  << 10)
- #       define RADEON_COLOR_FORMAT_RGB332      (7  << 10)
- #       define RADEON_COLOR_FORMAT_Y8          (8  << 10)
- #       define RADEON_COLOR_FORMAT_RGB8        (9  << 10)
- #       define RADEON_COLOR_FORMAT_YUV422_VYUY (11 << 10)
- #       define RADEON_COLOR_FORMAT_YUV422_YVYU (12 << 10)
- #       define RADEON_COLOR_FORMAT_aYUV444     (14 << 10)
- #       define RADEON_COLOR_FORMAT_ARGB4444    (15 << 10)
+ #       define RADEON_RB3D_COLOR_FORMAT_SHIFT  10
+ #       define RADEON_COLOR_FORMAT_ARGB1555    3
+ #       define RADEON_COLOR_FORMAT_RGB565      4
+ #       define RADEON_COLOR_FORMAT_ARGB8888    6
+ #       define RADEON_COLOR_FORMAT_RGB332      7
+ #       define RADEON_COLOR_FORMAT_Y8          8
+ #       define RADEON_COLOR_FORMAT_RGB8        9
+ #       define RADEON_COLOR_FORMAT_YUV422_VYUY 11
+ #       define RADEON_COLOR_FORMAT_YUV422_YVYU 12
+ #       define RADEON_COLOR_FORMAT_aYUV444     14
+ #       define RADEON_COLOR_FORMAT_ARGB4444    15
  #       define RADEON_CLRCMP_FLIP_ENABLE       (1  << 14)
  #define RADEON_RB3D_COLOROFFSET             0x1c40
  #       define RADEON_COLOROFFSET_MASK      0xfffffff0
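
Splitting the RB3D_CNTL colour formats into raw values plus RADEON_RB3D_COLOR_FORMAT_SHIFT lets callers both build and decode the field, instead of only or-ing in pre-shifted constants. Illustrative use (not code from this tree):

    uint32_t cntl = RADEON_Z_ENABLE |
                    (RADEON_COLOR_FORMAT_ARGB8888 << RADEON_RB3D_COLOR_FORMAT_SHIFT);

    unsigned int fmt = (cntl >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0xf;
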
  #       define RADEON_CSQ_PRIPIO_INDBM         (3    << 28)
  #       define RADEON_CSQ_PRIBM_INDBM          (4    << 28)
  #       define RADEON_CSQ_PRIPIO_INDPIO        (15   << 28)
+ #define R300_CP_RESYNC_ADDR                 0x778
+ #define R300_CP_RESYNC_DATA                 0x77c
  #define RADEON_CP_CSQ_STAT                  0x07f8
  #       define RADEON_CSQ_RPTR_PRIMARY_MASK    (0xff <<  0)
  #       define RADEON_CSQ_WPTR_PRIMARY_MASK    (0xff <<  8)
  #       define RADEON_CP_PACKET_COUNT_MASK          0x3fff0000
  #       define RADEON_CP_PACKET_MAX_DWORDS          (1 << 12)
  #       define RADEON_CP_PACKET0_REG_MASK           0x000007ff
+ #       define R300_CP_PACKET0_REG_MASK             0x00001fff
  #       define RADEON_CP_PACKET1_REG0_MASK          0x000007ff
  #       define RADEON_CP_PACKET1_REG1_MASK          0x003ff800
  
  #define AVIVO_D1GRPH_X_END                                      0x6134
  #define AVIVO_D1GRPH_Y_END                                      0x6138
  #define AVIVO_D1GRPH_UPDATE                                     0x6144
- #       define AVIVO_D1GRPH_UPDATE_LOCK                 (1<<16)
+ #       define AVIVO_D1GRPH_UPDATE_LOCK                         (1 << 16)
  #define AVIVO_D1GRPH_FLIP_CONTROL                               0x6148
  
  #define AVIVO_D1CUR_CONTROL                     0x6400
- #       define AVIVO_D1CURSOR_EN           (1<<0)
- #       define AVIVO_D1CURSOR_MODE_SHIFT  8
- #       define AVIVO_D1CURSOR_MODE_MASK   (0x3<<8)
- #       define AVIVO_D1CURSOR_MODE_24BPP  (0x2)
+ #       define AVIVO_D1CURSOR_EN                (1 << 0)
+ #       define AVIVO_D1CURSOR_MODE_SHIFT        8
+ #       define AVIVO_D1CURSOR_MODE_MASK         (3 << 8)
+ #       define AVIVO_D1CURSOR_MODE_24BPP        2
  #define AVIVO_D1CUR_SURFACE_ADDRESS             0x6408
  #define AVIVO_D1CUR_SIZE                        0x6410
  #define AVIVO_D1CUR_POSITION                    0x6414
  #       define R300_TILE_SIZE_32                        (2 << 4)
  #       define R300_SUBPIXEL_1_12                       (0 << 16)
  #       define R300_SUBPIXEL_1_16                       (1 << 16)
 -#define R300_GB_SELECT                                        0x401c
  #define R300_GB_ENABLE                                        0x4008
  #define R300_GB_AA_CONFIG                             0x4020
  #define R400_GB_PIPE_SELECT                             0x402c
  #       define R300_GL_CLIP_SPACE_DEF                   (0 << 22)
  #       define R300_DX_CLIP_SPACE_DEF                   (1 << 22)
  #       define R500_TCL_STATE_OPTIMIZATION              (1 << 23)
 -#define R300_VAP_VTE_CNTL                             0x20B0
 -#       define R300_VPORT_X_SCALE_ENA                   (1 << 0)
 -#       define R300_VPORT_X_OFFSET_ENA                  (1 << 1)
 -#       define R300_VPORT_Y_SCALE_ENA                   (1 << 2)
 -#       define R300_VPORT_Y_OFFSET_ENA                  (1 << 3)
 -#       define R300_VPORT_Z_SCALE_ENA                   (1 << 4)
 -#       define R300_VPORT_Z_OFFSET_ENA                  (1 << 5)
 -#       define R300_VTX_XY_FMT                          (1 << 8)
 -#       define R300_VTX_Z_FMT                           (1 << 9)
 -#       define R300_VTX_W0_FMT                          (1 << 10)
 -#define R300_VAP_VTX_STATE_CNTL                               0x2180
  #define R300_VAP_PSC_SGN_NORM_CNTL                    0x21DC
  #define R300_VAP_PROG_STREAM_CNTL_0                   0x2150
  #       define R300_DATA_TYPE_0_SHIFT                   0
  #       define R300_ENDIAN_SWAP_HALF_DWORD              (3 << 0)
  #       define R300_MACRO_TILE                          (1 << 2)
  
 -#define R300_TX_BORDER_COLOR_0                                0x45c0
++#define R300_TX_BORDER_COLOR_0                          0x45c0
  #define R300_TX_ENABLE                                        0x4104
  #       define R300_TEX_0_ENABLE                        (1 << 0)
  #       define R300_TEX_1_ENABLE                        (1 << 1)
  #       define R300_READ_ENABLE                         (1 << 2)
  #define R300_RB3D_ABLENDCNTL                          0x4e08
  #define R300_RB3D_DSTCACHE_CTLSTAT                    0x4e4c
 -#define R300_RB3D_COLOROFFSET0                                0x4e28
 -#define R300_RB3D_COLORPITCH0                         0x4e38
++#define R300_RB3D_COLOROFFSET0                          0x4e28
++#define R300_RB3D_COLORPITCH0                           0x4e38
+ #       define R300_COLORTILE                           (1 << 16)
+ #       define R300_COLORENDIAN_WORD                    (1 << 19)
+ #       define R300_COLORENDIAN_DWORD                   (2 << 19)
+ #       define R300_COLORENDIAN_HALF_DWORD              (3 << 19)
+ #       define R300_COLORFORMAT_ARGB1555                (3 << 21)
+ #       define R300_COLORFORMAT_RGB565                  (4 << 21)
+ #       define R300_COLORFORMAT_ARGB8888                (6 << 21)
+ #       define R300_COLORFORMAT_ARGB32323232            (7 << 21)
+ #       define R300_COLORFORMAT_I8                      (9 << 21)
+ #       define R300_COLORFORMAT_ARGB16161616            (10 << 21)
+ #       define R300_COLORFORMAT_VYUY                    (11 << 21)
+ #       define R300_COLORFORMAT_YVYU                    (12 << 21)
+ #       define R300_COLORFORMAT_UV88                    (13 << 21)
+ #       define R300_COLORFORMAT_ARGB4444                (15 << 21)
 -#define R300_RB3D_AARESOLVE_CTL                               0x4e88
++#define R300_RB3D_AARESOLVE_CTL                         0x4e88
  #define R300_RB3D_COLOR_CHANNEL_MASK                  0x4e0c
  #       define R300_BLUE_MASK_EN                        (1 << 0)
  #       define R300_GREEN_MASK_EN                       (1 << 1)
diff --combined shared-core/drm.h
@@@ -675,324 -675,6 +675,6 @@@ struct drm_set_version 
        int drm_dd_minor;
  };
  
- #define DRM_FENCE_FLAG_EMIT                0x00000001
- #define DRM_FENCE_FLAG_SHAREABLE           0x00000002
- /**
-  * On hardware with no interrupt events for operation completion,
-  * indicates that the kernel should sleep while waiting for any blocking
-  * operation to complete rather than spinning.
-  *
-  * Has no effect otherwise.
-  */
- #define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
- #define DRM_FENCE_FLAG_NO_USER             0x00000010
- /* Reserved for driver use */
- #define DRM_FENCE_MASK_DRIVER              0xFF000000
- #define DRM_FENCE_TYPE_EXE                 0x00000001
- struct drm_fence_arg {
-       unsigned int handle;
-       unsigned int fence_class;
-       unsigned int type;
-       unsigned int flags;
-       unsigned int signaled;
-       unsigned int error;
-       unsigned int sequence;
-       unsigned int pad64;
-       uint64_t expand_pad[2]; /*Future expansion */
- };
- /* Buffer permissions, referring to how the GPU uses the buffers.
-  * these translate to fence types used for the buffers.
-  * Typically a texture buffer is read, A destination buffer is write and
-  *  a command (batch-) buffer is exe. Can be or-ed together.
-  */
- #define DRM_BO_FLAG_READ        (1ULL << 0)
- #define DRM_BO_FLAG_WRITE       (1ULL << 1)
- #define DRM_BO_FLAG_EXE         (1ULL << 2)
- /*
-  * All of the bits related to access mode
-  */
- #define DRM_BO_MASK_ACCESS    (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
- /*
-  * Status flags. Can be read to determine the actual state of a buffer.
-  * Can also be set in the buffer mask before validation.
-  */
- /*
-  * Mask: Never evict this buffer. Not even with force. This type of buffer is only
-  * available to root and must be manually removed before buffer manager shutdown
-  * or lock.
-  * Flags: Acknowledge
-  */
- #define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)
- /*
-  * Mask: Require that the buffer is placed in mappable memory when validated.
-  *       If not set the buffer may or may not be in mappable memory when validated.
-  * Flags: If set, the buffer is in mappable memory.
-  */
- #define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)
- /* Mask: The buffer should be shareable with other processes.
-  * Flags: The buffer is shareable with other processes.
-  */
- #define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)
- /* Mask: If set, place the buffer in cache-coherent memory if available.
-  *       If clear, never place the buffer in cache coherent memory if validated.
-  * Flags: The buffer is currently in cache-coherent memory.
-  */
- #define DRM_BO_FLAG_CACHED      (1ULL << 7)
- /* Mask: Make sure that every time this buffer is validated,
-  *       it ends up on the same location provided that the memory mask is the same.
-  *       The buffer will also not be evicted when claiming space for
-  *       other buffers. Basically a pinned buffer but it may be thrown out as
-  *       part of buffer manager shutdown or locking.
-  * Flags: Acknowledge.
-  */
- #define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)
- /* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
-  * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
-  * with unsnooped PTEs instead of snooped, by using chipset-specific cache
-  * flushing at bind time.  A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
-  * as the eviction to local memory (TTM unbind) on map is just a side effect
-  * to prevent aggressive cache prefetch from the GPU disturbing the cache
-  * management that the DRM is doing.
-  *
-  * Flags: Acknowledge.
-  * Buffers allocated with this flag should not be used for suballocators
-  * This type may have issues on CPUs with over-aggressive caching
-  * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
-  */
- #define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)
- /* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
-  * Flags: Acknowledge.
-  */
- #define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)
- /*
-  * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
-  * Flags: Acknowledge.
-  */
- #define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
- #define DRM_BO_FLAG_TILE           (1ULL << 15)
- /*
-  * Memory type flags that can be or'ed together in the mask, but only
-  * one appears in flags.
-  */
- /* System memory */
- #define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
- /* Translation table memory */
- #define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
- /* Vram memory */
- #define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
- /* Up to the driver to define. */
- #define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
- #define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
- #define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
- #define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
- #define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
- /* We can add more of these now with a 64-bit flag type */
- /*
-  * This is a mask covering all of the memory type flags; easier to just
-  * use a single constant than a bunch of | values. It covers
-  * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
-  */
- #define DRM_BO_MASK_MEM         0x00000000FF000000ULL
- /*
-  * This adds all of the CPU-mapping options in with the memory
-  * type to label all bits which change how the page gets mapped
-  */
- #define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
-                                DRM_BO_FLAG_CACHED_MAPPED | \
-                                DRM_BO_FLAG_CACHED | \
-                                DRM_BO_FLAG_MAPPABLE)
-                                
- /* Driver-private flags */
- #define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL
- /*
-  * Don't block on validate and map. Instead, return EBUSY.
-  */
- #define DRM_BO_HINT_DONT_BLOCK  0x00000002
- /*
-  * Don't place this buffer on the unfenced list. This means
-  * that the buffer will not end up having a fence associated
-  * with it as a result of this operation
-  */
- #define DRM_BO_HINT_DONT_FENCE  0x00000004
- /**
-  * On hardware with no interrupt events for operation completion,
-  * indicates that the kernel should sleep while waiting for any blocking
-  * operation to complete rather than spinning.
-  *
-  * Has no effect otherwise.
-  */
- #define DRM_BO_HINT_WAIT_LAZY   0x00000008
- /*
-  * The client has compute relocations refering to this buffer using the
-  * offset in the presumed_offset field. If that offset ends up matching
-  * where this buffer lands, the kernel is free to skip executing those
-  * relocations
-  */
- #define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
- #define DRM_BO_INIT_MAGIC 0xfe769812
- #define DRM_BO_INIT_MAJOR 1
- #define DRM_BO_INIT_MINOR 0
- #define DRM_BO_INIT_PATCH 0
- struct drm_bo_info_req {
-       uint64_t mask;
-       uint64_t flags;
-       unsigned int handle;
-       unsigned int hint;
-       unsigned int fence_class;
-       unsigned int desired_tile_stride;
-       unsigned int tile_info;
-       unsigned int pad64;
-       uint64_t presumed_offset;
- };
- struct drm_bo_create_req {
-       uint64_t flags;
-       uint64_t size;
-       uint64_t buffer_start;
-       unsigned int hint;
-       unsigned int page_alignment;
- };
- /*
-  * Reply flags
-  */
- #define DRM_BO_REP_BUSY 0x00000001
- struct drm_bo_info_rep {
-       uint64_t flags;
-       uint64_t proposed_flags;
-       uint64_t size;
-       uint64_t offset;
-       uint64_t arg_handle;
-       uint64_t buffer_start;
-       unsigned int handle;
-       unsigned int fence_flags;
-       unsigned int rep_flags;
-       unsigned int page_alignment;
-       unsigned int desired_tile_stride;
-       unsigned int hw_tile_stride;
-       unsigned int tile_info;
-       unsigned int pad64;
-       uint64_t expand_pad[4]; /*Future expansion */
- };
- struct drm_bo_arg_rep {
-       struct drm_bo_info_rep bo_info;
-       int ret;
-       unsigned int pad64;
- };
- struct drm_bo_create_arg {
-       union {
-               struct drm_bo_create_req req;
-               struct drm_bo_info_rep rep;
-       } d;
- };
- struct drm_bo_handle_arg {
-       unsigned int handle;
- };
- struct drm_bo_reference_info_arg {
-       union {
-               struct drm_bo_handle_arg req;
-               struct drm_bo_info_rep rep;
-       } d;
- };
- struct drm_bo_map_wait_idle_arg {
-       union {
-               struct drm_bo_info_req req;
-               struct drm_bo_info_rep rep;
-       } d;
- };
- struct drm_bo_op_req {
-       enum {
-               drm_bo_validate,
-               drm_bo_fence,
-               drm_bo_ref_fence,
-       } op;
-       unsigned int arg_handle;
-       struct drm_bo_info_req bo_req;
- };
- struct drm_bo_op_arg {
-       uint64_t next;
-       union {
-               struct drm_bo_op_req req;
-               struct drm_bo_arg_rep rep;
-       } d;
-       int handled;
-       unsigned int pad64;
- };
- #define DRM_BO_MEM_LOCAL 0
- #define DRM_BO_MEM_TT 1
- #define DRM_BO_MEM_VRAM 2
- #define DRM_BO_MEM_PRIV0 3
- #define DRM_BO_MEM_PRIV1 4
- #define DRM_BO_MEM_PRIV2 5
- #define DRM_BO_MEM_PRIV3 6
- #define DRM_BO_MEM_PRIV4 7
- #define DRM_BO_MEM_TYPES 8 /* For now. */
- #define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
- #define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
- struct drm_bo_version_arg {
-       uint32_t major;
-       uint32_t minor;
-       uint32_t patchlevel;
- };
- struct drm_mm_type_arg {
-       unsigned int mem_type;
-       unsigned int lock_flags;
- };
- struct drm_mm_init_arg {
-       unsigned int magic;
-       unsigned int major;
-       unsigned int minor;
-       unsigned int mem_type;
-       uint64_t p_offset;
-       uint64_t p_size;
- };
- struct drm_mm_info_arg {
-       unsigned int mem_type;
-       uint64_t p_size;
- };
  struct drm_gem_close {
        /** Handle of the object to be closed. */
        uint32_t handle;
@@@ -1330,38 -1012,13 +1012,13 @@@ struct drm_mode_crtc_lut 
  #define DRM_IOCTL_AGP_BIND            DRM_IOW( 0x36, struct drm_agp_binding)
  #define DRM_IOCTL_AGP_UNBIND          DRM_IOW( 0x37, struct drm_agp_binding)
  
 -#define DRM_IOCTL_SG_ALLOC            DRM_IOW0x38, struct drm_scatter_gather)
 +#define DRM_IOCTL_SG_ALLOC            DRM_IOWR(0x38, struct drm_scatter_gather)
  #define DRM_IOCTL_SG_FREE             DRM_IOW( 0x39, struct drm_scatter_gather)
  
  #define DRM_IOCTL_WAIT_VBLANK         DRM_IOWR(0x3a, union drm_wait_vblank)
  
  #define DRM_IOCTL_UPDATE_DRAW           DRM_IOW(0x3f, struct drm_update_draw)
  
- #define DRM_IOCTL_MM_INIT               DRM_IOWR(0xc0, struct drm_mm_init_arg)
- #define DRM_IOCTL_MM_TAKEDOWN           DRM_IOWR(0xc1, struct drm_mm_type_arg)
- #define DRM_IOCTL_MM_LOCK               DRM_IOWR(0xc2, struct drm_mm_type_arg)
- #define DRM_IOCTL_MM_UNLOCK             DRM_IOWR(0xc3, struct drm_mm_type_arg)
- #define DRM_IOCTL_FENCE_CREATE          DRM_IOWR(0xc4, struct drm_fence_arg)
- #define DRM_IOCTL_FENCE_REFERENCE       DRM_IOWR(0xc6, struct drm_fence_arg)
- #define DRM_IOCTL_FENCE_UNREFERENCE     DRM_IOWR(0xc7, struct drm_fence_arg)
- #define DRM_IOCTL_FENCE_SIGNALED        DRM_IOWR(0xc8, struct drm_fence_arg)
- #define DRM_IOCTL_FENCE_FLUSH           DRM_IOWR(0xc9, struct drm_fence_arg)
- #define DRM_IOCTL_FENCE_WAIT            DRM_IOWR(0xca, struct drm_fence_arg)
- #define DRM_IOCTL_FENCE_EMIT            DRM_IOWR(0xcb, struct drm_fence_arg)
- #define DRM_IOCTL_FENCE_BUFFERS         DRM_IOWR(0xcc, struct drm_fence_arg)
- #define DRM_IOCTL_BO_CREATE             DRM_IOWR(0xcd, struct drm_bo_create_arg)
- #define DRM_IOCTL_BO_MAP                DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
- #define DRM_IOCTL_BO_UNMAP              DRM_IOWR(0xd0, struct drm_bo_handle_arg)
- #define DRM_IOCTL_BO_REFERENCE          DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
- #define DRM_IOCTL_BO_UNREFERENCE        DRM_IOWR(0xd2, struct drm_bo_handle_arg)
- #define DRM_IOCTL_BO_SETSTATUS          DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
- #define DRM_IOCTL_BO_INFO               DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
- #define DRM_IOCTL_BO_WAIT_IDLE          DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
- #define DRM_IOCTL_BO_VERSION          DRM_IOR(0xd6, struct drm_bo_version_arg)
- #define DRM_IOCTL_MM_INFO               DRM_IOWR(0xd7, struct drm_mm_info_arg)
  #define DRM_IOCTL_MODE_GETRESOURCES     DRM_IOWR(0xA0, struct drm_mode_card_res)
  #define DRM_IOCTL_MODE_GETCRTC          DRM_IOWR(0xA1, struct drm_mode_crtc)
  #define DRM_IOCTL_MODE_GETCONNECTOR        DRM_IOWR(0xA2, struct drm_mode_get_connector)
@@@ -1439,11 -1096,6 +1096,6 @@@ typedef struct drm_agp_binding drm_agp_
  typedef struct drm_agp_info drm_agp_info_t;
  typedef struct drm_scatter_gather drm_scatter_gather_t;
  typedef struct drm_set_version drm_set_version_t;
- typedef struct drm_fence_arg drm_fence_arg_t;
- typedef struct drm_mm_type_arg drm_mm_type_arg_t;
- typedef struct drm_mm_init_arg drm_mm_init_arg_t;
- typedef enum drm_bo_type drm_bo_type_t;
  #endif
  
  #endif
diff --combined shared-core/i915_dma.c
@@@ -41,12 -41,12 +41,12 @@@ int i915_wait_ring(struct drm_device * 
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
        u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 -      u32 acthd_reg = IS_I965G(dev) ? I965REG_ACTHD : I915REG_ACTHD;
 +      u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
        u32 last_acthd = I915_READ(acthd_reg);
        u32 acthd;
        int i;
  
 -      for (i = 0; i < 10000; i++) {
 +      for (i = 0; i < 100000; i++) {
                ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
                acthd = I915_READ(acthd_reg);
                ring->space = ring->head - (ring->tail + 8);
@@@ -132,10 -132,23 +132,10 @@@ int i915_dma_cleanup(struct drm_device 
                dev_priv->ring.Size = 0;
          }
  
 -        if (dev_priv->status_page_dmah) {
 -                drm_pci_free(dev, dev_priv->status_page_dmah);
 -                dev_priv->status_page_dmah = NULL;
 -                /* Need to rewrite hardware status page */
 -                I915_WRITE(0x02080, 0x1ffff000);
 -        }
 -
 -        if (dev_priv->hws_agpoffset) {
 -                dev_priv->hws_agpoffset = 0;
 -                drm_core_ioremapfree(&dev_priv->hws_map, dev);
 -                I915_WRITE(0x02080, 0x1ffff000);
 -        }
 -
        return 0;
  }
  
- #if defined(I915_HAVE_BUFFER) && defined(DRI2)
+ #if defined(DRI2)
  #define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
  #define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
  #define DRI2_SAREA_BLOCK_NEXT(p)                              \
@@@ -213,12 -226,7 +213,7 @@@ static int i915_initialize(struct drm_d
                }
        }
  
- #ifdef I915_HAVE_BUFFER
-       if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-               dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
-       }
- #endif
+       
        if (init->ring_size != 0) {
                dev_priv->ring.Size = init->ring_size;
                dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
         */
        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
  
- #ifdef I915_HAVE_BUFFER
-       if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-               mutex_init(&dev_priv->cmdbuf_mutex);
+       /* Program Hardware Status Page */
+       if (!I915_NEED_GFX_HWS(dev)) {
+               dev_priv->status_page_dmah =
+                       drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+               if (!dev_priv->status_page_dmah) {
+                       i915_dma_cleanup(dev);
+                       DRM_ERROR("Can not allocate hardware status page\n");
+                       return -ENOMEM;
+               }
+               dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
+               dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+               memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
+               I915_WRITE(0x02080, dev_priv->dma_status_page);
        }
+       DRM_DEBUG("Enabled hardware status page\n");
  #ifdef DRI2
        if (init->func == I915_INIT_DMA2) {
                int ret = setup_dri2_sarea(dev, file_priv, init);
                }
        }
  #endif /* DRI2 */
- #endif /* I915_HAVE_BUFFER */
  
        return 0;
  }
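
Moving the status-page setup into i915_initialize() means chips that do not need a GFX-addressed page get a PCI-consistent page up front, mapped at dev_priv->hws_vaddr. A sketch of reading a breadcrumb slot out of that page; the slot index here is an assumption for illustration, not the value the driver actually uses:

    #define SKETCH_BREADCRUMB_SLOT 5        /* assumed index, illustration only */

    static u32 read_breadcrumb_sketch(struct drm_i915_private *dev_priv)
    {
            volatile u32 *hws = (volatile u32 *)dev_priv->hws_vaddr;
            return hws[SKETCH_BREADCRUMB_SLOT];
    }
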
@@@ -288,14 -310,14 +297,14 @@@ static int i915_dma_resume(struct drm_d
        }
  
        /* Program Hardware Status Page */
 -      if (!dev_priv->hws_vaddr) {
 +      if (!dev_priv->hw_status_page) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
 -      DRM_DEBUG("hw status page @ %p\n", dev_priv->hws_vaddr);
 +      DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
  
 -      if (dev_priv->hws_agpoffset != 0)
 -              I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
 +      if (dev_priv->status_gfx_addr != 0)
 +              I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        DRM_DEBUG("Enabled hardware status page\n");
@@@ -533,9 -555,6 +542,6 @@@ int i915_emit_mi_flush(struct drm_devic
  static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   struct drm_i915_cmdbuffer * cmd)
  {
- #ifdef I915_HAVE_FENCE
-       struct drm_i915_private *dev_priv = dev->dev_private;
- #endif
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;
  
        }
  
        i915_emit_breadcrumb(dev);
- #ifdef I915_HAVE_FENCE
-       if (unlikely((dev_priv->counter & 0xFF) == 0))
-               drm_fence_flush_old(dev, 0, dev_priv->counter);
- #endif
        return 0;
  }
  
@@@ -616,10 -631,6 +618,6 @@@ int i915_dispatch_batchbuffer(struct dr
        }
  
        i915_emit_breadcrumb(dev);
- #ifdef I915_HAVE_FENCE
-       if (unlikely((dev_priv->counter & 0xFF) == 0))
-               drm_fence_flush_old(dev, 0, dev_priv->counter);
- #endif
        return 0;
  }
  
@@@ -678,7 -689,6 +676,6 @@@ static void i915_do_dispatch_flip(struc
  
  void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int i;
  
                        i915_do_dispatch_flip(dev, i, sync);
  
        i915_emit_breadcrumb(dev);
- #ifdef I915_HAVE_FENCE
-       if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
-               drm_fence_flush_old(dev, 0, dev_priv->counter);
- #endif
  }
  
  int i915_quiescent(struct drm_device *dev)
@@@ -864,9 -870,6 +857,9 @@@ static int i915_getparam(struct drm_dev
        case I915_PARAM_CHIPSET_ID:
                value = dev->pci_device;
                break;
 +      case I915_PARAM_HAS_GEM:
 +              value = 1;
 +              break;
        default:
                DRM_ERROR("Unknown parameter %d\n", param->param);
                return -EINVAL;
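
I915_PARAM_HAS_GEM gives userspace a cheap runtime probe for GEM support. A sketch of the query from a client; i915_has_gem() is an illustrative helper, and any ioctl failure is treated as "no GEM", which is the expected outcome on kernels that predate the parameter:

    #include <sys/ioctl.h>
    #include "i915_drm.h"

    static int i915_has_gem(int fd)
    {
            drm_i915_getparam_t gp;
            int value = 0;

            gp.param = I915_PARAM_HAS_GEM;
            gp.value = &value;
            if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
                    return 0;       /* old kernel or error: assume no GEM */
            return value;
    }
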
@@@ -1004,7 -1007,7 +997,7 @@@ static int i915_set_status_page(struct 
  
        DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
  
 -      dev_priv->hws_agpoffset = hws->addr & (0x1ffff<<12);
 +      dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
  
        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
        drm_core_ioremap(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.handle == NULL) {
                i915_dma_cleanup(dev);
 -              dev_priv->hws_agpoffset = 0;
 +              dev_priv->status_gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                                " G33 hw status page\n");
                return -ENOMEM;
        }
 -      dev_priv->hws_vaddr = dev_priv->hws_map.handle;
  
 -      memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
 -      I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
 -      DRM_DEBUG("load hws at %p\n", dev_priv->hws_vaddr);
 +      dev_priv->hw_status_page = dev_priv->hws_map.handle;
 +
 +      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 +      I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 +      DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
  
        return 0;
  }
@@@ -1049,9 -1051,6 +1042,6 @@@ struct drm_ioctl_desc i915_ioctls[] = 
        DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
- #ifdef I915_HAVE_BUFFER
-       DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
- #endif
        DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
 +      DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
 +      DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
  };
  
  int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --combined shared-core/i915_drm.h
@@@ -190,8 -190,6 +190,8 @@@ typedef struct drm_i915_sarea 
  #define DRM_I915_GEM_MMAP     0x1e
  #define DRM_I915_GEM_SET_DOMAIN       0x1f
  #define DRM_I915_GEM_SW_FINISH        0x20
 +#define DRM_I915_GEM_SET_TILING       0x21
 +#define DRM_I915_GEM_GET_TILING       0x22
  
  #define DRM_IOCTL_I915_INIT           DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
  #define DRM_IOCTL_I915_FLUSH          DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
  #define DRM_IOCTL_I915_GEM_MMAP               DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
  #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
  #define DRM_IOCTL_I915_GEM_SW_FINISH  DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
 +#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
 +#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
  
  /* Asynchronous page flipping:
   */
@@@ -280,7 -276,6 +280,7 @@@ typedef struct drm_i915_irq_wait 
  #define I915_PARAM_ALLOW_BATCHBUFFER     2
  #define I915_PARAM_LAST_DISPATCH         3
  #define I915_PARAM_CHIPSET_ID            4
 +#define I915_PARAM_HAS_GEM               5
  
  typedef struct drm_i915_getparam {
        int param;
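
As an aside, userspace can probe the new capability with the getparam ioctl; a minimal sketch, not code from this patch (assumes the canonical drm_i915_getparam layout with an int *value member, and calls ioctl(2) directly; error handling trimmed):

	static int i915_has_gem(int fd)
	{
		drm_i915_getparam_t gp;
		int value = 0;

		gp.param = I915_PARAM_HAS_GEM;
		gp.value = &value;
		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
			return 0;	/* older kernel: no GEM */
		return value;
	}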
@@@ -380,58 -375,6 +380,6 @@@ typedef struct drm_i915_hws_addr 
        uint64_t addr;
  } drm_i915_hws_addr_t;
  
- /*
-  * Relocation header is 4 uint32_ts
-  * 0 - 32 bit reloc count
-  * 1 - 32-bit relocation type
-  * 2-3 - 64-bit user buffer handle ptr for another list of relocs.
-  */
- #define I915_RELOC_HEADER 4
- /*
-  * type 0 relocation has 4-uint32_t stride
-  * 0 - offset into buffer
-  * 1 - delta to add in
-  * 2 - buffer handle
-  * 3 - reserved (for optimisations later).
-  */
- /*
-  * type 1 relocation has 4-uint32_t stride.
-  * Hangs off the first item in the op list.
-  * Performed after all valiations are done.
-  * Try to group relocs into the same relocatee together for
-  * performance reasons.
-  * 0 - offset into buffer
-  * 1 - delta to add in
-  * 2 - buffer index in op list.
-  * 3 - relocatee index in op list.
-  */
- #define I915_RELOC_TYPE_0 0
- #define I915_RELOC0_STRIDE 4
- #define I915_RELOC_TYPE_1 1
- #define I915_RELOC1_STRIDE 4
- struct drm_i915_op_arg {
-       uint64_t next;
-       uint64_t reloc_ptr;
-       int handled;
-       unsigned int pad64;
-       union {
-               struct drm_bo_op_req req;
-               struct drm_bo_arg_rep rep;
-       } d;
- };
- struct drm_i915_execbuffer {
-       uint64_t ops_list;
-       uint32_t num_buffers;
-       struct drm_i915_batchbuffer batch;
-       drm_context_t context; /* for lockless use in the future */
-       struct drm_fence_arg fence_arg;
- };
  struct drm_i915_gem_init {
        /**
         * Beginning offset in the GTT to be managed by the DRM memory
@@@ -660,64 -603,4 +608,64 @@@ struct drm_i915_gem_busy 
        uint32_t busy;
  };
  
 +#define I915_TILING_NONE      0
 +#define I915_TILING_X         1
 +#define I915_TILING_Y         2
 +
 +#define I915_BIT_6_SWIZZLE_NONE               0
 +#define I915_BIT_6_SWIZZLE_9          1
 +#define I915_BIT_6_SWIZZLE_9_10               2
 +#define I915_BIT_6_SWIZZLE_9_11               3
 +#define I915_BIT_6_SWIZZLE_9_10_11    4
 +/* Not seen by userland */
 +#define I915_BIT_6_SWIZZLE_UNKNOWN    5
 +
 +struct drm_i915_gem_set_tiling {
 +      /** Handle of the buffer to have its tiling state updated */
 +      uint32_t handle;
 +
 +      /**
 +       * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
 +       * I915_TILING_Y).
 +       *
 +       * This value is to be set on request, and will be updated by the
 +       * kernel on successful return with the actual chosen tiling layout.
 +       *
 +       * The tiling mode may be demoted to I915_TILING_NONE when the system
 +       * has bit 6 swizzling that can't be managed correctly by GEM.
 +       *
 +       * Buffer contents become undefined when changing tiling_mode.
 +       */
 +      uint32_t tiling_mode;
 +
 +      /**
 +       * Stride in bytes for the object when in I915_TILING_X or
 +       * I915_TILING_Y.
 +       */
 +      uint32_t stride;
 +
 +      /**
 +       * Returned address bit 6 swizzling required for CPU access through
 +       * mmap mapping.
 +       */
 +      uint32_t swizzle_mode;
 +};
 +
 +struct drm_i915_gem_get_tiling {
 +      /** Handle of the buffer to get tiling state for. */
 +      uint32_t handle;
 +
 +      /**
 +       * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
 +       * I915_TILING_Y).
 +       */
 +      uint32_t tiling_mode;
 +
 +      /**
 +       * Returned address bit 6 swizzling required for CPU access through
 +       * mmap mapping.
 +       */
 +      uint32_t swizzle_mode;
 +};
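
As an illustration of how a client might drive the two new ioctls (a sketch, not code from this patch; fd, bo_handle and pitch_in_bytes are placeholders):

	struct drm_i915_gem_set_tiling set;

	memset(&set, 0, sizeof(set));
	set.handle = bo_handle;		/* GEM handle of the buffer */
	set.tiling_mode = I915_TILING_X;
	set.stride = pitch_in_bytes;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set) == 0) {
		/* per the doc comments above, the kernel may demote the
		 * request, so read back set.tiling_mode / set.swizzle_mode
		 * instead of assuming I915_TILING_X stuck */
	}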
 +
  #endif                                /* _I915_DRM_H_ */
diff --combined shared-core/i915_init.c
@@@ -100,32 -100,92 +100,32 @@@ int i915_probe_agp(struct pci_dev *pdev
        return 0;
  }
  
 -static int i915_init_hwstatus(struct drm_device *dev)
 +static int
 +i915_init_hws_phys(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_memrange_node *free_space;
        int ret = 0;
  
 -      /* Program Hardware Status Page */
 -      if (!IS_G33(dev)) {
 -              dev_priv->status_page_dmah = 
 -                      drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
 +      dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
 +                                                 0xffffffff);
  
 -              if (!dev_priv->status_page_dmah) {
 -                      DRM_ERROR("Can not allocate hardware status page\n");
 -                      ret = -ENOMEM;
 -                      goto out;
 -              }
 -              dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
 -              dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 -
 -              I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 -      } else {
 -              free_space = drm_memrange_search_free(&dev_priv->vram,
 -                                                    PAGE_SIZE,
 -                                                    PAGE_SIZE, 0);
 -              if (!free_space) {
 -                      DRM_ERROR("No free vram available, aborting\n");
 -                      ret = -ENOMEM;
 -                      goto out;
 -              }
 -
 -              dev_priv->hws = drm_memrange_get_block(free_space, PAGE_SIZE,
 -                                                     PAGE_SIZE);
 -              if (!dev_priv->hws) {
 -                      DRM_ERROR("Unable to allocate or pin hw status page\n");
 -                      ret = -EINVAL;
 -                      goto out;
 -              }
 -
 -              dev_priv->hws_agpoffset = dev_priv->hws->start;
 -              dev_priv->hws_map.offset = dev->agp->base +
 -                      dev_priv->hws->start;
 -              dev_priv->hws_map.size = PAGE_SIZE;
 -              dev_priv->hws_map.type= 0;
 -              dev_priv->hws_map.flags= 0;
 -              dev_priv->hws_map.mtrr = 0;
 -
 -              drm_core_ioremap(&dev_priv->hws_map, dev);
 -              if (dev_priv->hws_map.handle == NULL) {
 -                      dev_priv->hws_agpoffset = 0;
 -                      DRM_ERROR("can not ioremap virtual addr for"
 -                                      "G33 hw status page\n");
 -                      ret = -ENOMEM;
 -                      goto out_free;
 -              }
 -              dev_priv->hws_vaddr = dev_priv->hws_map.handle;
 -              I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
 +      if (!dev_priv->status_page_dmah) {
 +              DRM_ERROR("Can not allocate hardware status page\n");
 +              ret = -ENOMEM;
 +              goto out;
        }
 +      dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
 +      dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
  
 -      memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
 +      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
  
 -      DRM_DEBUG("Enabled hardware status page\n");
 +      I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 +      DRM_DEBUG("hws kernel virt: 0x%p\n", dev_priv->hw_status_page);
  
 -      return 0;
 -
 -out_free:
 -      /* free hws */
  out:
        return ret;
  }
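
A note on the drm_pci_alloc() arguments above (reading aid; the rationale is inferred from the register usage, not stated in the patch):

	/* drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff):
	 *   size  = PAGE_SIZE  -> one page for the hardware status page
	 *   align = PAGE_SIZE  -> HWS_PGA takes a page-aligned address
	 *   mask  = 0xffffffff -> the bus address must fit in 32 bits,
	 *                         since HWS_PGA is a 32-bit register
	 */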
  
 -static void i915_cleanup_hwstatus(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      if (!IS_G33(dev)) {
 -              if (dev_priv->status_page_dmah)
 -                      drm_pci_free(dev, dev_priv->status_page_dmah);
 -      } else {
 -              if (dev_priv->hws_map.handle)
 -                      drm_core_ioremapfree(&dev_priv->hws_map, dev);
 -              if (dev_priv->hws)
 -                      drm_memrange_put_block(dev_priv->hws);
 -      }
 -      I915_WRITE(HWS_PGA, 0x1ffff000);
 -}
 -
  static int i915_load_modeset_init(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
  
        /* Basic memrange allocator for stolen space (aka vram) */
 -      drm_memrange_init(&dev_priv->vram, 0, prealloc_size);
 +      drm_mm_init(&dev_priv->vram, 0, prealloc_size);
        /* Let GEM Manage from end of prealloc space to end of aperture */
        i915_gem_do_init(dev, prealloc_size, agp_size);
  
 +      if (!I915_NEED_GFX_HWS(dev))
 +              i915_init_hws_phys(dev);
 +
        ret = i915_gem_init_ringbuffer(dev);
        if (ret)
                goto out;
  
 -      ret = i915_init_hwstatus(dev);
 -      if (ret)
 -              goto destroy_ringbuffer;
 -
        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;
        if (dev_priv->wq == 0) {
                DRM_DEBUG("Error\n");
                ret = -EINVAL;
 -              goto destroy_hws;
 +              goto destroy_ringbuffer;
        }
  
        ret = intel_init_bios(dev);
@@@ -186,6 -247,8 +186,6 @@@ modeset_cleanup
        intel_modeset_cleanup(dev);
  destroy_wq:
        destroy_workqueue(dev_priv->wq);
 -destroy_hws:
 -      i915_cleanup_hwstatus(dev);
  destroy_ringbuffer:
        i915_gem_cleanup_ringbuffer(dev);
  out:
  int i915_driver_load(struct drm_device *dev, unsigned long flags)
  {
        struct drm_i915_private *dev_priv;
 -      int ret = 0;
 +      int ret = 0, num_pipes = 2;
 +      u32 tmp;
  
        dev_priv = drm_alloc(sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
        if (dev_priv == NULL)
                goto free_priv;
        }
  
 -      INIT_LIST_HEAD(&dev_priv->mm.active_list);
 -      INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 -      INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 -      INIT_LIST_HEAD(&dev_priv->mm.request_list);
 -      INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 -                        i915_gem_retire_work_handler);
 -      dev_priv->mm.next_gem_seqno = 1;
 +      i915_gem_load(dev);
  
  #ifdef __linux__
  #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 -        intel_init_chipset_flush_compat(dev);
 +      intel_init_chipset_flush_compat(dev);
 +#endif
 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
 +      intel_opregion_init(dev);
  #endif
  #endif
  
 +      tmp = I915_READ(PIPEASTAT);
 +      I915_WRITE(PIPEASTAT, tmp);
 +      tmp = I915_READ(PIPEBSTAT);
 +      I915_WRITE(PIPEBSTAT, tmp);
 +
 +      atomic_set(&dev_priv->irq_received, 0);
 +      I915_WRITE(HWSTAM, 0xeffe);
 +      I915_WRITE(IMR, 0x0);
 +      I915_WRITE(IER, 0x0);
 +
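/* (Reading aid, not a patch line: the PIPExSTAT event bits are sticky
 * write-one-to-clear, so reading each register and writing the value
 * straight back, as done above, acks any events that were pending
 * before the interrupt setup that follows.) */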
 +      DRM_SPININIT(&dev_priv->swaps_lock, "swap");
 +      INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
 +      dev_priv->swaps_pending = 0;
 +
 +      DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
 +      dev_priv->user_irq_refcount = 0;
 +      dev_priv->irq_mask_reg = ~0;
 +
 +      ret = drm_vblank_init(dev, num_pipes);
 +      if (ret)
 +              goto out_rmmap;
 +
 +      ret = drm_hotplug_init(dev);
 +      if (ret)
 +              goto out_rmmap;
 +
 +      dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 +      dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 +
 +      i915_enable_interrupt(dev);
 +      DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
 +
 +      /*
 +       * Initialize the hardware status page IRQ location.
 +       */
 +
 +      I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
 +
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = i915_load_modeset_init(dev);
                if (ret < 0) {
                        goto out_rmmap;
                }
        }
 +
        return 0;
  
  out_rmmap:
@@@ -331,23 -357,6 +331,23 @@@ int i915_driver_unload(struct drm_devic
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
 +      u32 temp;
 +
 +      dev_priv->vblank_pipe = 0;
 +
 +      dev_priv->irq_enabled = 0;
 +
 +      I915_WRITE(HWSTAM, 0xffffffff);
 +      I915_WRITE(IMR, 0xffffffff);
 +      I915_WRITE(IER, 0x0);
 +
 +      temp = I915_READ(PIPEASTAT);
 +      I915_WRITE(PIPEASTAT, temp);
 +      temp = I915_READ(PIPEBSTAT);
 +      I915_WRITE(PIPEBSTAT, temp);
 +      temp = I915_READ(IIR);
 +      I915_WRITE(IIR, temp);
 +
        I915_WRITE(PRB0_CTL, 0);
  
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                dev_priv->sarea_bo = NULL;
        }
  #endif
 -      i915_cleanup_hwstatus(dev);
  
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                mutex_lock(&dev->struct_mutex);
                i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
 -              drm_memrange_takedown(&dev_priv->vram);
 +              drm_mm_takedown(&dev_priv->vram);
                i915_gem_lastclose(dev);
 +              if (!I915_NEED_GFX_HWS(dev))
 +                      drm_pci_free(dev, dev_priv->status_page_dmah);
        }
  
 +        drm_rmmap(dev, dev_priv->mmio_map);
 +
  #ifdef __linux__
 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
 +      intel_opregion_free(dev);
 +#endif
  #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 -        intel_init_chipset_flush_compat(dev);
 +      intel_fini_chipset_flush_compat(dev);
  #endif
  #endif
  
 -        DRM_DEBUG("%p\n", dev_priv->mmio_map);
 -        drm_rmmap(dev, dev_priv->mmio_map);
 -
        drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
  
        dev->dev_private = NULL;
@@@ -437,7 -443,7 +437,7 @@@ void i915_master_destroy(struct drm_dev
                return;
  
        if (master_priv->sarea)
-               drm_rmmap(dev, master_priv->sarea);
+               drm_rmmap_locked(dev, master_priv->sarea);
                
        drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
  
@@@ -77,9 -77,6 +77,9 @@@ static int r300_emit_cliprects(drm_rade
                                return -EFAULT;
                        }
  
 +                      box.x2--; /* Hardware expects inclusive bottom-right corner */
 +                      box.y2--;
 +
                        if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
                                box.x1 = (box.x1) &
                                        R300_CLIPRECT_MASK;
@@@ -98,8 -95,8 +98,8 @@@
                                        R300_CLIPRECT_MASK;
                                box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
                                        R300_CLIPRECT_MASK;
 -
                        }
 +
                        OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
                                 (box.y1 << R300_CLIPRECT_Y_SHIFT));
                        OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
                ADVANCE_RING();
        }
  
 +      /* flush cache and wait for idle clean after cliprect change */
 +      BEGIN_RING(2);
 +      OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
 +      OUT_RING(R300_RB3D_DC_FLUSH);
 +      ADVANCE_RING();
 +      BEGIN_RING(2);
 +      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
 +      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
 +      ADVANCE_RING();
 +      /* set flush flag */
 +      dev_priv->track_flush |= RADEON_FLUSH_EMITED;
 +
        return 0;
  }
  
@@@ -166,8 -151,6 +166,6 @@@ void r300_init_reg_flags(struct drm_dev
                for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
                        r300_reg_flags[i]|=(mark);
  
- #define MARK_SAFE             1
- #define MARK_CHECK_OFFSET     2
  
  #define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
  
        ADD_RANGE(0x21DC, 1);
        ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
        ADD_RANGE(R300_VAP_CLIP_X_0, 4);
 -      ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1);
 +      ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
        ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
        ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
        ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
        ADD_RANGE(R300_GB_ENABLE, 1);
        ADD_RANGE(R300_GB_MSPOS0, 5);
 -      ADD_RANGE(R300_TX_CNTL, 1);
 +      ADD_RANGE(R300_TX_INVALTAGS, 1);
        ADD_RANGE(R300_TX_ENABLE, 1);
        ADD_RANGE(0x4200, 4);
        ADD_RANGE(0x4214, 1);
        ADD_RANGE(0x42C0, 2);
        ADD_RANGE(R300_RS_CNTL_0, 2);
  
 -      ADD_RANGE(0x43A4, 2);
 +      ADD_RANGE(R300_SC_HYPERZ, 2);
        ADD_RANGE(0x43E8, 1);
  
        ADD_RANGE(0x46A4, 5);
        ADD_RANGE(0x4E50, 9);
        ADD_RANGE(0x4E88, 1);
        ADD_RANGE(0x4EA0, 2);
 -      ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
 -      ADD_RANGE(R300_RB3D_ZSTENCIL_FORMAT, 4);
 -      ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);    /* check offset */
 -      ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
 -      ADD_RANGE(0x4F28, 1);
 -      ADD_RANGE(0x4F30, 2);
 -      ADD_RANGE(0x4F44, 1);
 -      ADD_RANGE(0x4F54, 1);
 +      ADD_RANGE(R300_ZB_CNTL, 3);
 +      ADD_RANGE(R300_ZB_FORMAT, 4);
 +      ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);      /* check offset */
 +      ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
 +      ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
 +      ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
  
        ADD_RANGE(R300_TX_FILTER_0, 16);
        ADD_RANGE(R300_TX_FILTER1_0, 16);
        ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
  
        /* Sporadic registers used as primitives are emitted */
 -      ADD_RANGE(R300_RB3D_ZCACHE_CTLSTAT, 1);
 +      ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
        ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
        ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
        ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
  
+       ADD_RANGE(R500_SU_REG_DEST, 1);
+       if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV410) {
+               ADD_RANGE(R300_DST_PIPE_CONFIG, 1);
+       }
        if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
                ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
                ADD_RANGE(R500_US_CONFIG, 2);
                ADD_RANGE(R500_RS_INST_0, 16);
                ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
                ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
 -
 +              ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
+               ADD_RANGE(R500_GA_US_VECTOR_INDEX, 2);
        } else {
                ADD_RANGE(R300_PFS_CNTL_0, 3);
                ADD_RANGE(R300_PFS_NODE_0, 4);
                ADD_RANGE(R300_RS_ROUTE_0, 8);
  
        }
+       /* add 2d blit engine registers for DDX */
+       ADD_RANGE(RADEON_SRC_Y_X, 3); /* 1434, 1438, 143c, 
+                                        SRC_Y_X, DST_Y_X, DST_HEIGHT_WIDTH
+                                      */
+       ADD_RANGE(RADEON_DP_GUI_MASTER_CNTL, 1); /* 146c */
+       ADD_RANGE(RADEON_DP_BRUSH_BKGD_CLR, 2); /* 1478, 147c */
+       ADD_RANGE(RADEON_DP_SRC_FRGD_CLR, 2); /* 15d8, 15dc */
+       ADD_RANGE(RADEON_DP_CNTL, 1); /* 16c0 */
+       ADD_RANGE(RADEON_DP_WRITE_MASK, 1); /* 16cc */
+       ADD_RANGE(RADEON_DEFAULT_SC_BOTTOM_RIGHT, 1); /* 16e8 */
+       ADD_RANGE(RADEON_DSTCACHE_CTLSTAT, 1);
+       ADD_RANGE(RADEON_WAIT_UNTIL, 1);
+       ADD_RANGE_MARK(RADEON_DST_OFFSET, 1, MARK_CHECK_OFFSET);
+       ADD_RANGE_MARK(RADEON_SRC_OFFSET, 1, MARK_CHECK_OFFSET);
+       ADD_RANGE_MARK(RADEON_DST_PITCH_OFFSET, 1, MARK_CHECK_OFFSET);
+       ADD_RANGE_MARK(RADEON_SRC_PITCH_OFFSET, 1, MARK_CHECK_OFFSET);
+       /* TODO SCISSOR */
+       ADD_RANGE_MARK(R300_SC_SCISSOR0, 2, MARK_CHECK_SCISSOR);
+       ADD_RANGE(R300_SC_CLIP_0_A, 2);
+       ADD_RANGE(R300_SC_CLIP_RULE, 1);
+       ADD_RANGE(R300_SC_SCREENDOOR, 1);
+       ADD_RANGE(R300_VAP_PVS_CODE_CNTL_0, 4);
+       ADD_RANGE(R300_VAP_PVS_VECTOR_INDX_REG, 2);
  }
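
For reference, ADD_RANGE/ADD_RANGE_MARK above just paint a per-dword-register flag table; a worked expansion (illustration only):

	/* ADD_RANGE_MARK(RADEON_DST_OFFSET, 1, MARK_CHECK_OFFSET)
	 * expands (per the macro near the top of this hunk) to:
	 *
	 *   for (i = (RADEON_DST_OFFSET) >> 2;
	 *        i < ((RADEON_DST_OFFSET) >> 2) + 1; i++)
	 *           r300_reg_flags[i] |= MARK_CHECK_OFFSET;
	 *
	 * so r300_check_range() / r300_get_reg_flags() can later accept,
	 * reject or offset-check a write to that register in O(1).
	 */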
  
static __inline__ int r300_check_range(unsigned reg, int count)
+ int r300_check_range(unsigned reg, int count)
  {
        int i;
        if (reg & ~0xffff)
        return 0;
  }
  
+ int r300_get_reg_flags(unsigned reg)
+ {
+       if (reg & ~0xffff)
+               return -1;
+       return r300_reg_flags[(reg >> 2)];
+ }
  static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
                                                          dev_priv,
                                                          drm_radeon_kcmd_buffer_t
@@@ -404,28 -432,15 +445,28 @@@ static __inline__ int r300_emit_vpu(drm
        if (sz * 16 > cmdbuf->bufsz)
                return -EINVAL;
  
 -      BEGIN_RING(5 + sz * 4);
 -      /* Wait for VAP to come to senses.. */
 -      /* there is no need to emit it multiple times, (only once before VAP is programmed,
 -         but this optimization is for later */
 -      OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
 +      /* VAP is very sensitive, so we purge the cache before we program
 +       * it, and we also flush its state before & after */
 +      BEGIN_RING(6);
 +      OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
 +      OUT_RING(R300_RB3D_DC_FLUSH);
 +      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
 +      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
 +      OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
 +      OUT_RING(0);
 +      ADVANCE_RING();
 +      /* set flush flag */
 +      dev_priv->track_flush |= RADEON_FLUSH_EMITED;
 +
 +      BEGIN_RING(3 + sz * 4);
        OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
        OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
        OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
 +      ADVANCE_RING();
  
 +      BEGIN_RING(2);
 +      OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
 +      OUT_RING(0);
        ADVANCE_RING();
  
        cmdbuf->buf += sz * 16;
@@@ -453,15 -468,6 +494,15 @@@ static __inline__ int r300_emit_clear(d
        OUT_RING_TABLE((int *)cmdbuf->buf, 8);
        ADVANCE_RING();
  
 +      BEGIN_RING(4);
 +      OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
 +      OUT_RING(R300_RB3D_DC_FLUSH);
 +      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
 +      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
 +      ADVANCE_RING();
 +      /* set flush flag */
 +      dev_priv->track_flush |= RADEON_FLUSH_EMITED;
 +
        cmdbuf->buf += 8 * 4;
        cmdbuf->bufsz -= 8 * 4;
  
@@@ -581,23 -587,22 +622,23 @@@ static __inline__ int r300_emit_bitblt_
        return 0;
  }
  
 -static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
 -                                           drm_radeon_kcmd_buffer_t *cmdbuf)
 +static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
 +                                          drm_radeon_kcmd_buffer_t *cmdbuf)
  {
 -      u32 *cmd = (u32 *) cmdbuf->buf;
 -      int count, ret;
 +      u32 *cmd;
 +      int count;
 +      int expected_count;
        RING_LOCALS;
  
 -      count=(cmd[0]>>16) & 0x3fff;
 +      cmd = (u32 *) cmdbuf->buf;
 +      count = (cmd[0]>>16) & 0x3fff;
 +      expected_count = cmd[1] >> 16;
 +      if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
 +              expected_count = (expected_count+1)/2;
  
 -      if ((cmd[1] & 0x8000ffff) != 0x80000810) {
 -              DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
 -              return -EINVAL;
 -      }
 -      ret = !radeon_check_offset(dev_priv, cmd[2]);
 -      if (ret) {
 -              DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
 +      if (count && count != expected_count) {
 +              DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
 +                      count, expected_count);
                return -EINVAL;
        }
  
        cmdbuf->buf += (count+2)*4;
        cmdbuf->bufsz -= (count+2)*4;
  
 +      if (!count) {
 +              drm_r300_cmd_header_t header;
 +
 +              if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
 +                      DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
 +                      return -EINVAL;
 +              }
 +
 +              header.u = *(unsigned int *)cmdbuf->buf;
 +
 +              cmdbuf->buf += sizeof(header);
 +              cmdbuf->bufsz -= sizeof(header);
 +              cmd = (u32 *) cmdbuf->buf;
 +
 +              if (header.header.cmd_type != R300_CMD_PACKET3 ||
 +                  header.packet3.packet != R300_CMD_PACKET3_RAW ||
 +                  cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
 +                      DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
 +                      return -EINVAL;
 +              }
 +
 +              if ((cmd[1] & 0x8000ffff) != 0x80000810) {
 +                      DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
 +                      return -EINVAL;
 +              }
 +              if (!radeon_check_offset(dev_priv, cmd[2])) {
 +                      DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
 +                      return -EINVAL;
 +              }
 +              if (cmd[3] != expected_count) {
 +                      DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
 +                              cmd[3], expected_count);
 +                      return -EINVAL;
 +              }
 +
 +              BEGIN_RING(4);
 +              OUT_RING(cmd[0]);
 +              OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
 +              ADVANCE_RING();
 +
 +              cmdbuf->buf += 4*4;
 +              cmdbuf->bufsz -= 4*4;
 +      }
 +
        return 0;
  }
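
A worked instance of the size bookkeeping above (illustrative numbers only):

	/* 3D_DRAW_INDX_2 with 1000 16-bit indices:
	 *   cmd[1] >> 16            -> 1000 (index count)
	 *   INDEX_SIZE_32bit clear  -> expected_count = (1000 + 1) / 2
	 *                            = 500 dwords (two indices per dword)
	 * Inline case:  count == 500 must hold, or -EINVAL.
	 * Indirect case (count == 0): the following INDX_BUFFER packet
	 * must carry cmd[3] == 500, as checked above.
	 */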
  
@@@ -696,22 -657,11 +737,22 @@@ static __inline__ int r300_emit_raw_pac
        case RADEON_CNTL_BITBLT_MULTI:
                return r300_emit_bitblt_multi(dev_priv, cmdbuf);
  
 -      case RADEON_CP_INDX_BUFFER:     /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
 -              return r300_emit_indx_buffer(dev_priv, cmdbuf);
 -      case RADEON_CP_3D_DRAW_IMMD_2:  /* triggers drawing using in-packet vertex data */
 -      case RADEON_CP_3D_DRAW_VBUF_2:  /* triggers drawing of vertex buffers setup elsewhere */
 -      case RADEON_CP_3D_DRAW_INDX_2:  /* triggers drawing using indices to vertex buffer */
 +      case RADEON_CP_INDX_BUFFER:
 +              DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
 +              return -EINVAL;
 +      case RADEON_CP_3D_DRAW_IMMD_2:
 +              /* triggers drawing using in-packet vertex data */
 +      case RADEON_CP_3D_DRAW_VBUF_2:
 +              /* triggers drawing of vertex buffers setup elsewhere */
 +              dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
 +                                         RADEON_PURGE_EMITED);
 +              break;
 +      case RADEON_CP_3D_DRAW_INDX_2:
 +              /* triggers drawing using indices to vertex buffer */
 +              /* whenever we send vertices we clear the flush & purge flags */
 +              dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
 +                                         RADEON_PURGE_EMITED);
 +              return r300_emit_draw_indx_2(dev_priv, cmdbuf);
        case RADEON_WAIT_FOR_IDLE:
        case RADEON_CP_NOP:
                /* these packets are safe */
@@@ -807,53 -757,16 +848,53 @@@ static __inline__ int r300_emit_packet3
   */
  static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
  {
 +      uint32_t cache_z, cache_3d, cache_2d;
        RING_LOCALS;
  
 -      BEGIN_RING(6);
 +      cache_z = R300_ZC_FLUSH;
 +      cache_2d = R300_DC_FLUSH_2D;
 +      cache_3d = R300_DC_FLUSH_3D;
 +      if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
 +              /* we can purge; primitives were drawn since the last purge */
 +              cache_z |= R300_ZC_FREE;
 +              cache_2d |= R300_DC_FREE_2D;
 +              cache_3d |= R300_DC_FREE_3D;
 +      }
 +
 +      /* flush & purge zbuffer */
 +      BEGIN_RING(2);
 +      OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
 +      OUT_RING(cache_z);
 +      ADVANCE_RING();
 +      /* flush & purge 3d */
 +      BEGIN_RING(2);
        OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
 -      OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A);
 -      OUT_RING(CP_PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
 -      OUT_RING(R300_RB3D_ZCACHE_UNKNOWN_03);
 -      OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
 -      OUT_RING(0x0);
 +      OUT_RING(cache_3d);
 +      ADVANCE_RING();
 +      /* flush & purge texture */
 +      BEGIN_RING(2);
 +      OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
 +      OUT_RING(0);
 +      ADVANCE_RING();
 +      /* FIXME: is this one really needed? */
 +      BEGIN_RING(2);
 +      OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
 +      OUT_RING(0);
 +      ADVANCE_RING();
 +      BEGIN_RING(2);
 +      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
 +      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
 +      ADVANCE_RING();
 +      /* flush & purge 2d through E2 as RB2D will trigger lockup */
 +      BEGIN_RING(4);
 +      OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
 +      OUT_RING(cache_2d);
 +      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
 +      OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
 +               RADEON_WAIT_HOST_IDLECLEAN);
        ADVANCE_RING();
 +      /* set flush & purge flags */
 +      dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
  }
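
The flush-versus-purge split above amounts to a small state machine; a sketch of the flag lifecycle, inferred from the code in this patch:

	/* draw packet (IMMD_2 / VBUF_2 / INDX_2):
	 *     track_flush &= ~(RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED);
	 * r300_pacify():
	 *     always emits the FLUSH bits; adds the FREE (purge) bits only
	 *     when RADEON_PURGE_EMITED is clear, i.e. when primitives were
	 *     actually drawn since the last purge; then sets both flags.
	 * Net effect: back-to-back pacifies with no drawing in between
	 * skip the more expensive cache free.
	 */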
  
  /**
@@@ -1035,7 -948,8 +1076,7 @@@ int r300_do_cp_cmdbuf(struct drm_devic
  
        DRM_DEBUG("\n");
  
 -      /* See the comment above r300_emit_begin3d for why this call must be here,
 -       * and what the cleanup gotos are for. */
 +      /* pacify */
        r300_pacify(dev_priv);
  
        if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
diff --combined shared-core/r300_reg.h
@@@ -129,15 -129,6 +129,6 @@@ USE OR OTHER DEALINGS IN THE SOFTWARE
  /* END: Wild guesses */
  
  #define R300_SE_VTE_CNTL                  0x20b0
- #     define     R300_VPORT_X_SCALE_ENA                0x00000001
- #     define     R300_VPORT_X_OFFSET_ENA               0x00000002
- #     define     R300_VPORT_Y_SCALE_ENA                0x00000004
- #     define     R300_VPORT_Y_OFFSET_ENA               0x00000008
- #     define     R300_VPORT_Z_SCALE_ENA                0x00000010
- #     define     R300_VPORT_Z_OFFSET_ENA               0x00000020
- #     define     R300_VTX_XY_FMT                       0x00000100
- #     define     R300_VTX_Z_FMT                        0x00000200
- #     define     R300_VTX_W0_FMT                       0x00000400
  #     define     R300_VTX_W0_NORMALIZE                 0x00000800
  #     define     R300_VTX_ST_DENORMALIZED              0x00001000
  
   * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
   * avoids bugs caused by still running shaders reading bad data from memory.
   */
 -#define R300_VAP_PVS_WAITIDLE               0x2284 /* GUESS */
 +#define R300_VAP_PVS_STATE_FLUSH_REG        0x2284
  
  /* Absolutely no clue what this register is about. */
  #define R300_VAP_UNKNOWN_2288               0x2288
  #     define R300_OFIFO_HIGHWATER_SHIFT       22      /* two bits only */
  #     define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT       24
  
- #define R300_GB_SELECT        0x401C
  #     define R300_GB_FOG_SELECT_C0A           0
  #     define R300_GB_FOG_SELECT_C1A           1
  #     define R300_GB_FOG_SELECT_C2A           2
  /* gap */
  
  /* Zero to flush caches. */
 -#define R300_TX_CNTL                        0x4100
 +#define R300_TX_INVALTAGS                   0x4100
  #define R300_TX_FLUSH                       0x0
  
  /* The upper enable bits are guessed, based on fglrx reported limits. */
  #             define R300_RS_ROUTE_1_UNKNOWN11         (1 << 11)
  /* END: Rasterization / Interpolators - many guesses */
  
 +/* Hierarchical Z Enable */
 +#define R300_SC_HYPERZ                   0x43a4
 +#     define R300_SC_HYPERZ_DISABLE     (0 << 0)
 +#     define R300_SC_HYPERZ_ENABLE      (1 << 0)
 +#     define R300_SC_HYPERZ_MIN         (0 << 1)
 +#     define R300_SC_HYPERZ_MAX         (1 << 1)
 +#     define R300_SC_HYPERZ_ADJ_256     (0 << 2)
 +#     define R300_SC_HYPERZ_ADJ_128     (1 << 2)
 +#     define R300_SC_HYPERZ_ADJ_64      (2 << 2)
 +#     define R300_SC_HYPERZ_ADJ_32      (3 << 2)
 +#     define R300_SC_HYPERZ_ADJ_16      (4 << 2)
 +#     define R300_SC_HYPERZ_ADJ_8       (5 << 2)
 +#     define R300_SC_HYPERZ_ADJ_4       (6 << 2)
 +#     define R300_SC_HYPERZ_ADJ_2       (7 << 2)
 +#     define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
 +#     define R300_SC_HYPERZ_HZ_Z0MIN    (1 << 5)
 +#     define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
 +#     define R300_SC_HYPERZ_HZ_Z0MAX    (1 << 6)
 +
 +#define R300_SC_EDGERULE                 0x43a8
 +
  /* BEGIN: Scissors and cliprects */
  
  /* There are four clipping rectangles. Their corner coordinates are inclusive.
  /* 32 bit chroma key */
  #define R300_TX_CHROMA_KEY_0                      0x4580
  /* ff00ff00 == { 0, 1.0, 0, 1.0 } */
- #define R300_TX_BORDER_COLOR_0              0x45C0
  
  /* END: Texture specification */
  
  
  /* gap */
  
- #define R300_RB3D_COLOROFFSET0              0x4E28
  #       define R300_COLOROFFSET_MASK             0xFFFFFFF0 /* GUESS */
  #define R300_RB3D_COLOROFFSET1              0x4E2C /* GUESS */
  #define R300_RB3D_COLOROFFSET2              0x4E30 /* GUESS */
   * Bit 17: 4x2 tiles
   * Bit 18: Extremely weird tile like, but some pixels duplicated?
   */
- #define R300_RB3D_COLORPITCH0               0x4E38
  #       define R300_COLORPITCH_MASK              0x00001FF8 /* GUESS */
  #       define R300_COLOR_TILE_ENABLE            (1 << 16) /* GUESS */
  #       define R300_COLOR_MICROTILE_ENABLE       (1 << 17) /* GUESS */
  #define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
  #define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
  
 +#define R300_RB3D_AARESOLVE_CTL             0x4E88
  /* gap */
  
  /* Guess by Vladimir.
   * for this.
   * Bit (1<<8) is the "test" bit. so plain write is 6  - vd
   */
 -#define R300_RB3D_ZSTENCIL_CNTL_0                   0x4F00
 -#       define R300_RB3D_Z_DISABLED_1            0x00000010
 -#       define R300_RB3D_Z_DISABLED_2            0x00000014
 -#       define R300_RB3D_Z_TEST                  0x00000012
 -#       define R300_RB3D_Z_TEST_AND_WRITE        0x00000016
 -#       define R300_RB3D_Z_WRITE_ONLY          0x00000006
 -
 -#       define R300_RB3D_Z_TEST                  0x00000012
 -#       define R300_RB3D_Z_TEST_AND_WRITE        0x00000016
 -#       define R300_RB3D_Z_WRITE_ONLY          0x00000006
 -#     define R300_RB3D_STENCIL_ENABLE          0x00000001
 -
 -#define R300_RB3D_ZSTENCIL_CNTL_1                   0x4F04
 +#define R300_ZB_CNTL                             0x4F00
 +#     define R300_STENCIL_ENABLE               (1 << 0)
 +#     define R300_Z_ENABLE                     (1 << 1)
 +#     define R300_Z_WRITE_ENABLE               (1 << 2)
 +#     define R300_Z_SIGNED_COMPARE             (1 << 3)
 +#     define R300_STENCIL_FRONT_BACK           (1 << 4)
 +
 +#define R300_ZB_ZSTENCILCNTL                   0x4f04
        /* functions */
  #     define R300_ZS_NEVER                    0
  #     define R300_ZS_LESS                     1
  #     define R300_ZS_INVERT                   5
  #     define R300_ZS_INCR_WRAP                6
  #     define R300_ZS_DECR_WRAP                7
 +#     define R300_Z_FUNC_SHIFT                0
        /* front and back refer to operations done for front
           and back faces, i.e. separate stencil function support */
 -#     define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT           0
 -#     define R300_RB3D_ZS1_FRONT_FUNC_SHIFT           3
 -#     define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT        6
 -#     define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT       9
 -#     define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT      12
 -#     define R300_RB3D_ZS1_BACK_FUNC_SHIFT           15
 -#     define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT        18
 -#     define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT       21
 -#     define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT       24
 -
 -#define R300_RB3D_ZSTENCIL_CNTL_2                   0x4F08
 -#     define R300_RB3D_ZS2_STENCIL_REF_SHIFT          0
 -#     define R300_RB3D_ZS2_STENCIL_MASK               0xFF
 -#     define R300_RB3D_ZS2_STENCIL_MASK_SHIFT         8
 -#     define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT   16
 +#     define R300_S_FRONT_FUNC_SHIFT          3
 +#     define R300_S_FRONT_SFAIL_OP_SHIFT      6
 +#     define R300_S_FRONT_ZPASS_OP_SHIFT      9
 +#     define R300_S_FRONT_ZFAIL_OP_SHIFT      12
 +#     define R300_S_BACK_FUNC_SHIFT           15
 +#     define R300_S_BACK_SFAIL_OP_SHIFT       18
 +#     define R300_S_BACK_ZPASS_OP_SHIFT       21
 +#     define R300_S_BACK_ZFAIL_OP_SHIFT       24
 +
 +#define R300_ZB_STENCILREFMASK                        0x4f08
 +#     define R300_STENCILREF_SHIFT       0
 +#     define R300_STENCILREF_MASK        0x000000ff
 +#     define R300_STENCILMASK_SHIFT      8
 +#     define R300_STENCILMASK_MASK       0x0000ff00
 +#     define R300_STENCILWRITEMASK_SHIFT 16
 +#     define R300_STENCILWRITEMASK_MASK  0x00ff0000
  
  /* gap */
  
 -#define R300_RB3D_ZSTENCIL_FORMAT                   0x4F10
 -#     define R300_DEPTH_FORMAT_16BIT_INT_Z     (0 << 0)
 -#     define R300_DEPTH_FORMAT_24BIT_INT_Z     (2 << 0)
 -      /* 16 bit format or some aditional bit ? */
 -#     define R300_DEPTH_FORMAT_UNK32          (32 << 0)
 +#define R300_ZB_FORMAT                             0x4f10
 +#     define R300_DEPTHFORMAT_16BIT_INT_Z   (0 << 0)
 +#     define R300_DEPTHFORMAT_16BIT_13E3    (1 << 0)
 +#     define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL   (2 << 0)
 +/* reserved up to (15 << 0) */
 +#     define R300_INVERT_13E3_LEADING_ONES  (0 << 4)
 +#     define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)
  
 -#define R300_RB3D_EARLY_Z                           0x4F14
 -#     define R300_EARLY_Z_DISABLE              (0 << 0)
 -#     define R300_EARLY_Z_ENABLE               (1 << 0)
 +#define R300_ZB_ZTOP                             0x4F14
 +#     define R300_ZTOP_DISABLE                 (0 << 0)
 +#     define R300_ZTOP_ENABLE                  (1 << 0)
  
  /* gap */
  
 -#define R300_RB3D_ZCACHE_CTLSTAT            0x4F18 /* GUESS */
 -#       define R300_RB3D_ZCACHE_UNKNOWN_01  0x1
 -#       define R300_RB3D_ZCACHE_UNKNOWN_03  0x3
 +#define R300_ZB_ZCACHE_CTLSTAT            0x4f18
 +#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT      (0 << 0)
 +#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
 +#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT       (0 << 1)
 +#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE            (1 << 1)
 +#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE            (0 << 31)
 +#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY            (1 << 31)
 +
 +#define R300_ZB_BW_CNTL                     0x4f1c
 +#     define R300_HIZ_DISABLE                              (0 << 0)
 +#     define R300_HIZ_ENABLE                               (1 << 0)
 +#     define R300_HIZ_MIN                                  (0 << 1)
 +#     define R300_HIZ_MAX                                  (1 << 1)
 +#     define R300_FAST_FILL_DISABLE                        (0 << 2)
 +#     define R300_FAST_FILL_ENABLE                         (1 << 2)
 +#     define R300_RD_COMP_DISABLE                          (0 << 3)
 +#     define R300_RD_COMP_ENABLE                           (1 << 3)
 +#     define R300_WR_COMP_DISABLE                          (0 << 4)
 +#     define R300_WR_COMP_ENABLE                           (1 << 4)
 +#     define R300_ZB_CB_CLEAR_RMW                          (0 << 5)
 +#     define R300_ZB_CB_CLEAR_CACHE_LINEAR                 (1 << 5)
 +#     define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE   (0 << 6)
 +#     define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE    (1 << 6)
 +
 +#     define R500_ZEQUAL_OPTIMIZE_ENABLE                   (0 << 7)
 +#     define R500_ZEQUAL_OPTIMIZE_DISABLE                  (1 << 7)
 +#     define R500_SEQUAL_OPTIMIZE_ENABLE                   (0 << 8)
 +#     define R500_SEQUAL_OPTIMIZE_DISABLE                  (1 << 8)
 +
 +#     define R500_BMASK_ENABLE                             (0 << 10)
 +#     define R500_BMASK_DISABLE                            (1 << 10)
 +#     define R500_HIZ_EQUAL_REJECT_DISABLE                 (0 << 11)
 +#     define R500_HIZ_EQUAL_REJECT_ENABLE                  (1 << 11)
 +#     define R500_HIZ_FP_EXP_BITS_DISABLE                  (0 << 12)
 +#     define R500_HIZ_FP_EXP_BITS_1                        (1 << 12)
 +#     define R500_HIZ_FP_EXP_BITS_2                        (2 << 12)
 +#     define R500_HIZ_FP_EXP_BITS_3                        (3 << 12)
 +#     define R500_HIZ_FP_EXP_BITS_4                        (4 << 12)
 +#     define R500_HIZ_FP_EXP_BITS_5                        (5 << 12)
 +#     define R500_HIZ_FP_INVERT_LEADING_ONES               (0 << 15)
 +#     define R500_HIZ_FP_INVERT_LEADING_ZEROS              (1 << 15)
 +#     define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE      (0 << 16)
 +#     define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE     (1 << 16)
 +#     define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE           (0 << 17)
 +#     define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE          (1 << 17)
 +#     define R500_PEQ_PACKING_DISABLE                      (0 << 18)
 +#     define R500_PEQ_PACKING_ENABLE                       (1 << 18)
 +#     define R500_COVERED_PTR_MASKING_DISABLE              (0 << 18)
 +#     define R500_COVERED_PTR_MASKING_ENABLE               (1 << 18)
 +
  
  /* gap */
  
 -#define R300_RB3D_DEPTHOFFSET               0x4F20
 -#define R300_RB3D_DEPTHPITCH                0x4F24
 -#       define R300_DEPTHPITCH_MASK              0x00001FF8 /* GUESS */
 -#       define R300_DEPTH_TILE_ENABLE            (1 << 16) /* GUESS */
 -#       define R300_DEPTH_MICROTILE_ENABLE       (1 << 17) /* GUESS */
 -#       define R300_DEPTH_ENDIAN_NO_SWAP         (0 << 18) /* GUESS */
 -#       define R300_DEPTH_ENDIAN_WORD_SWAP       (1 << 18) /* GUESS */
 -#       define R300_DEPTH_ENDIAN_DWORD_SWAP      (2 << 18) /* GUESS */
 +/* Z Buffer Address Offset.
 + * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
 + */
 +#define R300_ZB_DEPTHOFFSET               0x4f20
 +
 +/* Z Buffer Pitch and Endian Control */
 +#define R300_ZB_DEPTHPITCH                0x4f24
 +#       define R300_DEPTHPITCH_MASK              0x00003FFC
 +#       define R300_DEPTHMACROTILE_DISABLE      (0 << 16)
 +#       define R300_DEPTHMACROTILE_ENABLE       (1 << 16)
 +#       define R300_DEPTHMICROTILE_LINEAR       (0 << 17)
 +#       define R300_DEPTHMICROTILE_TILED        (1 << 17)
 +#       define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
 +#       define R300_DEPTHENDIAN_NO_SWAP         (0 << 18)
 +#       define R300_DEPTHENDIAN_WORD_SWAP       (1 << 18)
 +#       define R300_DEPTHENDIAN_DWORD_SWAP      (2 << 18)
 +#       define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)
 +
 +/* Z Buffer Clear Value */
 +#define R300_ZB_DEPTHCLEARVALUE                  0x4f28
 +
 +#define R300_ZB_ZMASK_OFFSET                   0x4f30
 +#define R300_ZB_ZMASK_PITCH                    0x4f34
 +#define R300_ZB_ZMASK_WRINDEX                  0x4f38
 +#define R300_ZB_ZMASK_DWORD                    0x4f3c
 +#define R300_ZB_ZMASK_RDINDEX                  0x4f40
 +
 +/* Hierarchical Z Memory Offset */
 +#define R300_ZB_HIZ_OFFSET                       0x4f44
 +
 +/* Hierarchical Z Write Index */
 +#define R300_ZB_HIZ_WRINDEX                      0x4f48
 +
 +/* Hierarchical Z Data */
 +#define R300_ZB_HIZ_DWORD                        0x4f4c
 +
 +/* Hierarchical Z Read Index */
 +#define R300_ZB_HIZ_RDINDEX                      0x4f50
 +
 +/* Hierarchical Z Pitch */
 +#define R300_ZB_HIZ_PITCH                        0x4f54
 +
 +/* Z Buffer Z Pass Counter Data */
 +#define R300_ZB_ZPASS_DATA                       0x4f58
 +
 +/* Z Buffer Z Pass Counter Address */
 +#define R300_ZB_ZPASS_ADDR                       0x4f5c
 +
 +/* Depth buffer X and Y coordinate offset */
 +#define R300_ZB_DEPTHXY_OFFSET                   0x4f60
 +#     define R300_DEPTHX_OFFSET_SHIFT  1
 +#     define R300_DEPTHX_OFFSET_MASK   0x000007FE
 +#     define R300_DEPTHY_OFFSET_SHIFT  17
 +#     define R300_DEPTHY_OFFSET_MASK   0x07FE0000
 +
 +/* Sets the fifo sizes */
 +#define R500_ZB_FIFO_SIZE                        0x4fd0
 +#     define R500_OP_FIFO_SIZE_FULL   (0 << 0)
 +#     define R500_OP_FIFO_SIZE_HALF   (1 << 0)
 +#     define R500_OP_FIFO_SIZE_QUATER (2 << 0)
 +#     define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)
 +
 +/* Stencil Reference Value and Mask for backfacing quads */
 +/* R300_ZB_STENCILREFMASK handles front face */
 +#define R500_ZB_STENCILREFMASK_BF                0x4fd4
 +#     define R500_STENCILREF_SHIFT       0
 +#     define R500_STENCILREF_MASK        0x000000ff
 +#     define R500_STENCILMASK_SHIFT      8
 +#     define R500_STENCILMASK_MASK       0x0000ff00
 +#     define R500_STENCILWRITEMASK_SHIFT 16
 +#     define R500_STENCILWRITEMASK_MASK  0x00ff0000
  
  /* BEGIN: Vertex program instruction set */
  
diff --combined shared-core/radeon_cp.c
@@@ -40,7 -40,6 +40,7 @@@
  #define RADEON_FIFO_DEBUG     0
  
  static int radeon_do_cleanup_cp(struct drm_device * dev);
 +static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
  
  static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
  {
@@@ -145,12 -144,8 +145,12 @@@ static void radeon_write_agp_base(drm_r
        } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
                R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
                R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
 +      } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
 +                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
 +              RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
 +              RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
        } else {
 -              RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_base_lo);
 +              RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
                if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
                        RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
        }
@@@ -190,7 -185,7 +190,7 @@@ void radeon_pll_errata_after_data(struc
        }
  }
  
int RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr)
u32 RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr)
  {
        uint32_t data;
  
@@@ -295,8 -290,23 +295,8 @@@ static int radeon_do_pixcache_flush(drm
                        DRM_UDELAY(1);
                }
        } else {
 -              /* 3D */
 -              tmp = RADEON_READ(R300_RB3D_DSTCACHE_CTLSTAT);
 -              tmp |= RADEON_RB3D_DC_FLUSH_ALL;
 -              RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, tmp);
 -
 -              /* 2D */
 -              tmp = RADEON_READ(RADEON_RB2D_DSTCACHE_CTLSTAT);
 -              tmp |= RADEON_RB3D_DC_FLUSH_ALL;
 -              RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
 -
 -              for (i = 0; i < dev_priv->usec_timeout; i++) {
 -                      if (!(RADEON_READ(RADEON_RB2D_DSTCACHE_CTLSTAT)
 -                        & RADEON_RB3D_DC_BUSY)) {
 -                              return 0;
 -                      }
 -                      DRM_UDELAY(1);
 -              }
 +              /* don't flush or purge the cache here or we risk a lockup */
 +              return 0;
        }
  
  #if RADEON_FIFO_DEBUG
@@@ -319,9 -329,6 +319,9 @@@ static int radeon_do_wait_for_fifo(drm_
                        return 0;
                DRM_UDELAY(1);
        }
 +      DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
 +               RADEON_READ(RADEON_RBBM_STATUS),
 +               RADEON_READ(R300_VAP_CNTL_STATUS));
  
  #if RADEON_FIFO_DEBUG
        DRM_ERROR("failed!\n");
@@@ -348,9 -355,6 +348,9 @@@ static int radeon_do_wait_for_idle(drm_
                }
                DRM_UDELAY(1);
        }
 +      DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
 +               RADEON_READ(RADEON_RBBM_STATUS),
 +               RADEON_READ(R300_VAP_CNTL_STATUS));
  
  #if RADEON_FIFO_DEBUG
        DRM_ERROR("failed!\n");
@@@ -444,7 -448,6 +444,7 @@@ static void radeon_cp_load_microcode(dr
                   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
                   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
                   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
 +                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
                   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
                DRM_INFO("Loading R300 Microcode\n");
                for (i = 0; i < 256; i++) {
@@@ -533,20 -536,14 +533,20 @@@ static void radeon_do_cp_start(drm_rade
  
        dev_priv->cp_running = 1;
  
 -      BEGIN_RING(6);
 -
 +      BEGIN_RING(8);
 +      /* ISYNC can only be written through the CP on r5xx, so write it here */
 +      OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
 +      OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
 +               RADEON_ISYNC_ANY3D_IDLE2D |
 +               RADEON_ISYNC_WAIT_IDLEGUI |
 +               RADEON_ISYNC_CPSCRATCH_IDLEGUI);
        RADEON_PURGE_CACHE();
        RADEON_PURGE_ZCACHE();
        RADEON_WAIT_UNTIL_IDLE();
 -
        ADVANCE_RING();
        COMMIT_RING();
 +
 +      dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
  }
  
  /* Reset the Command Processor.  This will not flush any pending
@@@ -661,8 -658,8 +661,8 @@@ static void radeon_cp_init_ring_buffer(
                                         ((dev_priv->gart_vm_start - 1) & 0xffff0000)
                                         | (dev_priv->fb_location >> 16));
        
-       if (dev_priv->mm.ring) {
-               ring_start = dev_priv->mm.ring->offset +
+       if (dev_priv->mm.ring.bo) {
+               ring_start = dev_priv->mm.ring.bo->offset +
                        dev_priv->gart_vm_start;
        } else
  #if __OS_HAS_AGP
        dev_priv->ring.tail = cur_read_ptr;
  
  
-       if (dev_priv->mm.ring_read_ptr) {
+       if (dev_priv->mm.ring_read.bo) {
                RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
-                            dev_priv->mm.ring_read_ptr->offset +
+                            dev_priv->mm.ring_read.bo->offset +
                             dev_priv->gart_vm_start);
        } else
  #if __OS_HAS_AGP
        RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
                     + RADEON_SCRATCH_REG_OFFSET);
  
-       if (dev_priv->mm.ring_read_ptr)
+       if (dev_priv->mm.ring_read.bo)
                dev_priv->scratch = ((__volatile__ u32 *)
-                                    dev_priv->mm.ring_read_ptr_map.virtual +
+                                    dev_priv->mm.ring_read.kmap.virtual +
                                     (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));
        else
                dev_priv->scratch = ((__volatile__ u32 *)
        radeon_do_wait_for_idle(dev_priv);
  
        /* Sync everything up */
+       if (dev_priv->chip_family > CHIP_RV280) {
        RADEON_WRITE(RADEON_ISYNC_CNTL,
                     (RADEON_ISYNC_ANY2D_IDLE3D |
                      RADEON_ISYNC_ANY3D_IDLE2D |
                      RADEON_ISYNC_WAIT_IDLEGUI |
                      RADEON_ISYNC_CPSCRATCH_IDLEGUI));
+       } else {
+       RADEON_WRITE(RADEON_ISYNC_CNTL,
+                    (RADEON_ISYNC_ANY2D_IDLE3D |
+                     RADEON_ISYNC_ANY3D_IDLE2D |
+                     RADEON_ISYNC_WAIT_IDLEGUI));
+       }
  }
  
  static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
        u32 tmp;
        void *ring_read_ptr;
  
-       if (dev_priv->mm.ring_read_ptr)
-               ring_read_ptr = dev_priv->mm.ring_read_ptr_map.virtual;
+       if (dev_priv->mm.ring_read.bo)
+               ring_read_ptr = dev_priv->mm.ring_read.kmap.virtual;
        else
                ring_read_ptr = dev_priv->ring_rptr->handle;
  
@@@ -861,7 -864,14 +867,7 @@@ static void radeon_set_igpgart(drm_rade
                IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
                                                      RS480_REQ_TYPE_SNOOP_DIS));
  
 -              if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
 -                      IGP_WRITE_MCIND(RS690_MC_AGP_BASE,
 -                                      (unsigned int)dev_priv->gart_vm_start);
 -                      IGP_WRITE_MCIND(RS690_MC_AGP_BASE_2, 0);
 -              } else {
 -                      RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start);
 -                      RADEON_WRITE(RS480_AGP_BASE_2, 0);
 -              }
 +              radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);
  
                dev_priv->gart_size = 32*1024*1024;
                temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & 
@@@ -1060,7 -1070,7 +1066,7 @@@ static int radeon_do_init_cp(struct drm
        dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
                                           (dev_priv->color_fmt << 10) |
                                           (dev_priv->chip_family < CHIP_R200 ? RADEON_ZBLOCK16 : 0));
 -      
 +
        dev_priv->depth_clear.rb3d_zstencilcntl =
            (dev_priv->depth_fmt |
             RADEON_Z_TEST_ALWAYS |
                dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
                /* if we have an offset set from userspace */
                if (dev_priv->pcigart_offset_set) {
 -
                        /* if it came from userspace - remap it */
                        if (dev_priv->pcigart_offset_set == 1) {
                                dev_priv->gart_info.bus_addr =
@@@ -1353,8 -1364,7 +1359,7 @@@ static int radeon_do_cleanup_cp(struct 
                if (dev_priv->gart_info.bus_addr) {
                        /* Turn off PCI GART */
                        radeon_set_pcigart(dev_priv, 0);
-                       if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
-                               DRM_ERROR("failed to cleanup PCI GART!\n");
+                       drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info);
                }
  
                if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
                        if (dev_priv->pcigart_offset_set == 1) {
                                drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
                                dev_priv->gart_info.addr = NULL;
+                               dev_priv->pcigart_offset_set = 0;
                        }
                }
        }
@@@ -1404,7 -1415,6 +1410,7 @@@ static int radeon_do_resume_cp(struct d
        radeon_cp_init_ring_buffer(dev, dev_priv);
  
        radeon_do_engine_reset(dev);
 +      radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
  
        DRM_DEBUG("radeon_do_resume_cp() complete\n");
  
@@@ -1553,8 -1563,10 +1559,10 @@@ void radeon_do_release(struct drm_devic
                radeon_mem_takedown(&(dev_priv->gart_heap));
                radeon_mem_takedown(&(dev_priv->fb_heap));
  
-               radeon_gem_mm_fini(dev);
+               if (dev_priv->user_mm_enable) {
+                       radeon_gem_mm_fini(dev);
+                       dev_priv->user_mm_enable = false;
+               }
  
                /* deallocate kernel resources */
                radeon_do_cleanup_cp(dev);
@@@ -2270,6 -2282,7 +2278,7 @@@ static void radeon_set_dynamic_clock(st
  int radeon_modeset_cp_init(struct drm_device *dev)
  {
        drm_radeon_private_t *dev_priv = dev->dev_private;
+       uint32_t tmp;
  
        /* allocate a ring and ring rptr bits from GART space */
        /* these are allocated in GEM files */
        dev_priv->ring.size = RADEON_DEFAULT_RING_SIZE;
        dev_priv->cp_mode = RADEON_CSQ_PRIBM_INDBM;
  
-       dev_priv->ring.start = (u32 *)(void *)(unsigned long)dev_priv->mm.ring_map.virtual;
-       dev_priv->ring.end = (u32 *)(void *)(unsigned long)dev_priv->mm.ring_map.virtual +
+       dev_priv->ring.start = (u32 *)(void *)(unsigned long)dev_priv->mm.ring.kmap.virtual;
+       dev_priv->ring.end = (u32 *)(void *)(unsigned long)dev_priv->mm.ring.kmap.virtual +
                dev_priv->ring.size / sizeof(u32);
        dev_priv->ring.size_l2qw = drm_order(dev_priv->ring.size / 8);
        dev_priv->ring.rptr_update = 4096;
        dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
        dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
  
-       dev_priv->new_memmap = 1;
+       dev_priv->new_memmap = true;
  
+       r300_init_reg_flags(dev);
+               
        radeon_cp_load_microcode(dev_priv);
        
-       DRM_DEBUG("ring offset is %x %x\n", dev_priv->mm.ring->offset, dev_priv->mm.ring_read_ptr->offset);
+       DRM_DEBUG("ring offset is %x %x\n", dev_priv->mm.ring.bo->offset, dev_priv->mm.ring_read.bo->offset);
  
        radeon_cp_init_ring_buffer(dev, dev_priv);
  
+       /* need to enable bus mastering in BUS_CNTL */
+       tmp = RADEON_READ(RADEON_BUS_CNTL);
+       tmp &= ~RADEON_BUS_MASTER_DIS;
+       RADEON_WRITE(RADEON_BUS_CNTL, tmp);
        radeon_do_engine_reset(dev);
        radeon_test_writeback(dev_priv);
  
@@@ -2367,8 -2387,8 +2383,8 @@@ int radeon_modeset_preinit(struct drm_d
  
        if (dev_priv->is_atom_bios) {
                dev_priv->mode_info.atom_context = atom_parse(&card, dev_priv->bios);
-               radeon_get_clock_info(dev);
        }
+       radeon_get_clock_info(dev);
        return 0;
  }
  
@@@ -2523,7 -2543,7 +2539,7 @@@ void radeon_master_destroy(struct drm_d
  
        master_priv->sarea_priv = NULL;
        if (master_priv->sarea)
-               drm_rmmap(dev, master_priv->sarea);
+               drm_rmmap_locked(dev, master_priv->sarea);
                
        drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
  
diff --combined shared-core/radeon_cs.c
index 0000000,d961189..b0c4abe
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,411 +1,411 @@@
 -      case R300_RB3D_DEPTHOFFSET:
+ /*
+  * Copyright 2008 Jerome Glisse.
+  * All Rights Reserved.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the "Software"),
+  * to deal in the Software without restriction, including without limitation
+  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+  * and/or sell copies of the Software, and to permit persons to whom the
+  * Software is furnished to do so, subject to the following conditions:
+  *
+  * The above copyright notice and this permission notice (including the next
+  * paragraph) shall be included in all copies or substantial portions of the
+  * Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+  * DEALINGS IN THE SOFTWARE.
+  *
+  * Authors:
+  *    Jerome Glisse <glisse@freedesktop.org>
+  */
+ #include "drmP.h"
+ #include "radeon_drm.h"
+ #include "radeon_drv.h"
+ #include "r300_reg.h"
+ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv)
+ {
+       struct drm_radeon_private *dev_priv = dev->dev_private;
+       struct drm_radeon_cs *cs = data;
+       uint32_t *packets = NULL;
+       uint32_t cs_id;
+       uint32_t card_offset;
+       void *ib = NULL;
+       long size;
+       int r;
+       RING_LOCALS;
+       /* set the command stream id to 0, which is a fake id */
+       cs_id = 0;
+       DRM_COPY_TO_USER(&cs->cs_id, &cs_id, sizeof(uint32_t));
+       if (dev_priv == NULL) {
+               DRM_ERROR("called with no initialization\n");
+               return -EINVAL;
+       }
+       if (!cs->dwords) {
+               return 0;
+       }
+       /* limit the cs to a 64KB (16K dword) indirect buffer */
+       if (cs->dwords > (16 * 1024)) {
+               return -EINVAL;
+       }
+       /* copy the cs from userspace; we could copy straight into the ib
+        * to save one copy, but the ib will be mapped write-combined,
+        * which is not good for command checking. something worth
+        * testing, i guess (Jerome)
+        */
+       size = cs->dwords * sizeof(uint32_t);
+       packets = drm_alloc(size, DRM_MEM_DRIVER);
+       if (packets == NULL) {
+               return -ENOMEM;
+       }
+       if (DRM_COPY_FROM_USER(packets, (void __user *)(unsigned long)cs->packets, size)) {
+               r = -EFAULT;
+               goto out;
+       }
+       /* get ib */
+       r = dev_priv->cs.ib_get(dev, &ib, cs->dwords, &card_offset);
+       if (r) {
+               goto out;
+       }
+       /* now parse command stream */
+       r = dev_priv->cs.parse(dev, fpriv, ib, packets, cs->dwords);
+       if (r) {
+               goto out;
+       }
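+       /* point the CP at the ib: write IB_BASE and the ib-size register
+        * that follows it, then pad with a type-2 packet */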
+       BEGIN_RING(4);
+       OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
+       OUT_RING(card_offset);
+       OUT_RING(cs->dwords);
+       OUT_RING(CP_PACKET2());
+       ADVANCE_RING();
+       /* emit cs id sequence */
+       dev_priv->cs.id_emit(dev, &cs_id);
+       COMMIT_RING();
+       DRM_COPY_TO_USER(&cs->cs_id, &cs_id, sizeof(uint32_t));
+ out:
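+       /* ib may still be NULL here if ib_get failed; ib_free is
+        * assumed to cope with that */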
+       dev_priv->cs.ib_free(dev, ib, cs->dwords);
+       drm_free(packets, size, DRM_MEM_DRIVER);
+       return r;
+ }
+ /* for non-mm: the reloc data already carries the final card offset */
+ static int radeon_nomm_relocate(struct drm_device *dev, struct drm_file *file_priv, uint32_t *reloc, uint32_t *offset)
+ {
+       *offset = reloc[1];
+       return 0;
+ }
+ #define RELOC_SIZE 2
+ #define RADEON_2D_OFFSET_MASK 0x3fffff
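+ /* a relocation rides in the stream as a CP NOP packet carrying
+  * RELOC_SIZE dwords of reloc data right after the register write */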
+ static __inline__ int radeon_cs_relocate_packet0(struct drm_device *dev, struct drm_file *file_priv,
+                                                uint32_t *packets, uint32_t offset_dw)
+ {
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       uint32_t hdr = packets[offset_dw];
+       uint32_t reg = (hdr & R300_CP_PACKET0_REG_MASK) << 2;
+       uint32_t val = packets[offset_dw + 1];
+       uint32_t packet3_hdr = packets[offset_dw + 2];
+       uint32_t tmp, offset;
+       int ret;
+       /* this check is too strict; we may want to expand the length in
+        * the future and have old kernels ignore it. */
+       if (packet3_hdr != (RADEON_CP_PACKET3 | RADEON_CP_NOP | (RELOC_SIZE << 16))) {
+               DRM_ERROR("Packet 3 was %x should have been %x\n", packet3_hdr, RADEON_CP_PACKET3 | RADEON_CP_NOP | (RELOC_SIZE << 16));
+               return -EINVAL;
+       }
+       
+       switch(reg) {
+       case RADEON_DST_PITCH_OFFSET:
+       case RADEON_SRC_PITCH_OFFSET:
+               /* pass in the start of the reloc */
+               ret = dev_priv->cs.relocate(dev, file_priv, packets + offset_dw + 2, &offset);
+               if (ret)
+                       return ret;
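+               /* the low 22 bits of the 2D pitch/offset registers hold the
+                * offset in 1KB units; rebase it and pack it back */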
+               tmp = (val & RADEON_2D_OFFSET_MASK) << 10;
+               val &= ~RADEON_2D_OFFSET_MASK;
+               offset += tmp;
+               offset >>= 10;
+               val |= offset;
+               break;
+       case R300_RB3D_COLOROFFSET0:
 -      case R300_RB3D_DEPTHOFFSET:
++      case R300_ZB_DEPTHOFFSET:
+       case R300_TX_OFFSET_0:
+       case R300_TX_OFFSET_0+4:
+               ret = dev_priv->cs.relocate(dev, file_priv, packets + offset_dw + 2, &offset);
+               if (ret)
+                       return ret;
+               offset &= 0xffffffe0;
+               val += offset;
+               break;
+       default:
+               break;
+       }
+       packets[offset_dw + 1] = val;
+       return 0;
+ }
+ static int radeon_cs_relocate_packet3(struct drm_device *dev, struct drm_file *file_priv,
+                                     uint32_t *packets, uint32_t offset_dw)
+ {
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       uint32_t hdr = packets[offset_dw];
+       int num_dw = (hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+       uint32_t reg = hdr & 0xff00;
+       uint32_t offset, val, tmp;
+       int ret;
+       switch(reg) {
+       case RADEON_CNTL_HOSTDATA_BLT:
+       {
+               val = packets[offset_dw + 2];
+               ret = dev_priv->cs.relocate(dev, file_priv, packets + offset_dw + num_dw + 2, &offset);
+               if (ret)
+                       return ret;
+               tmp = (val & RADEON_2D_OFFSET_MASK) << 10;
+               val &= ~RADEON_2D_OFFSET_MASK;
+               offset += tmp;
+               offset >>= 10;
+               val |= offset;
+               packets[offset_dw + 2] = val;
+               break;
+       }
+       default:
+               return -EINVAL;
+       }
+       return 0;
+ }
+ static __inline__ int radeon_cs_check_offset(struct drm_device *dev,
+                                            uint32_t reg, uint32_t val)
+ {
+       uint32_t offset;
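+       /* placeholder: the offset is decoded per register type but no
+        * range check is applied yet */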
+       switch(reg) {
+       case RADEON_DST_PITCH_OFFSET:
+       case RADEON_SRC_PITCH_OFFSET:
+               offset = val & ((1 << 22) - 1);
+               offset <<= 10;
+               break;
+       case R300_RB3D_COLOROFFSET0:
++      case R300_ZB_DEPTHOFFSET:
+               offset = val;
+               break;
+       case R300_TX_OFFSET_0:
+       case R300_TX_OFFSET_0+4:
+               offset = val & 0xffffffe0;
+               break;
+       }
+       
+       return 0;
+ }
+ int radeon_cs_packet0(struct drm_device *dev, struct drm_file *file_priv,
+                     uint32_t *packets, uint32_t offset_dw)
+ {
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       uint32_t hdr = packets[offset_dw];
+       int num_dw = ((hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16) + 2;
+       int need_reloc = 0;
+       int reg = (hdr & R300_CP_PACKET0_REG_MASK) << 2;
+       int count_dw = 1;
+       int ret;
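+       /* dword 0 is the packet0 header; each following dword targets
+        * reg, reg + 4, ... */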
+       while (count_dw < num_dw) {
+               /* need to have something like the r300 validation here:
+                * a list of allowed registers */
+               int flags;
+               ret = r300_check_range(reg, 1);
+               switch(ret) {
+               case -1:
+                       DRM_ERROR("Illegal register %x\n", reg);
+                       return -EINVAL;
+               case 0:
+                       break;
+               case 1:
+                       flags = r300_get_reg_flags(reg);
+                       if (flags == MARK_CHECK_OFFSET) {
+                               if (num_dw > 2) {
+                                       DRM_ERROR("Cannot relocate inside type stream of reg0 packets\n");
+                                       return -EINVAL;
+                               }
+                               ret = radeon_cs_relocate_packet0(dev, file_priv, packets, offset_dw);
+                               if (ret)
+                                       return ret;
+                               DRM_DEBUG("need to relocate %x %d\n", reg, flags);
+                               /* okay it should be followed by a NOP */
+                       } else if (flags == MARK_CHECK_SCISSOR) {
+                               DRM_DEBUG("need to validate scissor %x %d\n", reg, flags);
+                       } else {
+                               DRM_DEBUG("illegal register %x %d\n", reg, flags);
+                               return -EINVAL;
+                       }
+                       break;
+               }
+               count_dw++;
+               reg += 4;
+       }
+       return 0;
+ }
+ int radeon_cs_parse(struct drm_device *dev, struct drm_file *file_priv,
+                   void *ib, uint32_t *packets, uint32_t dwords)
+ {
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       volatile int rb;
+       int size_dw = dwords;
+       /* scan the command stream, validating packets and applying relocations */
+       int count_dw = 0;
+       int ret = 0;
+       while (count_dw < size_dw && ret == 0) {
+               int hdr = packets[count_dw];
+               int num_dw = (hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+               int reg;
+               switch (hdr & RADEON_CP_PACKET_MASK) {
+               case RADEON_CP_PACKET0:
+                       ret = radeon_cs_packet0(dev, file_priv, packets, count_dw);
+                       break;
+               case RADEON_CP_PACKET1:
+               case RADEON_CP_PACKET2:
+                       reg = hdr & RADEON_CP_PACKET0_REG_MASK;
+                       DRM_DEBUG("Packet 1/2: %d  %x\n", num_dw, reg);
+                       break;
+               case RADEON_CP_PACKET3:
+                       reg = hdr & 0xff00;
+                       
+                       switch(reg) {
+                       case RADEON_CNTL_HOSTDATA_BLT:
+                               ret = radeon_cs_relocate_packet3(dev, file_priv, packets, count_dw);
+                               break;
+                       case RADEON_CNTL_BITBLT_MULTI:
+                       case RADEON_3D_LOAD_VBPNTR:     /* load vertex array pointers */
+                       case RADEON_CP_INDX_BUFFER:
+                               DRM_ERROR("need relocate packet 3 for %x\n", reg);
+                               break;
+                       case RADEON_CP_3D_DRAW_IMMD_2:  /* triggers drawing using in-packet vertex data */
+                       case RADEON_CP_3D_DRAW_VBUF_2:  /* triggers drawing of vertex buffers setup elsewhere */
+                       case RADEON_CP_3D_DRAW_INDX_2:  /* triggers drawing using indices to vertex buffer */
+                       case RADEON_WAIT_FOR_IDLE:
+                       case RADEON_CP_NOP:
+                               break;
+                       default:
+                               DRM_ERROR("unknown packet 3 %x\n", reg);
+                               ret = -EINVAL;
+                       }
+                       break;
+               }
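+               /* advance past the header plus its count+1 payload dwords */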
+               count_dw += num_dw+2;
+       }
+       if (ret)
+               return ret;
+            
+       /* copy the packet into the IB */
+       memcpy(ib, packets, dwords * sizeof(uint32_t));
+       /* read back the last dword to flush WC buffers */
+       rb = readl((ib + (dwords-1) * sizeof(uint32_t)));
+       return 0;
+ }
+ uint32_t radeon_cs_id_get(struct drm_radeon_private *radeon)
+ {
+       /* FIXME: protect with a spinlock */
+       /* FIXME: check if wrap affect last reported wrap & sequence */
+       radeon->cs.id_scnt = (radeon->cs.id_scnt + 1) & 0x00FFFFFF;
+       if (!radeon->cs.id_scnt) {
+               /* increment wrap counter */
+               radeon->cs.id_wcnt += 0x01000000;
+               /* valid sequence counters start at 1 */
+               radeon->cs.id_scnt = 1;
+       }
+       return (radeon->cs.id_scnt | radeon->cs.id_wcnt);
+ }
+ void r100_cs_id_emit(struct drm_device *dev, uint32_t *id)
+ {
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       RING_LOCALS;
+       /* ISYNC_CNTL should have the CPSCRATCH bit set */
+       *id = radeon_cs_id_get(dev_priv);
+       /* emit id in SCRATCH4 (not used yet in old drm) */
+       BEGIN_RING(2);
+       OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG4, 0));
+       OUT_RING(*id);
+       ADVANCE_RING(); 
+ }
+ void r300_cs_id_emit(struct drm_device *dev, uint32_t *id)
+ {
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       RING_LOCALS;
+       /* ISYNC_CNTL should not have the CPSCRATCH bit set */
+       *id = radeon_cs_id_get(dev_priv);
+       /* emit id in SCRATCH6 */
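+       /* resync address 6 presumably selects SCRATCH_REG6, which
+        * r300_cs_id_last_get() reads back */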
+       BEGIN_RING(6);
+       OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 0));
+       OUT_RING(6);
+       OUT_RING(CP_PACKET0(R300_CP_RESYNC_DATA, 0));
+       OUT_RING(*id);
+       OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       OUT_RING(R300_RB3D_DC_FINISH);
+       ADVANCE_RING(); 
+ }
+ uint32_t r100_cs_id_last_get(struct drm_device *dev)
+ {
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       return RADEON_READ(RADEON_SCRATCH_REG4);
+ }
+ uint32_t r300_cs_id_last_get(struct drm_device *dev)
+ {
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       return RADEON_READ(RADEON_SCRATCH_REG6);
+ }
+ int radeon_cs_init(struct drm_device *dev)
+ {
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       if (dev_priv->chip_family < CHIP_RV280) {
+               dev_priv->cs.id_emit = r100_cs_id_emit;
+               dev_priv->cs.id_last_get = r100_cs_id_last_get;
+       } else if (dev_priv->chip_family < CHIP_R600) {
+               dev_priv->cs.id_emit = r300_cs_id_emit;
+               dev_priv->cs.id_last_get = r300_cs_id_last_get;
+       }
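+       /* note: no id_emit/id_last_get hooks are installed here for
+        * CHIP_R600 and newer yet */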
+       dev_priv->cs.parse = radeon_cs_parse;
+       /* ib_get/ib_free depend on whether a memory manager is in use,
+        * so they are set up elsewhere; default to the non-MM relocate */
+       dev_priv->cs.relocate = radeon_nomm_relocate;
+       return 0;
+ }
diff --combined shared-core/radeon_drv.h
@@@ -39,7 -39,7 +39,7 @@@
  
  #define DRIVER_NAME           "radeon"
  #define DRIVER_DESC           "ATI Radeon"
 -#define DRIVER_DATE           "20080528"
 +#define DRIVER_DATE           "20080613"
  
  /* Interface history:
   *
@@@ -195,11 -195,11 +195,11 @@@ enum radeon_mac_model 
  
  
  #define GET_RING_HEAD(dev_priv)       (dev_priv->writeback_works ? \
-                                (dev_priv->mm.ring_read_ptr ? readl(dev_priv->mm.ring_read_ptr_map.virtual + 0) : DRM_READ32((dev_priv)->ring_rptr, 0 )) : \
+                                (dev_priv->mm.ring_read.bo ? readl(dev_priv->mm.ring_read.kmap.virtual + 0) : DRM_READ32((dev_priv)->ring_rptr, 0 )) : \
                                 RADEON_READ(RADEON_CP_RB_RPTR))
  
- #define SET_RING_HEAD(dev_priv,val) (dev_priv->mm.ring_read_ptr ? \
-                                    writel((val), dev_priv->mm.ring_read_ptr_map.virtual) : \
+ #define SET_RING_HEAD(dev_priv,val) (dev_priv->mm.ring_read.bo ? \
+                                    writel((val), dev_priv->mm.ring_read.kmap.virtual) : \
                                     DRM_WRITE32((dev_priv)->ring_rptr, 0, (val)))
  
  typedef struct drm_radeon_freelist {
@@@ -261,6 -261,11 +261,11 @@@ struct radeon_virt_surface 
        struct drm_file *file_priv;
  };
  
+ struct radeon_mm_obj {
+       struct drm_buffer_object *bo;
+       struct drm_bo_kmap_obj kmap;
+ };
  struct radeon_mm_info {
        uint64_t vram_offset; // Offset into GPU space
        uint64_t vram_size;
        
        uint64_t gart_start;
        uint64_t gart_size;
+       
+       struct radeon_mm_obj pcie_table;
+       struct radeon_mm_obj ring;
+       struct radeon_mm_obj ring_read;
  
-       struct drm_buffer_object *pcie_table;
-       struct drm_bo_kmap_obj pcie_table_map;
-       struct drm_buffer_object *ring;
-       struct drm_bo_kmap_obj ring_map;
-       struct drm_buffer_object *ring_read_ptr;
-       struct drm_bo_kmap_obj ring_read_ptr_map;
+       struct radeon_mm_obj dma_bufs;
+       struct drm_map fake_agp_map;
  };
  
  #include "radeon_mode.h"
@@@ -286,14 -289,35 +289,38 @@@ struct drm_radeon_master_private 
        drm_radeon_sarea_t *sarea_priv;
  };
  
 +#define RADEON_FLUSH_EMITED   (1 << 0)
 +#define RADEON_PURGE_EMITED   (1 << 1)
 +
+ /* command submission struct */
+ struct drm_radeon_cs_priv {
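+       /* wrap (wcnt) and sequence (scnt) counters for cs ids; the _last
+        * pair presumably tracks the most recently completed id */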
+       uint32_t id_wcnt;
+       uint32_t id_scnt;
+       uint32_t id_last_wcnt;
+       uint32_t id_last_scnt;
+       int (*parse)(struct drm_device *dev, struct drm_file *file_priv,
+                    void *ib, uint32_t *packets, uint32_t dwords);
+       void (*id_emit)(struct drm_device *dev, uint32_t *id);
+       uint32_t (*id_last_get)(struct drm_device *dev);
+       /* these ib handling callbacks hide the memory-manager drm from
+        * the memory-manager-less drm; ib_free has to emit an ib discard
+        * sequence into the ring */
+       int (*ib_get)(struct drm_device *dev, void **ib, uint32_t dwords, uint32_t *card_offset);
+       uint32_t (*ib_get_ptr)(struct drm_device *dev, void *ib);
+       void (*ib_free)(struct drm_device *dev, void *ib, uint32_t dwords);
+       /* do a relocation either MM or non-MM */
+       int (*relocate)(struct drm_device *dev, struct drm_file *file_priv,
+                        uint32_t *reloc, uint32_t *offset);
+ };
  typedef struct drm_radeon_private {
  
        drm_radeon_ring_buffer_t ring;
  
-       int new_memmap;
+       bool new_memmap;
+       bool user_mm_enable;
  
        int gart_size;
        u32 gart_vm_start;
        struct radeon_surface surfaces[RADEON_MAX_SURFACES];
        struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
  
 -
        u32 scratch_ages[5];
  
        unsigned int crtc_last_cnt;
        uint32_t flags;         /* see radeon_chip_flags */
        unsigned long fb_aper_offset;
  
 -      int num_gb_pipes;
 -
+       bool mm_enabled;
        struct radeon_mm_info mm;
        drm_local_map_t *mmio;
  
 -      uint32_t chip_family;
  
        unsigned long pcigart_offset;
        unsigned int pcigart_offset_set;
        u32 ram_width;
  
        enum radeon_pll_errata pll_errata;
 +      
 +      int num_gb_pipes;
 +      int track_flush;
 +      uint32_t chip_family; /* extracted from flags */
+       struct radeon_mm_obj **ib_objs;
+       /* ib bitmap */
+       uint64_t ib_alloc_bitmap; // TODO: replace with a real bitmap
+       struct drm_radeon_cs_priv cs;
  } drm_radeon_private_t;
  
  typedef struct drm_radeon_buf_priv {
@@@ -455,7 -485,6 +488,7 @@@ extern void radeon_mem_release(struct d
                               struct mem_block *heap);
  
                                /* radeon_irq.c */
 +extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
  extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
  extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
  
@@@ -672,14 -701,15 +705,15 @@@ extern int r300_do_cp_cmdbuf(struct drm
  #define RADEON_SCRATCH_REG3           0x15ec
  #define RADEON_SCRATCH_REG4           0x15f0
  #define RADEON_SCRATCH_REG5           0x15f4
+ #define RADEON_SCRATCH_REG6           0x15f8
  #define RADEON_SCRATCH_UMSK           0x0770
  #define RADEON_SCRATCH_ADDR           0x0774
  
  #define RADEON_SCRATCHOFF( x )                (RADEON_SCRATCH_REG_OFFSET + 4*(x))
  
  #define GET_SCRATCH( x )      (dev_priv->writeback_works ?                    \
-                                (dev_priv->mm.ring_read_ptr ? \
-                                 readl(dev_priv->mm.ring_read_ptr_map.virtual + RADEON_SCRATCHOFF(0)) : \
+                                (dev_priv->mm.ring_read.bo ? \
+                                 readl(dev_priv->mm.ring_read.kmap.virtual + RADEON_SCRATCHOFF(x)) : \
                                  DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(x))) : \
                                 RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x)))
  
  #define RADEON_PP_TXFILTER_1          0x1c6c
  #define RADEON_PP_TXFILTER_2          0x1c84
  
 -#define RADEON_RB2D_DSTCACHE_CTLSTAT  0x342c
 -#     define RADEON_RB2D_DC_FLUSH             (3 << 0)
 -#     define RADEON_RB2D_DC_FREE              (3 << 2)
 -#     define RADEON_RB2D_DC_FLUSH_ALL         0xf
 -#     define RADEON_RB2D_DC_BUSY              (1 << 31)
  #define RADEON_RB3D_CNTL              0x1c3c
  #     define RADEON_ALPHA_BLEND_ENABLE        (1 << 0)
  #     define RADEON_PLANE_MASK_ENABLE         (1 << 1)
  #define R300_ZB_ZCACHE_CTLSTAT                  0x4f18
  #     define R300_ZC_FLUSH                    (1 << 0)
  #     define R300_ZC_FREE                     (1 << 1)
 -#     define R300_ZC_FLUSH_ALL                0x3
  #     define R300_ZC_BUSY                     (1 << 31)
  #define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
 +#     define R300_RB3D_DC_FLUSH               (2 << 0)
 +#     define R300_RB3D_DC_FREE                (2 << 2)
  #     define R300_RB3D_DC_FINISH              (1 << 4)
  #define RADEON_RB3D_ZSTENCILCNTL      0x1c2c
  #     define RADEON_Z_TEST_MASK               (7 << 4)
  #define RADEON_READ8(reg)     DRM_READ8(  dev_priv->mmio, (reg) )
  #define RADEON_WRITE8(reg,val)        DRM_WRITE8( dev_priv->mmio, (reg), (val) )
  
- extern int RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr);
+ extern u32 RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr);
  extern void RADEON_WRITE_PLL(struct drm_radeon_private *dev_priv, int addr, uint32_t data);
  
- #define RADEON_WRITE_PCIE( addr, val )                                        \
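+ /* read-modify-write helpers: bits set in mask are preserved, while
+  * val supplies the bits where mask is clear */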
+ #define RADEON_WRITE_P(reg, val, mask)                \
+ do {                                          \
+       uint32_t tmp = RADEON_READ(reg);        \
+       tmp &= (mask);                          \
+       tmp |= ((val) & ~(mask));               \
+       RADEON_WRITE(reg, tmp);                 \
+ } while (0)
+ #define RADEON_WRITE_PLL_P(dev_priv, addr, val, mask)         \
+ do {                                                          \
+       uint32_t tmp_ = RADEON_READ_PLL(dev_priv, addr);        \
+       tmp_ &= (mask);                                         \
+       tmp_ |= ((val) & ~(mask));                              \
+       RADEON_WRITE_PLL(dev_priv, addr, tmp_);                 \
+ } while (0)
+ #define RADEON_WRITE_PCIE(addr, val)                                  \
  do {                                                                  \
-       RADEON_WRITE8( RADEON_PCIE_INDEX,                               \
+       RADEON_WRITE8(RADEON_PCIE_INDEX,                                \
                        ((addr) & 0xff));                               \
-       RADEON_WRITE( RADEON_PCIE_DATA, (val) );                        \
+       RADEON_WRITE(RADEON_PCIE_DATA, (val));                  \
  } while (0)
  
- #define R500_WRITE_MCIND( addr, val )                                 \
+ #define R500_WRITE_MCIND(addr, val)                                   \
  do {                                                          \
        RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff));    \
        RADEON_WRITE(R520_MC_IND_DATA, (val));                  \
        RADEON_WRITE(R520_MC_IND_INDEX, 0);     \
  } while (0)
  
- #define RS480_WRITE_MCIND( addr, val )                                \
+ #define RS480_WRITE_MCIND(addr, val)                          \
  do {                                                                  \
-       RADEON_WRITE( RS480_NB_MC_INDEX,                                \
+       RADEON_WRITE(RS480_NB_MC_INDEX,                         \
                        ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN);       \
-       RADEON_WRITE( RS480_NB_MC_DATA, (val) );                        \
-       RADEON_WRITE( RS480_NB_MC_INDEX, 0xff );                        \
+       RADEON_WRITE(RS480_NB_MC_DATA, (val));                  \
+       RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);                  \
  } while (0)
  
- #define RS690_WRITE_MCIND( addr, val )                                        \
+ #define RS690_WRITE_MCIND(addr, val)                                  \
  do {                                                          \
        RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK));    \
        RADEON_WRITE(RS690_MC_DATA, val);                       \
        RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);    \
  } while (0)
  
- #define IGP_WRITE_MCIND( addr, val )                          \
+ #define IGP_WRITE_MCIND(addr, val)                            \
  do {                                                                  \
-         if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)       \
-               RS690_WRITE_MCIND( addr, val );                         \
-       else                                                            \
-               RS480_WRITE_MCIND( addr, val );                         \
+       if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)       \
+               RS690_WRITE_MCIND(addr, val);                           \
+       else                                                            \
+               RS480_WRITE_MCIND(addr, val);                           \
  } while (0)
  
  #define CP_PACKET0( reg, n )                                          \
  
  #define RADEON_FLUSH_CACHE() do {                                     \
        if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
-               OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));  \
-               OUT_RING(RADEON_RB3D_DC_FLUSH);                         \
+               OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));  \
+               OUT_RING(RADEON_RB3D_DC_FLUSH);                         \
        } else {                                                        \
-               OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));    \
-               OUT_RING(R300_RB3D_DC_FLUSH);                           \
-         }                                                               \
+               OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));    \
+               OUT_RING(R300_RB3D_DC_FLUSH);                           \
+       }                                                               \
  } while (0)
  
  #define RADEON_PURGE_CACHE() do {                                     \
        if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
-               OUT_RING(CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
-               OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE);   \
+               OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));  \
 -              OUT_RING(RADEON_RB3D_DC_FLUSH_ALL);                     \
++              OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE);   \
        } else {                                                        \
-               OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));    \
-               OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE );      \
-         }                                                               \
+               OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));    \
 -              OUT_RING(RADEON_RB3D_DC_FLUSH_ALL);                     \
++              OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);      \
+       }                                                               \
  } while (0)
  
  #define RADEON_FLUSH_ZCACHE() do {                                    \
        if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
-               OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
-               OUT_RING( RADEON_RB3D_ZC_FLUSH );                       \
+               OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));    \
+               OUT_RING(RADEON_RB3D_ZC_FLUSH);                         \
        } else {                                                        \
-               OUT_RING( CP_PACKET0( R300_ZB_ZCACHE_CTLSTAT, 0 ) );    \
-               OUT_RING( R300_ZC_FLUSH );                              \
-         }                                                               \
+               OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));        \
+               OUT_RING(R300_ZC_FLUSH);                                \
+       }                                                               \
  } while (0)
  
  #define RADEON_PURGE_ZCACHE() do {                                    \
        if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
-               OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));    \
-               OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE);   \
+               OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));    \
 -              OUT_RING(RADEON_RB3D_ZC_FLUSH_ALL);                     \
++              OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE);   \
        } else {                                                        \
-               OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));        \
-               OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE);                 \
-         }                                                               \
 -              OUT_RING(CP_PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));      \
 -              OUT_RING(R300_ZC_FLUSH_ALL);                            \
++              OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));        \
++              OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE);                 \
+       }                                                               \
  } while (0)
  
  /* ================================================================
@@@ -1380,7 -1432,7 +1432,7 @@@ do {                                                                    
  #define VB_AGE_TEST_WITH_RETURN( dev_priv )                           \
  do {                                                                  \
        struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;         \
-       drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;               \
+       drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;       \
        if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) {         \
                int __ret = radeon_do_cp_idle( dev_priv );              \
                if ( __ret ) return __ret;                              \
@@@ -1556,6 -1608,23 +1608,23 @@@ static inline int radeon_update_breadcr
  
  #define radeon_is_dce3(dev_priv) ((dev_priv->chip_family >= CHIP_RV620))
  
+ #define radeon_is_rv100(dev_priv) ((dev_priv->chip_family == CHIP_RV100) || \
+                                  (dev_priv->chip_family == CHIP_RV200) || \
+                                  (dev_priv->chip_family == CHIP_RS100) || \
+                                  (dev_priv->chip_family == CHIP_RS200) || \
+                                  (dev_priv->chip_family == CHIP_RV250) || \
+                                  (dev_priv->chip_family == CHIP_RV280) || \
+                                  (dev_priv->chip_family == CHIP_RS300))
+ #define radeon_is_r300(dev_priv) ((dev_priv->chip_family == CHIP_R300)  || \
+                                 (dev_priv->chip_family == CHIP_RV350) || \
+                                 (dev_priv->chip_family == CHIP_R350)  || \
+                                 (dev_priv->chip_family == CHIP_RV380) || \
+                                 (dev_priv->chip_family == CHIP_R420)  || \
+                                 (dev_priv->chip_family == CHIP_RV410) || \
+                                 (dev_priv->chip_family == CHIP_RS400) || \
+                                 (dev_priv->chip_family == CHIP_RS480))
  #define radeon_bios8(dev_priv, v) (dev_priv->bios[v])
  #define radeon_bios16(dev_priv, v) (dev_priv->bios[v] | (dev_priv->bios[(v) + 1] << 8))
  #define radeon_bios32(dev_priv, v) ((dev_priv->bios[v]) | \
                                    (dev_priv->bios[(v) + 2] << 16) | \
                                    (dev_priv->bios[(v) + 3] << 24))
  
+ extern void radeon_pll_errata_after_index(struct drm_radeon_private *dev_priv);
  extern int radeon_emit_irq(struct drm_device * dev);
  
  extern void radeon_gem_free_object(struct drm_gem_object *obj);
@@@ -1592,4 -1662,14 +1662,14 @@@ extern void radeon_set_pcigart(drm_rade
  extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
  extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
  extern void radeon_cp_dispatch_flip(struct drm_device * dev, struct drm_master *master);
+ extern int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv);
+ extern int radeon_cs_init(struct drm_device *dev);
+ void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master);
+ #define MARK_SAFE             1
+ #define MARK_CHECK_OFFSET     2
+ #define MARK_CHECK_SCISSOR    3
+ extern int r300_check_range(unsigned reg, int count);
+ extern int r300_get_reg_flags(unsigned reg);
  #endif                                /* __RADEON_DRV_H__ */