OSDN Git Service

Merge Linus master to drm-next
authorDave Airlie <airlied@redhat.com>
Thu, 20 Aug 2009 03:38:04 +0000 (13:38 +1000)
committerDave Airlie <airlied@redhat.com>
Thu, 20 Aug 2009 03:38:04 +0000 (13:38 +1000)
A conflict reported in linux-next needed resolution.

Conflicts:
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_bo.c

13 files changed:
1  2 
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_tt.c
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_module.h

@@@ -68,10 -68,10 +68,10 @@@ DRM_ENUM_NAME_FN(drm_get_dpms_name, drm
   */
  static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
  {
 -      { DRM_MODE_SCALE_NON_GPU, "Non-GPU" },
 -      { DRM_MODE_SCALE_FULLSCREEN, "Fullscreen" },
 -      { DRM_MODE_SCALE_NO_SCALE, "No scale" },
 -      { DRM_MODE_SCALE_ASPECT, "Aspect" },
 +      { DRM_MODE_SCALE_NONE, "None" },
 +      { DRM_MODE_SCALE_FULLSCREEN, "Full" },
 +      { DRM_MODE_SCALE_CENTER, "Center" },
 +      { DRM_MODE_SCALE_ASPECT, "Full aspect" },
  };
  
  static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
@@@ -108,7 -108,6 +108,7 @@@ static struct drm_prop_enum_list drm_tv
        { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
        { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
        { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
 +      { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
  };
  
  DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
@@@ -119,7 -118,6 +119,7 @@@ static struct drm_prop_enum_list drm_tv
        { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
        { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
        { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
 +      { DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
  };
  
  DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
@@@ -148,7 -146,6 +148,7 @@@ static struct drm_conn_prop_enum_list d
        { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
        { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
        { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
 +      { DRM_MODE_CONNECTOR_TV, "TV", 0 },
  };
  
  static struct drm_prop_enum_list drm_encoder_enum_list[] =
@@@ -261,31 -258,6 +261,6 @@@ void *drm_mode_object_find(struct drm_d
  EXPORT_SYMBOL(drm_mode_object_find);
  
  /**
-  * drm_crtc_from_fb - find the CRTC structure associated with an fb
-  * @dev: DRM device
-  * @fb: framebuffer in question
-  *
-  * LOCKING:
-  * Caller must hold mode_config lock.
-  *
-  * Find CRTC in the mode_config structure that matches @fb.
-  *
-  * RETURNS:
-  * Pointer to the CRTC or NULL if it wasn't found.
-  */
- struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev,
-                                 struct drm_framebuffer *fb)
- {
-       struct drm_crtc *crtc;
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               if (crtc->fb == fb)
-                       return crtc;
-       }
-       return NULL;
- }
- /**
   * drm_framebuffer_init - initialize a framebuffer
   * @dev: DRM device
   *
@@@ -331,11 -303,20 +306,20 @@@ void drm_framebuffer_cleanup(struct drm
  {
        struct drm_device *dev = fb->dev;
        struct drm_crtc *crtc;
+       struct drm_mode_set set;
+       int ret;
  
        /* remove from any CRTC */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               if (crtc->fb == fb)
-                       crtc->fb = NULL;
+               if (crtc->fb == fb) {
+                       /* should turn off the crtc */
+                       memset(&set, 0, sizeof(struct drm_mode_set));
+                       set.crtc = crtc;
+                       set.fb = NULL;
+                       ret = crtc->funcs->set_config(&set);
+                       if (ret)
+                               DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+               }
        }
  
        drm_mode_object_put(dev, &fb->base);
@@@ -718,42 -699,6 +702,42 @@@ int drm_mode_create_tv_properties(struc
                drm_property_add_enum(dev->mode_config.tv_mode_property, i,
                                      i, modes[i]);
  
 +      dev->mode_config.tv_brightness_property =
 +              drm_property_create(dev, DRM_MODE_PROP_RANGE,
 +                                  "brightness", 2);
 +      dev->mode_config.tv_brightness_property->values[0] = 0;
 +      dev->mode_config.tv_brightness_property->values[1] = 100;
 +
 +      dev->mode_config.tv_contrast_property =
 +              drm_property_create(dev, DRM_MODE_PROP_RANGE,
 +                                  "contrast", 2);
 +      dev->mode_config.tv_contrast_property->values[0] = 0;
 +      dev->mode_config.tv_contrast_property->values[1] = 100;
 +
 +      dev->mode_config.tv_flicker_reduction_property =
 +              drm_property_create(dev, DRM_MODE_PROP_RANGE,
 +                                  "flicker reduction", 2);
 +      dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
 +      dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
 +
 +      dev->mode_config.tv_overscan_property =
 +              drm_property_create(dev, DRM_MODE_PROP_RANGE,
 +                                  "overscan", 2);
 +      dev->mode_config.tv_overscan_property->values[0] = 0;
 +      dev->mode_config.tv_overscan_property->values[1] = 100;
 +
 +      dev->mode_config.tv_saturation_property =
 +              drm_property_create(dev, DRM_MODE_PROP_RANGE,
 +                                  "saturation", 2);
 +      dev->mode_config.tv_saturation_property->values[0] = 0;
 +      dev->mode_config.tv_saturation_property->values[1] = 100;
 +
 +      dev->mode_config.tv_hue_property =
 +              drm_property_create(dev, DRM_MODE_PROP_RANGE,
 +                                  "hue", 2);
 +      dev->mode_config.tv_hue_property->values[0] = 0;
 +      dev->mode_config.tv_hue_property->values[1] = 100;
 +
        return 0;
  }
  EXPORT_SYMBOL(drm_mode_create_tv_properties);
@@@ -1099,7 -1044,7 +1083,7 @@@ int drm_mode_getresources(struct drm_de
                if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
                        list_for_each_entry(crtc, &dev->mode_config.crtc_list,
                                            head) {
 -                              DRM_DEBUG("CRTC ID is %d\n", crtc->base.id);
 +                              DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
                                if (put_user(crtc->base.id, crtc_id + copied)) {
                                        ret = -EFAULT;
                                        goto out;
                        list_for_each_entry(encoder,
                                            &dev->mode_config.encoder_list,
                                            head) {
 -                              DRM_DEBUG("ENCODER ID is %d\n",
 +                              DRM_DEBUG_KMS("ENCODER ID is %d\n",
                                          encoder->base.id);
                                if (put_user(encoder->base.id, encoder_id +
                                             copied)) {
                        list_for_each_entry(connector,
                                            &dev->mode_config.connector_list,
                                            head) {
 -                              DRM_DEBUG("CONNECTOR ID is %d\n",
 +                              DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
                                          connector->base.id);
                                if (put_user(connector->base.id,
                                             connector_id + copied)) {
        }
        card_res->count_connectors = connector_count;
  
 -      DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs,
 +      DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
                  card_res->count_connectors, card_res->count_encoders);
  
  out:
@@@ -1285,7 -1230,7 +1269,7 @@@ int drm_mode_getconnector(struct drm_de
  
        memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
  
 -      DRM_DEBUG("connector id %d:\n", out_resp->connector_id);
 +      DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);
  
        mutex_lock(&dev->mode_config.mutex);
  
@@@ -1461,7 -1406,7 +1445,7 @@@ int drm_mode_setcrtc(struct drm_device 
        obj = drm_mode_object_find(dev, crtc_req->crtc_id,
                                   DRM_MODE_OBJECT_CRTC);
        if (!obj) {
 -              DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id);
 +              DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
                ret = -EINVAL;
                goto out;
        }
                        list_for_each_entry(crtcfb,
                                            &dev->mode_config.crtc_list, head) {
                                if (crtcfb == crtc) {
 -                                      DRM_DEBUG("Using current fb for setmode\n");
 +                                      DRM_DEBUG_KMS("Using current fb for "
 +                                                      "setmode\n");
                                        fb = crtc->fb;
                                }
                        }
                        obj = drm_mode_object_find(dev, crtc_req->fb_id,
                                                   DRM_MODE_OBJECT_FB);
                        if (!obj) {
 -                              DRM_DEBUG("Unknown FB ID%d\n", crtc_req->fb_id);
 +                              DRM_DEBUG_KMS("Unknown FB ID%d\n",
 +                                              crtc_req->fb_id);
                                ret = -EINVAL;
                                goto out;
                        }
        }
  
        if (crtc_req->count_connectors == 0 && mode) {
 -              DRM_DEBUG("Count connectors is 0 but mode set\n");
 +              DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
                ret = -EINVAL;
                goto out;
        }
  
-       if (crtc_req->count_connectors > 0 && !mode && !fb) {
+       if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
 -              DRM_DEBUG("Count connectors is %d but no mode or fb set\n",
 +              DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
                          crtc_req->count_connectors);
                ret = -EINVAL;
                goto out;
                        obj = drm_mode_object_find(dev, out_id,
                                                   DRM_MODE_OBJECT_CONNECTOR);
                        if (!obj) {
 -                              DRM_DEBUG("Connector id %d unknown\n", out_id);
 +                              DRM_DEBUG_KMS("Connector id %d unknown\n",
 +                                              out_id);
                                ret = -EINVAL;
                                goto out;
                        }
        set.mode = mode;
        set.connectors = connector_set;
        set.num_connectors = crtc_req->count_connectors;
-       set.fb =fb;
+       set.fb = fb;
        ret = crtc->funcs->set_config(&set);
  
  out:
@@@ -1570,7 -1512,7 +1554,7 @@@ int drm_mode_cursor_ioctl(struct drm_de
        struct drm_crtc *crtc;
        int ret = 0;
  
 -      DRM_DEBUG("\n");
 +      DRM_DEBUG_KMS("\n");
  
        if (!req->flags) {
                DRM_ERROR("no operation set\n");
        mutex_lock(&dev->mode_config.mutex);
        obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj) {
 -              DRM_DEBUG("Unknown CRTC ID %d\n", req->crtc_id);
 +              DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
                ret = -EINVAL;
                goto out;
        }
@@@ -94,7 -94,7 +94,7 @@@ int drm_helper_probe_single_connector_m
        int count = 0;
        int mode_flags = 0;
  
 -      DRM_DEBUG("%s\n", drm_get_connector_name(connector));
 +      DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector));
        /* set all modes to the unverified state */
        list_for_each_entry_safe(mode, t, &connector->modes, head)
                mode->status = MODE_UNVERIFIED;
        connector->status = connector->funcs->detect(connector);
  
        if (connector->status == connector_status_disconnected) {
 -              DRM_DEBUG("%s is disconnected\n",
 +              DRM_DEBUG_KMS("%s is disconnected\n",
                          drm_get_connector_name(connector));
                /* TODO set EDID to NULL */
                return 0;
  
        drm_mode_sort(&connector->modes);
  
 -      DRM_DEBUG("Probed modes for %s\n", drm_get_connector_name(connector));
 +      DRM_DEBUG_KMS("Probed modes for %s\n",
 +                              drm_get_connector_name(connector));
        list_for_each_entry_safe(mode, t, &connector->modes, head) {
                mode->vrefresh = drm_mode_vrefresh(mode);
  
@@@ -185,13 -184,12 +185,13 @@@ static void drm_helper_add_std_modes(st
                drm_mode_list_concat(&connector->probed_modes,
                                     &connector->modes);
  
 -              DRM_DEBUG("Adding mode %s to %s\n", stdmode->name,
 +              DRM_DEBUG_KMS("Adding mode %s to %s\n", stdmode->name,
                          drm_get_connector_name(connector));
        }
        drm_mode_sort(&connector->modes);
  
 -      DRM_DEBUG("Added std modes on %s\n", drm_get_connector_name(connector));
 +      DRM_DEBUG_KMS("Added std modes on %s\n",
 +                      drm_get_connector_name(connector));
        list_for_each_entry_safe(mode, t, &connector->modes, head) {
                mode->vrefresh = drm_mode_vrefresh(mode);
  
@@@ -314,7 -312,7 +314,7 @@@ static void drm_enable_connectors(struc
  
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                enabled[i] = drm_connector_enabled(connector, true);
 -              DRM_DEBUG("connector %d enabled? %s\n", connector->base.id,
 +              DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
                          enabled[i] ? "yes" : "no");
                any_enabled |= enabled[i];
                i++;
@@@ -344,7 -342,7 +344,7 @@@ static bool drm_target_preferred(struc
                        continue;
                }
  
 -              DRM_DEBUG("looking for preferred mode on connector %d\n",
 +              DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
                          connector->base.id);
  
                modes[i] = drm_has_preferred_mode(connector, width, height);
                        list_for_each_entry(modes[i], &connector->modes, head)
                                break;
                }
 -              DRM_DEBUG("found mode %s\n", modes[i] ? modes[i]->name :
 +              DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
                          "none");
                i++;
        }
@@@ -454,7 -452,7 +454,7 @@@ static void drm_setup_crtcs(struct drm_
        int width, height;
        int i, ret;
  
 -      DRM_DEBUG("\n");
 +      DRM_DEBUG_KMS("\n");
  
        width = dev->mode_config.max_width;
        height = dev->mode_config.max_height;
        if (!ret)
                DRM_ERROR("Unable to find initial modes\n");
  
 -      DRM_DEBUG("picking CRTCs for %dx%d config\n", width, height);
 +      DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
  
        drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
  
                }
  
                if (mode && crtc) {
 -                      DRM_DEBUG("desired mode %s set on crtc %d\n",
 +                      DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
                                  mode->name, crtc->base.id);
                        crtc->desired_mode = mode;
                        connector->encoder->crtc = crtc;
@@@ -708,14 -706,14 +708,14 @@@ int drm_crtc_helper_set_config(struct d
        struct drm_encoder **save_encoders, *new_encoder;
        struct drm_framebuffer *old_fb = NULL;
        bool save_enabled;
-       bool mode_changed = false;
-       bool fb_changed = false;
+       bool mode_changed = false; /* if true do a full mode set */
+       bool fb_changed = false; /* if true and !mode_changed just do a flip */
        struct drm_connector *connector;
        int count = 0, ro, fail = 0;
        struct drm_crtc_helper_funcs *crtc_funcs;
        int ret = 0;
  
 -      DRM_DEBUG("\n");
 +      DRM_DEBUG_KMS("\n");
  
        if (!set)
                return -EINVAL;
  
        crtc_funcs = set->crtc->helper_private;
  
 -      DRM_DEBUG("crtc: %p %d fb: %p connectors: %p num_connectors: %d (x, y) (%i, %i)\n",
 +      DRM_DEBUG_KMS("crtc: %p %d fb: %p connectors: %p num_connectors:"
 +                      " %d (x, y) (%i, %i)\n",
                  set->crtc, set->crtc->base.id, set->fb, set->connectors,
                  (int)set->num_connectors, set->x, set->y);
  
        if (set->crtc->fb != set->fb) {
                /* If we have no fb then treat it as a full mode set */
                if (set->crtc->fb == NULL) {
 -                      DRM_DEBUG("crtc has no fb, full mode set\n");
 +                      DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
                        mode_changed = true;
+               } else if (set->fb == NULL) {
+                       mode_changed = true;
                } else if ((set->fb->bits_per_pixel !=
                         set->crtc->fb->bits_per_pixel) ||
                         set->fb->depth != set->crtc->fb->depth)
                fb_changed = true;
  
        if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
 -              DRM_DEBUG("modes are different, full mode set\n");
 +              DRM_DEBUG_KMS("modes are different, full mode set\n");
                drm_mode_debug_printmodeline(&set->crtc->mode);
                drm_mode_debug_printmodeline(set->mode);
                mode_changed = true;
                }
  
                if (new_encoder != connector->encoder) {
 -                      DRM_DEBUG("encoder changed, full mode switch\n");
 +                      DRM_DEBUG_KMS("encoder changed, full mode switch\n");
                        mode_changed = true;
                        connector->encoder = new_encoder;
                }
                        goto fail_set_mode;
                }
                if (new_crtc != connector->encoder->crtc) {
 -                      DRM_DEBUG("crtc changed, full mode switch\n");
 +                      DRM_DEBUG_KMS("crtc changed, full mode switch\n");
                        mode_changed = true;
                        connector->encoder->crtc = new_crtc;
                }
 -              DRM_DEBUG("setting connector %d crtc to %p\n",
 +              DRM_DEBUG_KMS("setting connector %d crtc to %p\n",
                          connector->base.id, new_crtc);
        }
  
                set->crtc->fb = set->fb;
                set->crtc->enabled = (set->mode != NULL);
                if (set->mode != NULL) {
 -                      DRM_DEBUG("attempting to set mode from userspace\n");
 +                      DRM_DEBUG_KMS("attempting to set mode from"
 +                                      " userspace\n");
                        drm_mode_debug_printmodeline(set->mode);
                        if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
                                                      set->x, set->y,
@@@ -905,7 -903,7 +907,7 @@@ EXPORT_SYMBOL(drm_crtc_helper_set_confi
  
  bool drm_helper_plugged_event(struct drm_device *dev)
  {
 -      DRM_DEBUG("\n");
 +      DRM_DEBUG_KMS("\n");
  
        drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
                                         dev->mode_config.max_height);
  /* use +hsync +vsync for detailed mode */
  #define EDID_QUIRK_DETAILED_SYNC_PP           (1 << 6)
  
 +#define LEVEL_DMT     0
 +#define LEVEL_GTF     1
 +#define LEVEL_CVT     2
 +
  static struct edid_quirk {
        char *vendor;
        int product_id;
@@@ -244,31 -240,25 +244,31 @@@ static void edid_fixup_preferred(struc
  /**
   * drm_mode_std - convert standard mode info (width, height, refresh) into mode
   * @t: standard timing params
 + * @timing_level: standard timing level
   *
   * Take the standard timing params (in this case width, aspect, and refresh)
 - * and convert them into a real mode using CVT.
 + * and convert them into a real mode using CVT/GTF/DMT.
   *
   * Punts for now, but should eventually use the FB layer's CVT based mode
   * generation code.
   */
  struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 -                                    struct std_timing *t)
 +                                    struct std_timing *t,
 +                                    int timing_level)
  {
        struct drm_display_mode *mode;
 -      int hsize = t->hsize * 8 + 248, vsize;
 +      int hsize, vsize;
 +      int vrefresh_rate;
        unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
                >> EDID_TIMING_ASPECT_SHIFT;
 -
 -      mode = drm_mode_create(dev);
 -      if (!mode)
 -              return NULL;
 -
 +      unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
 +              >> EDID_TIMING_VFREQ_SHIFT;
 +
 +      /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
 +      hsize = t->hsize * 8 + 248;
 +      /* vrefresh_rate = vfreq + 60 */
 +      vrefresh_rate = vfreq + 60;
 +      /* the vdisplay is calculated based on the aspect ratio */
        if (aspect_ratio == 0)
                vsize = (hsize * 10) / 16;
        else if (aspect_ratio == 1)
        else
                vsize = (hsize * 9) / 16;
  
 -      drm_mode_set_name(mode);
 -
 +      mode = NULL;
 +      switch (timing_level) {
 +      case LEVEL_DMT:
 +              mode = drm_mode_create(dev);
 +              if (mode) {
 +                      mode->hdisplay = hsize;
 +                      mode->vdisplay = vsize;
 +                      drm_mode_set_name(mode);
 +              }
 +              break;
 +      case LEVEL_GTF:
 +              mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
 +              break;
 +      case LEVEL_CVT:
 +              mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
 +              break;
 +      }
        return mode;
  }
  
@@@ -476,19 -451,6 +476,19 @@@ static int add_established_modes(struc
  
        return modes;
  }
 +/**
 + * standard_timing_level - get std. timing level(CVT/GTF/DMT)
 + * @edid: EDID block to scan
 + */
 +static int standard_timing_level(struct edid *edid)
 +{
 +      if (edid->revision >= 2) {
 +              if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
 +                      return LEVEL_CVT;
 +              return LEVEL_GTF;
 +      }
 +      return LEVEL_DMT;
 +}
  
  /**
   * add_standard_modes - get std. modes from EDID and add them
@@@ -501,9 -463,6 +501,9 @@@ static int add_standard_modes(struct dr
  {
        struct drm_device *dev = connector->dev;
        int i, modes = 0;
 +      int timing_level;
 +
 +      timing_level = standard_timing_level(edid);
  
        for (i = 0; i < EDID_STD_TIMINGS; i++) {
                struct std_timing *t = &edid->standard_timings[i];
                if (t->hsize == 1 && t->vfreq_aspect == 1)
                        continue;
  
 -              newmode = drm_mode_std(dev, &edid->standard_timings[i]);
 +              newmode = drm_mode_std(dev, &edid->standard_timings[i],
 +                                      timing_level);
                if (newmode) {
                        drm_mode_probed_add(connector, newmode);
                        modes++;
@@@ -538,21 -496,46 +538,50 @@@ static int add_detailed_info(struct drm
  {
        struct drm_device *dev = connector->dev;
        int i, j, modes = 0;
 +      int timing_level;
 +
 +      timing_level = standard_timing_level(edid);
  
        for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
                struct detailed_timing *timing = &edid->detailed_timings[i];
                struct detailed_non_pixel *data = &timing->data.other_data;
                struct drm_display_mode *newmode;
  
-               /* EDID up to and including 1.2 may put monitor info here */
-               if (edid->version == 1 && edid->revision < 3)
-                       continue;
-               /* Detailed mode timing */
-               if (timing->pixel_clock) {
+               /* X server check is version 1.1 or higher */
+               if (edid->version == 1 && edid->revision >= 1 &&
+                   !timing->pixel_clock) {
+                       /* Other timing or info */
+                       switch (data->type) {
+                       case EDID_DETAIL_MONITOR_SERIAL:
+                               break;
+                       case EDID_DETAIL_MONITOR_STRING:
+                               break;
+                       case EDID_DETAIL_MONITOR_RANGE:
+                               /* Get monitor range data */
+                               break;
+                       case EDID_DETAIL_MONITOR_NAME:
+                               break;
+                       case EDID_DETAIL_MONITOR_CPDATA:
+                               break;
+                       case EDID_DETAIL_STD_MODES:
+                               /* Five modes per detailed section */
+                               for (j = 0; j < 5; i++) {
+                                       struct std_timing *std;
+                                       struct drm_display_mode *newmode;
+                                       std = &data->data.timings[j];
 -                                      newmode = drm_mode_std(dev, std);
++                                      newmode = drm_mode_std(dev, std,
++                                                             timing_level);
+                                       if (newmode) {
+                                               drm_mode_probed_add(connector, newmode);
+                                               modes++;
+                                       }
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+               } else {
                        newmode = drm_mode_detailed(dev, edid, timing, quirks);
                        if (!newmode)
                                continue;
                        drm_mode_probed_add(connector, newmode);
  
                        modes++;
-                       continue;
-               }
-               /* Other timing or info */
-               switch (data->type) {
-               case EDID_DETAIL_MONITOR_SERIAL:
-                       break;
-               case EDID_DETAIL_MONITOR_STRING:
-                       break;
-               case EDID_DETAIL_MONITOR_RANGE:
-                       /* Get monitor range data */
-                       break;
-               case EDID_DETAIL_MONITOR_NAME:
-                       break;
-               case EDID_DETAIL_MONITOR_CPDATA:
-                       break;
-               case EDID_DETAIL_STD_MODES:
-                       /* Five modes per detailed section */
-                       for (j = 0; j < 5; i++) {
-                               struct std_timing *std;
-                               struct drm_display_mode *newmode;
-                               std = &data->data.timings[j];
-                               newmode = drm_mode_std(dev, std,
-                                                       timing_level);
-                               if (newmode) {
-                                       drm_mode_probed_add(connector, newmode);
-                                       modes++;
-                               }
-                       }
-                       break;
-               default:
-                       break;
                }
        }
  
@@@ -8,8 -8,6 +8,8 @@@
  * Copyright © 2007 Dave Airlie
  * Copyright © 2007-2008 Intel Corporation
   *   Jesse Barnes <jesse.barnes@intel.com>
 + * Copyright 2005-2006 Luc Verhaegen
 + * Copyright (c) 2001, Andy Ritger  aritger@nvidia.com
   *
   * Permission is hereby granted, free of charge, to any person obtaining a
   * copy of this software and associated documentation files (the "Software"),
@@@ -40,6 -38,7 +40,6 @@@
  #include "drm.h"
  #include "drm_crtc.h"
  
 -#define DRM_MODESET_DEBUG     "drm_mode"
  /**
   * drm_mode_debug_printmodeline - debug print a mode
   * @dev: DRM device
@@@ -52,8 -51,8 +52,8 @@@
   */
  void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
  {
 -      DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
 -              "Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
 +      DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
 +                      "0x%x 0x%x\n",
                mode->base.id, mode->name, mode->vrefresh, mode->clock,
                mode->hdisplay, mode->hsync_start,
                mode->hsync_end, mode->htotal,
  EXPORT_SYMBOL(drm_mode_debug_printmodeline);
  
  /**
 + * drm_cvt_mode -create a modeline based on CVT algorithm
 + * @dev: DRM device
 + * @hdisplay: hdisplay size
 + * @vdisplay: vdisplay size
 + * @vrefresh: vrefresh rate
 + * @reduced: whether the GTF calculation is simplified
 + * @interlaced: whether the interlace is supported
 + *
 + * LOCKING:
 + * none.
 + *
 + * return the modeline based on CVT algorithm
 + *
 + * This function is called to generate the modeline based on CVT algorithm
 + * according to the hdisplay, vdisplay, vrefresh.
 + * It is based from the VESA(TM) Coordinated Video Timing Generator by
 + * Graham Loveridge April 9, 2003 available at
 + * http://www.vesa.org/public/CVT/CVTd6r1.xls
 + *
 + * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
 + * What I have done is to translate it by using integer calculation.
 + */
 +#define HV_FACTOR                     1000
 +struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
 +                                    int vdisplay, int vrefresh,
 +                                    bool reduced, bool interlaced)
 +{
 +      /* 1) top/bottom margin size (% of height) - default: 1.8, */
 +#define       CVT_MARGIN_PERCENTAGE           18
 +      /* 2) character cell horizontal granularity (pixels) - default 8 */
 +#define       CVT_H_GRANULARITY               8
 +      /* 3) Minimum vertical porch (lines) - default 3 */
 +#define       CVT_MIN_V_PORCH                 3
 +      /* 4) Minimum number of vertical back porch lines - default 6 */
 +#define       CVT_MIN_V_BPORCH                6
 +      /* Pixel Clock step (kHz) */
 +#define CVT_CLOCK_STEP                        250
 +      struct drm_display_mode *drm_mode;
 +      bool margins = false;
 +      unsigned int vfieldrate, hperiod;
 +      int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
 +      int interlace;
 +
 +      /* allocate the drm_display_mode structure. If failure, we will
 +       * return directly
 +       */
 +      drm_mode = drm_mode_create(dev);
 +      if (!drm_mode)
 +              return NULL;
 +
 +      /* the CVT default refresh rate is 60Hz */
 +      if (!vrefresh)
 +              vrefresh = 60;
 +
 +      /* the required field fresh rate */
 +      if (interlaced)
 +              vfieldrate = vrefresh * 2;
 +      else
 +              vfieldrate = vrefresh;
 +
 +      /* horizontal pixels */
 +      hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
 +
 +      /* determine the left&right borders */
 +      hmargin = 0;
 +      if (margins) {
 +              hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
 +              hmargin -= hmargin % CVT_H_GRANULARITY;
 +      }
 +      /* find the total active pixels */
 +      drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
 +
 +      /* find the number of lines per field */
 +      if (interlaced)
 +              vdisplay_rnd = vdisplay / 2;
 +      else
 +              vdisplay_rnd = vdisplay;
 +
 +      /* find the top & bottom borders */
 +      vmargin = 0;
 +      if (margins)
 +              vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
 +
 +      drm_mode->vdisplay = vdisplay + 2 * vmargin;
 +
 +      /* Interlaced */
 +      if (interlaced)
 +              interlace = 1;
 +      else
 +              interlace = 0;
 +
 +      /* Determine VSync Width from aspect ratio */
 +      if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
 +              vsync = 4;
 +      else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
 +              vsync = 5;
 +      else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
 +              vsync = 6;
 +      else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
 +              vsync = 7;
 +      else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
 +              vsync = 7;
 +      else /* custom */
 +              vsync = 10;
 +
 +      if (!reduced) {
 +              /* simplify the GTF calculation */
 +              /* 4) Minimum time of vertical sync + back porch interval (µs)
 +               * default 550.0
 +               */
 +              int tmp1, tmp2;
 +#define CVT_MIN_VSYNC_BP      550
 +              /* 3) Nominal HSync width (% of line period) - default 8 */
 +#define CVT_HSYNC_PERCENTAGE  8
 +              unsigned int hblank_percentage;
 +              int vsyncandback_porch, vback_porch, hblank;
 +
 +              /* estimated the horizontal period */
 +              tmp1 = HV_FACTOR * 1000000  -
 +                              CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
 +              tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
 +                              interlace;
 +              hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
 +
 +              tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
 +              /* 9. Find number of lines in sync + backporch */
 +              if (tmp1 < (vsync + CVT_MIN_V_PORCH))
 +                      vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
 +              else
 +                      vsyncandback_porch = tmp1;
 +              /* 10. Find number of lines in back porch */
 +              vback_porch = vsyncandback_porch - vsync;
 +              drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
 +                              vsyncandback_porch + CVT_MIN_V_PORCH;
 +              /* 5) Definition of Horizontal blanking time limitation */
 +              /* Gradient (%/kHz) - default 600 */
 +#define CVT_M_FACTOR  600
 +              /* Offset (%) - default 40 */
 +#define CVT_C_FACTOR  40
 +              /* Blanking time scaling factor - default 128 */
 +#define CVT_K_FACTOR  128
 +              /* Scaling factor weighting - default 20 */
 +#define CVT_J_FACTOR  20
 +#define CVT_M_PRIME   (CVT_M_FACTOR * CVT_K_FACTOR / 256)
 +#define CVT_C_PRIME   ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
 +                       CVT_J_FACTOR)
 +              /* 12. Find ideal blanking duty cycle from formula */
 +              hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
 +                                      hperiod / 1000;
 +              /* 13. Blanking time */
 +              if (hblank_percentage < 20 * HV_FACTOR)
 +                      hblank_percentage = 20 * HV_FACTOR;
 +              hblank = drm_mode->hdisplay * hblank_percentage /
 +                       (100 * HV_FACTOR - hblank_percentage);
 +              hblank -= hblank % (2 * CVT_H_GRANULARITY);
 +              /* 14. find the total pixels per line */
 +              drm_mode->htotal = drm_mode->hdisplay + hblank;
 +              drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
 +              drm_mode->hsync_start = drm_mode->hsync_end -
 +                      (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
 +              drm_mode->hsync_start += CVT_H_GRANULARITY -
 +                      drm_mode->hsync_start % CVT_H_GRANULARITY;
 +              /* fill the Vsync values */
 +              drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
 +              drm_mode->vsync_end = drm_mode->vsync_start + vsync;
 +      } else {
 +              /* Reduced blanking */
 +              /* Minimum vertical blanking interval time (µs)- default 460 */
 +#define CVT_RB_MIN_VBLANK     460
 +              /* Fixed number of clocks for horizontal sync */
 +#define CVT_RB_H_SYNC         32
 +              /* Fixed number of clocks for horizontal blanking */
 +#define CVT_RB_H_BLANK                160
 +              /* Fixed number of lines for vertical front porch - default 3*/
 +#define CVT_RB_VFPORCH                3
 +              int vbilines;
 +              int tmp1, tmp2;
 +              /* 8. Estimate Horizontal period. */
 +              tmp1 = HV_FACTOR * 1000000 -
 +                      CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
 +              tmp2 = vdisplay_rnd + 2 * vmargin;
 +              hperiod = tmp1 / (tmp2 * vfieldrate);
 +              /* 9. Find number of lines in vertical blanking */
 +              vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
 +              /* 10. Check if vertical blanking is sufficient */
 +              if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
 +                      vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
 +              /* 11. Find total number of lines in vertical field */
 +              drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
 +              /* 12. Find total number of pixels in a line */
 +              drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
 +              /* Fill in HSync values */
 +              drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
 +              drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
 +      }
 +      /* 15/13. Find pixel clock frequency (kHz for xf86) */
 +      drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
 +      drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
 +      /* 18/16. Find actual vertical frame frequency */
 +      /* ignore - just set the mode flag for interlaced */
 +      if (interlaced)
 +              drm_mode->vtotal *= 2;
 +      /* Fill the mode line name */
 +      drm_mode_set_name(drm_mode);
 +      if (reduced)
 +              drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
 +                                      DRM_MODE_FLAG_NVSYNC);
 +      else
 +              drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
 +                                      DRM_MODE_FLAG_NHSYNC);
 +      if (interlaced)
 +              drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
 +
 +      return drm_mode;
 +}
 +EXPORT_SYMBOL(drm_cvt_mode);
 +
 +/**
 + * drm_gtf_mode - create the modeline based on GTF algorithm
 + *
 + * @dev               :drm device
 + * @hdisplay  :hdisplay size
 + * @vdisplay  :vdisplay size
 + * @vrefresh  :vrefresh rate.
 + * @interlaced        :whether the interlace is supported
 + * @margins   :whether the margin is supported
 + *
 + * LOCKING:
 + * none.
 + *
 + * return the modeline based on GTF algorithm
 + *
 + * This function is to create the modeline based on the GTF algorithm.
 + * Generalized Timing Formula is derived from:
 + *    GTF Spreadsheet by Andy Morrish (1/5/97)
 + *    available at http://www.vesa.org
 + *
 + * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
 + * What I have done is to translate it by using integer calculation.
 + * I also refer to the function of fb_get_mode in the file of
 + * drivers/video/fbmon.c
 + */
 +struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
 +                                    int vdisplay, int vrefresh,
 +                                    bool interlaced, int margins)
 +{
 +      /* 1) top/bottom margin size (% of height) - default: 1.8, */
 +#define       GTF_MARGIN_PERCENTAGE           18
 +      /* 2) character cell horizontal granularity (pixels) - default 8 */
 +#define       GTF_CELL_GRAN                   8
 +      /* 3) Minimum vertical porch (lines) - default 3 */
 +#define       GTF_MIN_V_PORCH                 1
 +      /* width of vsync in lines */
 +#define V_SYNC_RQD                    3
 +      /* width of hsync as % of total line */
 +#define H_SYNC_PERCENT                        8
 +      /* min time of vsync + back porch (microsec) */
 +#define MIN_VSYNC_PLUS_BP             550
 +      /* blanking formula gradient */
 +#define GTF_M                         600
 +      /* blanking formula offset */
 +#define GTF_C                         40
 +      /* blanking formula scaling factor */
 +#define GTF_K                         128
 +      /* blanking formula scaling factor */
 +#define GTF_J                         20
 +      /* C' and M' are part of the Blanking Duty Cycle computation */
 +#define GTF_C_PRIME           (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
 +#define GTF_M_PRIME           (GTF_K * GTF_M / 256)
 +      struct drm_display_mode *drm_mode;
 +      unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
 +      int top_margin, bottom_margin;
 +      int interlace;
 +      unsigned int hfreq_est;
 +      int vsync_plus_bp, vback_porch;
 +      unsigned int vtotal_lines, vfieldrate_est, hperiod;
 +      unsigned int vfield_rate, vframe_rate;
 +      int left_margin, right_margin;
 +      unsigned int total_active_pixels, ideal_duty_cycle;
 +      unsigned int hblank, total_pixels, pixel_freq;
 +      int hsync, hfront_porch, vodd_front_porch_lines;
 +      unsigned int tmp1, tmp2;
 +
 +      drm_mode = drm_mode_create(dev);
 +      if (!drm_mode)
 +              return NULL;
 +
 +      /* 1. In order to give correct results, the number of horizontal
 +       * pixels requested is first processed to ensure that it is divisible
 +       * by the character size, by rounding it to the nearest character
 +       * cell boundary:
 +       */
 +      hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
 +      hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
 +
 +      /* 2. If interlace is requested, the number of vertical lines assumed
 +       * by the calculation must be halved, as the computation calculates
 +       * the number of vertical lines per field.
 +       */
 +      if (interlaced)
 +              vdisplay_rnd = vdisplay / 2;
 +      else
 +              vdisplay_rnd = vdisplay;
 +
 +      /* 3. Find the frame rate required: */
 +      if (interlaced)
 +              vfieldrate_rqd = vrefresh * 2;
 +      else
 +              vfieldrate_rqd = vrefresh;
 +
 +      /* 4. Find number of lines in Top margin: */
 +      top_margin = 0;
 +      if (margins)
 +              top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
 +                              1000;
 +      /* 5. Find number of lines in bottom margin: */
 +      bottom_margin = top_margin;
 +
 +      /* 6. If interlace is required, then set variable interlace: */
 +      if (interlaced)
 +              interlace = 1;
 +      else
 +              interlace = 0;
 +
 +      /* 7. Estimate the Horizontal frequency */
 +      {
 +              tmp1 = (1000000  - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
 +              tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
 +                              2 + interlace;
 +              hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
 +      }
 +
 +      /* 8. Find the number of lines in V sync + back porch */
 +      /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
 +      vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
 +      vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
 +      /*  9. Find the number of lines in V back porch alone: */
 +      vback_porch = vsync_plus_bp - V_SYNC_RQD;
 +      /*  10. Find the total number of lines in Vertical field period: */
 +      vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
 +                      vsync_plus_bp + GTF_MIN_V_PORCH;
 +      /*  11. Estimate the Vertical field frequency: */
 +      vfieldrate_est = hfreq_est / vtotal_lines;
 +      /*  12. Find the actual horizontal period: */
 +      hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
 +
 +      /*  13. Find the actual Vertical field frequency: */
 +      vfield_rate = hfreq_est / vtotal_lines;
 +      /*  14. Find the Vertical frame frequency: */
 +      if (interlaced)
 +              vframe_rate = vfield_rate / 2;
 +      else
 +              vframe_rate = vfield_rate;
 +      /*  15. Find number of pixels in left margin: */
 +      if (margins)
 +              left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
 +                              1000;
 +      else
 +              left_margin = 0;
 +
 +      /* 16.Find number of pixels in right margin: */
 +      right_margin = left_margin;
 +      /* 17.Find total number of active pixels in image and left and right */
 +      total_active_pixels = hdisplay_rnd + left_margin + right_margin;
 +      /* 18.Find the ideal blanking duty cycle from blanking duty cycle */
 +      ideal_duty_cycle = GTF_C_PRIME * 1000 -
 +                              (GTF_M_PRIME * 1000000 / hfreq_est);
 +      /* 19.Find the number of pixels in the blanking time to the nearest
 +       * double character cell: */
 +      hblank = total_active_pixels * ideal_duty_cycle /
 +                      (100000 - ideal_duty_cycle);
 +      hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
 +      hblank = hblank * 2 * GTF_CELL_GRAN;
 +      /* 20.Find total number of pixels: */
 +      total_pixels = total_active_pixels + hblank;
 +      /* 21.Find pixel clock frequency: */
 +      pixel_freq = total_pixels * hfreq_est / 1000;
 +      /* Stage 1 computations are now complete; I should really pass
 +       * the results to another function and do the Stage 2 computations,
 +       * but I only need a few more values so I'll just append the
 +       * computations here for now */
 +      /* 17. Find the number of pixels in the horizontal sync period: */
 +      hsync = H_SYNC_PERCENT * total_pixels / 100;
 +      hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
 +      hsync = hsync * GTF_CELL_GRAN;
 +      /* 18. Find the number of pixels in horizontal front porch period */
 +      hfront_porch = hblank / 2 - hsync;
 +      /*  36. Find the number of lines in the odd front porch period: */
 +      vodd_front_porch_lines = GTF_MIN_V_PORCH ;
 +
 +      /* finally, pack the results in the mode struct */
 +      drm_mode->hdisplay = hdisplay_rnd;
 +      drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
 +      drm_mode->hsync_end = drm_mode->hsync_start + hsync;
 +      drm_mode->htotal = total_pixels;
 +      drm_mode->vdisplay = vdisplay_rnd;
 +      drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
 +      drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
 +      drm_mode->vtotal = vtotal_lines;
 +
 +      drm_mode->clock = pixel_freq;
 +
 +      drm_mode_set_name(drm_mode);
 +      drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
 +
 +      if (interlaced) {
 +              drm_mode->vtotal *= 2;
 +              drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
 +      }
 +
 +      return drm_mode;
 +}
 +EXPORT_SYMBOL(drm_gtf_mode);
 +/**
   * drm_mode_set_name - set the name on a mode
   * @mode: name will be set in this mode
   *
@@@ -818,7 -403,8 +818,7 @@@ void drm_mode_prune_invalid(struct drm_
                        list_del(&mode->head);
                        if (verbose) {
                                drm_mode_debug_printmodeline(mode);
 -                              DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
 -                                      "Not using %s mode %d\n",
 +                              DRM_DEBUG_KMS("Not using %s mode %d\n",
                                        mode->name, mode->status);
                        }
                        drm_mode_destroy(dev, mode);
@@@ -980,6 -566,8 +980,8 @@@ void drm_mode_connector_list_update(str
                                found_it = 1;
                                /* if equal delete the probed mode */
                                mode->status = pmode->status;
+                               /* Merge type bits together */
+                               mode->type |= pmode->type;
                                list_del(&pmode->head);
                                drm_mode_destroy(connector->dev, pmode);
                                break;
@@@ -33,6 -33,8 +33,6 @@@
  #include "i915_drm.h"
  #include "i915_drv.h"
  
 -#define I915_DRV      "i915_drv"
 -
  /* Really want an OS-independent resettable timer.  Would like to have
   * this loop run for (eg) 3 sec, but have the timer reset every time
   * the head pointer changes, so that EBUSY only happens if the ring
@@@ -99,7 -101,7 +99,7 @@@ static int i915_init_phys_hws(struct dr
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
  
        I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 -      DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
 +      DRM_DEBUG_DRIVER("Enabled hardware status page\n");
        return 0;
  }
  
@@@ -185,7 -187,8 +185,7 @@@ static int i915_initialize(struct drm_d
                master_priv->sarea_priv = (drm_i915_sarea_t *)
                        ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
        } else {
 -              DRM_DEBUG_DRIVER(I915_DRV,
 -                              "sarea not found assuming DRI2 userspace\n");
 +              DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
        }
  
        if (init->ring_size != 0) {
@@@ -235,7 -238,7 +235,7 @@@ static int i915_dma_resume(struct drm_d
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  
 -      DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
 +      DRM_DEBUG_DRIVER("%s\n", __func__);
  
        if (dev_priv->ring.map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
 -      DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n",
 +      DRM_DEBUG_DRIVER("hw status page @ %p\n",
                                dev_priv->hw_status_page);
  
        if (dev_priv->status_gfx_addr != 0)
                I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 -      DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
 +      DRM_DEBUG_DRIVER("Enabled hardware status page\n");
  
        return 0;
  }
@@@ -549,7 -552,7 +549,7 @@@ static int i915_dispatch_flip(struct dr
        if (!master_priv->sarea_priv)
                return -EINVAL;
  
 -      DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n",
 +      DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
                          __func__,
                         dev_priv->current_page,
                         master_priv->sarea_priv->pf_current_page);
@@@ -630,7 -633,8 +630,7 @@@ static int i915_batchbuffer(struct drm_
                return -EINVAL;
        }
  
 -      DRM_DEBUG_DRIVER(I915_DRV,
 -                      "i915 batchbuffer, start %x used %d cliprects %d\n",
 +      DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
                        batch->start, batch->used, batch->num_cliprects);
  
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@@ -677,7 -681,8 +677,7 @@@ static int i915_cmdbuffer(struct drm_de
        void *batch_data;
        int ret;
  
 -      DRM_DEBUG_DRIVER(I915_DRV,
 -                      "i915 cmdbuffer, buf %p sz %d cliprects %d\n",
 +      DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                        cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
  
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@@ -730,7 -735,7 +730,7 @@@ static int i915_flip_bufs(struct drm_de
  {
        int ret;
  
 -      DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
 +      DRM_DEBUG_DRIVER("%s\n", __func__);
  
        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
  
@@@ -773,7 -778,7 +773,7 @@@ static int i915_getparam(struct drm_dev
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        default:
 -              DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n",
 +              DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                        param->param);
                return -EINVAL;
        }
@@@ -814,7 -819,7 +814,7 @@@ static int i915_setparam(struct drm_dev
                dev_priv->fence_reg_start = param->value;
                break;
        default:
 -              DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n",
 +              DRM_DEBUG_DRIVER("unknown parameter %d\n",
                                        param->param);
                return -EINVAL;
        }
@@@ -841,7 -846,7 +841,7 @@@ static int i915_set_status_page(struct 
                return 0;
        }
  
 -      DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
 +      DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
  
        dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
  
  
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 -      DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n",
 +      DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
                                dev_priv->status_gfx_addr);
 -      DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n",
 +      DRM_DEBUG_DRIVER("load hws at %p\n",
                                dev_priv->hw_status_page);
        return 0;
  }
@@@ -1181,6 -1186,13 +1181,13 @@@ int i915_driver_load(struct drm_device 
        if (ret)
                goto out_iomapfree;
  
+       dev_priv->wq = create_workqueue("i915");
+       if (dev_priv->wq == NULL) {
+               DRM_ERROR("Failed to create our workqueue.\n");
+               ret = -ENOMEM;
+               goto out_iomapfree;
+       }
        /* enable GEM by default */
        dev_priv->has_gem = 1;
  
        if (!I915_NEED_GFX_HWS(dev)) {
                ret = i915_init_phys_hws(dev);
                if (ret != 0)
-                       goto out_iomapfree;
+                       goto out_workqueue_free;
        }
  
        i915_get_mem_freq(dev);
                ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
                if (ret < 0) {
                        DRM_ERROR("failed to init modeset\n");
-                       goto out_rmmap;
+                       goto out_workqueue_free;
                }
        }
  
  
        return 0;
  
+ out_workqueue_free:
+       destroy_workqueue(dev_priv->wq);
  out_iomapfree:
        io_mapping_free(dev_priv->mm.gtt_mapping);
  out_rmmap:
@@@ -1264,6 -1278,8 +1273,8 @@@ int i915_driver_unload(struct drm_devic
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
+       destroy_workqueue(dev_priv->wq);
        io_mapping_free(dev_priv->mm.gtt_mapping);
        if (dev_priv->mm.gtt_mtrr >= 0) {
                mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
@@@ -1305,7 -1321,7 +1316,7 @@@ int i915_driver_open(struct drm_device 
  {
        struct drm_i915_file_private *i915_file_priv;
  
 -      DRM_DEBUG_DRIVER(I915_DRV, "\n");
 +      DRM_DEBUG_DRIVER("\n");
        i915_file_priv = (struct drm_i915_file_private *)
            kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
  
  #include "i915_drv.h"
  #include <linux/acpi.h>
  
 -#define I915_LVDS "i915_lvds"
 -
 -/*
 - * the following four scaling options are defined.
 - * #define DRM_MODE_SCALE_NON_GPU     0
 - * #define DRM_MODE_SCALE_FULLSCREEN  1
 - * #define DRM_MODE_SCALE_NO_SCALE    2
 - * #define DRM_MODE_SCALE_ASPECT      3
 - */
 -
  /* Private structure for the integrated LVDS support */
  struct intel_lvds_priv {
        int fitting_mode;
@@@ -326,7 -336,7 +326,7 @@@ static bool intel_lvds_mode_fixup(struc
        I915_WRITE(BCLRPAT_B, 0);
  
        switch (lvds_priv->fitting_mode) {
 -      case DRM_MODE_SCALE_NO_SCALE:
 +      case DRM_MODE_SCALE_CENTER:
                /*
                 * For centered modes, we have to calculate border widths &
                 * heights and modify the values programmed into the CRTC.
@@@ -662,8 -672,9 +662,8 @@@ static int intel_lvds_set_property(stru
                                connector->encoder) {
                struct drm_crtc *crtc = connector->encoder->crtc;
                struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
 -              if (value == DRM_MODE_SCALE_NON_GPU) {
 -                      DRM_DEBUG_KMS(I915_LVDS,
 -                                      "non_GPU property is unsupported\n");
 +              if (value == DRM_MODE_SCALE_NONE) {
 +                      DRM_DEBUG_KMS("no scaling not supported\n");
                        return 0;
                }
                if (lvds_priv->fitting_mode == value) {
@@@ -720,7 -731,8 +720,7 @@@ static const struct drm_encoder_funcs i
  
  static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
  {
 -      DRM_DEBUG_KMS(I915_LVDS,
 -                    "Skipping LVDS initialization for %s\n", id->ident);
 +      DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
        return 1;
  }
  
@@@ -768,6 -780,14 +768,14 @@@ static const struct dmi_system_id intel
        },
        {
                .callback = intel_no_lvds_dmi_callback,
+               .ident = "AOpen Mini PC MP915",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+                       DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"),
+               },
+       },
+       {
+               .callback = intel_no_lvds_dmi_callback,
                .ident = "Aopen i945GTt-VFA",
                .matches = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
@@@ -872,6 -892,10 +880,10 @@@ void intel_lvds_init(struct drm_device 
        if (IS_IGDNG(dev)) {
                if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
                        return;
+               if (dev_priv->edp_support) {
+                       DRM_DEBUG("disable LVDS for eDP support\n");
+                       return;
+               }
                gpio = PCH_GPIOC;
        }
  
@@@ -1001,7 -1025,7 +1013,7 @@@ out
        return;
  
  failed:
 -      DRM_DEBUG_KMS(I915_LVDS, "No LVDS modes found, disabling.\n");
 +      DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
        if (intel_output->ddc_bus)
                intel_i2c_destroy(intel_output->ddc_bus);
        drm_connector_cleanup(connector);
  #include "drm.h"
  #include "drm_crtc.h"
  #include "intel_drv.h"
+ #include "drm_edid.h"
  #include "i915_drm.h"
  #include "i915_drv.h"
  #include "intel_sdvo_regs.h"
  
  #undef SDVO_DEBUG
 -#define I915_SDVO     "i915_sdvo"
  struct intel_sdvo_priv {
        u8 slave_addr;
  
        /* Pixel clock limitations reported by the SDVO device, in kHz */
        int pixel_clock_min, pixel_clock_max;
  
+       /*
+       * For multiple function SDVO device,
+       * this is for current attached outputs.
+       */
+       uint16_t attached_output;
        /**
         * This is set if we're going to treat the device as TV-out.
         *
        u32 save_SDVOX;
  };
  
+ static bool
+ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags);
  /**
   * Writes the SDVOB or SDVOC with the given value, but always writes both
   * SDVOB and SDVOC to work around apparent hardware issues (according to
@@@ -177,7 -188,7 +187,7 @@@ static bool intel_sdvo_read_byte(struc
                return true;
        }
  
 -      DRM_DEBUG("i2c transfer returned %d\n", ret);
 +      DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
        return false;
  }
  
@@@ -287,7 -298,7 +297,7 @@@ static void intel_sdvo_debug_write(stru
        struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
        int i;
  
 -      DRM_DEBUG_KMS(I915_SDVO, "%s: W: %02X ",
 +      DRM_DEBUG_KMS("%s: W: %02X ",
                                SDVO_NAME(sdvo_priv), cmd);
        for (i = 0; i < args_len; i++)
                DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
@@@ -340,7 -351,7 +350,7 @@@ static void intel_sdvo_debug_response(s
        struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
        int i;
  
 -      DRM_DEBUG_KMS(I915_SDVO, "%s: R: ", SDVO_NAME(sdvo_priv));
 +      DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
        for (i = 0; i < response_len; i++)
                DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
        for (; i < 8; i++)
@@@ -657,10 -668,10 +667,10 @@@ static int intel_sdvo_get_clock_rate_mu
        status = intel_sdvo_read_response(intel_output, &response, 1);
  
        if (status != SDVO_CMD_STATUS_SUCCESS) {
 -              DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
 +              DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
                return SDVO_CLOCK_RATE_MULT_1X;
        } else {
 -              DRM_DEBUG("Current clock rate multiplier: %d\n", response);
 +              DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
        }
  
        return response;
@@@ -941,14 -952,14 +951,14 @@@ static void intel_sdvo_set_tv_format(st
        format = &sdvo_priv->tv_format;
        memset(&unset, 0, sizeof(unset));
        if (memcmp(format, &unset, sizeof(*format))) {
 -              DRM_DEBUG("%s: Choosing default TV format of NTSC-M\n",
 +              DRM_DEBUG_KMS("%s: Choosing default TV format of NTSC-M\n",
                                SDVO_NAME(sdvo_priv));
                format->ntsc_m = 1;
                intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, format,
                                sizeof(*format));
                status = intel_sdvo_read_response(output, NULL, 0);
                if (status != SDVO_CMD_STATUS_SUCCESS)
 -                      DRM_DEBUG("%s: Failed to set TV format\n",
 +                      DRM_DEBUG_KMS("%s: Failed to set TV format\n",
                                        SDVO_NAME(sdvo_priv));
        }
  }
@@@ -1219,8 -1230,8 +1229,8 @@@ static void intel_sdvo_dpms(struct drm_
                 * a given it the status is a success, we succeeded.
                 */
                if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
 -                      DRM_DEBUG("First %s output reported failure to sync\n",
 -                                 SDVO_NAME(sdvo_priv));
 +                      DRM_DEBUG_KMS("First %s output reported failure to "
 +                                      "sync\n", SDVO_NAME(sdvo_priv));
                }
  
                if (0)
@@@ -1315,8 -1326,8 +1325,8 @@@ static void intel_sdvo_restore(struct d
                        intel_wait_for_vblank(dev);
                status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2);
                if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
 -                      DRM_DEBUG("First %s output reported failure to sync\n",
 -                                 SDVO_NAME(sdvo_priv));
 +                      DRM_DEBUG_KMS("First %s output reported failure to "
 +                                      "sync\n", SDVO_NAME(sdvo_priv));
        }
  
        intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs);
@@@ -1394,7 -1405,7 +1404,7 @@@ int intel_sdvo_supports_hotplug(struct 
        u8 response[2];
        u8 status;
        struct intel_output *intel_output;
 -      DRM_DEBUG("\n");
 +      DRM_DEBUG_KMS("\n");
  
        if (!connector)
                return 0;
@@@ -1434,41 -1445,96 +1444,96 @@@ void intel_sdvo_set_hotplug(struct drm_
        intel_sdvo_read_response(intel_output, &response, 2);
  }
  
- static void
- intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+ static bool
+ intel_sdvo_multifunc_encoder(struct intel_output *intel_output)
+ {
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       int caps = 0;
+       if (sdvo_priv->caps.output_flags &
+               (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+               caps++;
+       if (sdvo_priv->caps.output_flags &
+               (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
+               caps++;
+       if (sdvo_priv->caps.output_flags &
+               (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID0))
+               caps++;
+       if (sdvo_priv->caps.output_flags &
+               (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
+               caps++;
+       if (sdvo_priv->caps.output_flags &
+               (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
+               caps++;
+       if (sdvo_priv->caps.output_flags &
+               (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
+               caps++;
+       if (sdvo_priv->caps.output_flags &
+               (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
+               caps++;
+       return (caps > 1);
+ }
+ enum drm_connector_status
+ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
  {
        struct intel_output *intel_output = to_intel_output(connector);
        struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       enum drm_connector_status status = connector_status_connected;
        struct edid *edid = NULL;
  
        edid = drm_get_edid(&intel_output->base,
                            intel_output->ddc_bus);
        if (edid != NULL) {
-               sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+               /* Don't report the output as connected if it's a DVI-I
+                * connector with a non-digital EDID coming out.
+                */
+               if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
+                       if (edid->input & DRM_EDID_INPUT_DIGITAL)
+                               sdvo_priv->is_hdmi =
+                                       drm_detect_hdmi_monitor(edid);
+                       else
+                               status = connector_status_disconnected;
+               }
                kfree(edid);
                intel_output->base.display_info.raw_edid = NULL;
-       }
+       } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+               status = connector_status_disconnected;
+       return status;
  }
  
  static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
  {
-       u8 response[2];
+       uint16_t response;
        u8 status;
        struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
  
        intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
        status = intel_sdvo_read_response(intel_output, &response, 2);
  
-       DRM_DEBUG_KMS("SDVO response %d %d\n", response[0], response[1]);
 -      DRM_DEBUG("SDVO response %d %d\n", response & 0xff, response >> 8);
++      DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
  
        if (status != SDVO_CMD_STATUS_SUCCESS)
                return connector_status_unknown;
  
-       if ((response[0] != 0) || (response[1] != 0)) {
-               intel_sdvo_hdmi_sink_detect(connector);
-               return connector_status_connected;
-       } else
+       if (response == 0)
                return connector_status_disconnected;
+       if (intel_sdvo_multifunc_encoder(intel_output) &&
+               sdvo_priv->attached_output != response) {
+               if (sdvo_priv->controlled_output != response &&
+                       intel_sdvo_output_setup(intel_output, response) != true)
+                       return connector_status_unknown;
+               sdvo_priv->attached_output = response;
+       }
+       return intel_sdvo_hdmi_sink_detect(connector, response);
  }
  
  static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
@@@ -1865,16 -1931,101 +1930,100 @@@ intel_sdvo_get_slave_addr(struct drm_de
                return 0x72;
  }
  
 -              DRM_DEBUG_KMS(I915_SDVO,
 -                              "%s: Unknown SDVO output type (0x%02x%02x)\n",
 -                                SDVO_NAME(sdvo_priv),
 -                                bytes[0], bytes[1]);
+ static bool
+ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
+ {
+       struct drm_connector *connector = &intel_output->base;
+       struct drm_encoder *encoder = &intel_output->enc;
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       bool ret = true, registered = false;
+       sdvo_priv->is_tv = false;
+       intel_output->needs_tv_clock = false;
+       sdvo_priv->is_lvds = false;
+       if (device_is_registered(&connector->kdev)) {
+               drm_sysfs_connector_remove(connector);
+               registered = true;
+       }
+       if (flags &
+           (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
+               if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
+                       sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
+               else
+                       sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
+               encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+               connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+               if (intel_sdvo_get_supp_encode(intel_output,
+                                              &sdvo_priv->encode) &&
+                   intel_sdvo_get_digital_encoding_mode(intel_output) &&
+                   sdvo_priv->is_hdmi) {
+                       /* enable hdmi encoding mode if supported */
+                       intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI);
+                       intel_sdvo_set_colorimetry(intel_output,
+                                                  SDVO_COLORIMETRY_RGB256);
+                       connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+               }
+       } else if (flags & SDVO_OUTPUT_SVID0) {
+               sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
+               encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+               connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+               sdvo_priv->is_tv = true;
+               intel_output->needs_tv_clock = true;
+       } else if (flags & SDVO_OUTPUT_RGB0) {
+               sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
+               encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+               connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+       } else if (flags & SDVO_OUTPUT_RGB1) {
+               sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
+               encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+               connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+       } else if (flags & SDVO_OUTPUT_LVDS0) {
+               sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
+               encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+               connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+               sdvo_priv->is_lvds = true;
+       } else if (flags & SDVO_OUTPUT_LVDS1) {
+               sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
+               encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+               connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+               sdvo_priv->is_lvds = true;
+       } else {
+               unsigned char bytes[2];
+               sdvo_priv->controlled_output = 0;
+               memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
++              DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
++                            SDVO_NAME(sdvo_priv),
++                            bytes[0], bytes[1]);
+               ret = false;
+       }
+       if (ret && registered)
+               ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
+       return ret;
+ }
  bool intel_sdvo_init(struct drm_device *dev, int output_device)
  {
        struct drm_connector *connector;
        struct intel_output *intel_output;
        struct intel_sdvo_priv *sdvo_priv;
  
-       int connector_type;
        u8 ch[0x40];
        int i;
-       int encoder_type;
  
        intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
        if (!intel_output) {
        /* Read the regs to test if we can talk to the device */
        for (i = 0; i < 0x40; i++) {
                if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
 -                      DRM_DEBUG_KMS(I915_SDVO,
 -                                      "No SDVO device found on SDVO%c\n",
 +                      DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
                                        output_device == SDVOB ? 'B' : 'C');
                        goto err_i2c;
                }
        intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
  
        /* In defaut case sdvo lvds is false */
-       sdvo_priv->is_lvds = false;
        intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
  
-       if (sdvo_priv->caps.output_flags &
-           (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
-               if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
-                       sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
-               else
-                       sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
-               encoder_type = DRM_MODE_ENCODER_TMDS;
-               connector_type = DRM_MODE_CONNECTOR_DVID;
-               if (intel_sdvo_get_supp_encode(intel_output,
-                                              &sdvo_priv->encode) &&
-                   intel_sdvo_get_digital_encoding_mode(intel_output) &&
-                   sdvo_priv->is_hdmi) {
-                       /* enable hdmi encoding mode if supported */
-                       intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI);
-                       intel_sdvo_set_colorimetry(intel_output,
-                                                  SDVO_COLORIMETRY_RGB256);
-                       connector_type = DRM_MODE_CONNECTOR_HDMIA;
-               }
-       }
-       else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0)
-       {
-               sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
-               encoder_type = DRM_MODE_ENCODER_TVDAC;
-               connector_type = DRM_MODE_CONNECTOR_SVIDEO;
-               sdvo_priv->is_tv = true;
-               intel_output->needs_tv_clock = true;
-       }
-       else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
-       {
-               sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
-               encoder_type = DRM_MODE_ENCODER_DAC;
-               connector_type = DRM_MODE_CONNECTOR_VGA;
-       }
-       else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1)
-       {
-               sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
-               encoder_type = DRM_MODE_ENCODER_DAC;
-               connector_type = DRM_MODE_CONNECTOR_VGA;
-       }
-       else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0)
-       {
-               sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
-               encoder_type = DRM_MODE_ENCODER_LVDS;
-               connector_type = DRM_MODE_CONNECTOR_LVDS;
-               sdvo_priv->is_lvds = true;
-       }
-       else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1)
-       {
-               sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
-               encoder_type = DRM_MODE_ENCODER_LVDS;
-               connector_type = DRM_MODE_CONNECTOR_LVDS;
-               sdvo_priv->is_lvds = true;
-       }
-       else
-       {
-               unsigned char bytes[2];
-               sdvo_priv->controlled_output = 0;
-               memcpy (bytes, &sdvo_priv->caps.output_flags, 2);
-               DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
-                                 SDVO_NAME(sdvo_priv),
-                                 bytes[0], bytes[1]);
-               encoder_type = DRM_MODE_ENCODER_NONE;
-               connector_type = DRM_MODE_CONNECTOR_Unknown;
+       if (intel_sdvo_output_setup(intel_output,
+                                   sdvo_priv->caps.output_flags) != true) {
 -              DRM_DEBUG("SDVO output failed to setup on SDVO%c\n",
++              DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+                         output_device == SDVOB ? 'B' : 'C');
                goto err_i2c;
        }
  
        connector = &intel_output->base;
        drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
-                          connector_type);
+                          connector->connector_type);
        drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
        connector->display_info.subpixel_order = SubPixelHorizontalRGB;
  
-       drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type);
+       drm_encoder_init(dev, &intel_output->enc,
+                       &intel_sdvo_enc_funcs, intel_output->enc.encoder_type);
        drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
  
        drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
                                               &sdvo_priv->pixel_clock_max);
  
  
 -      DRM_DEBUG_KMS(I915_SDVO, "%s device VID/DID: %02X:%02X.%02X, "
 +      DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
                        "clock range %dMHz - %dMHz, "
                        "input 1: %c, input 2: %c, "
                        "output 1: %c, output 2: %c\n",
@@@ -77,25 -77,9 +77,25 @@@ static int radeon_ttm_global_init(struc
        global_ref->release = &radeon_ttm_mem_global_release;
        r = ttm_global_item_ref(global_ref);
        if (r != 0) {
 -              DRM_ERROR("Failed referencing a global TTM memory object.\n");
 +              DRM_ERROR("Failed setting up TTM memory accounting "
 +                        "subsystem.\n");
                return r;
        }
 +
 +      rdev->mman.bo_global_ref.mem_glob =
 +              rdev->mman.mem_global_ref.object;
 +      global_ref = &rdev->mman.bo_global_ref.ref;
 +      global_ref->global_type = TTM_GLOBAL_TTM_BO;
 +      global_ref->size = sizeof(struct ttm_mem_global);
 +      global_ref->init = &ttm_bo_global_init;
 +      global_ref->release = &ttm_bo_global_release;
 +      r = ttm_global_item_ref(global_ref);
 +      if (r != 0) {
 +              DRM_ERROR("Failed setting up TTM BO subsystem.\n");
 +              ttm_global_item_unref(&rdev->mman.mem_global_ref);
 +              return r;
 +      }
 +
        rdev->mman.mem_global_referenced = true;
        return 0;
  }
  static void radeon_ttm_global_fini(struct radeon_device *rdev)
  {
        if (rdev->mman.mem_global_referenced) {
 +              ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
                ttm_global_item_unref(&rdev->mman.mem_global_ref);
                rdev->mman.mem_global_referenced = false;
        }
@@@ -303,11 -286,9 +303,11 @@@ static int radeon_move_vram_ram(struct 
        r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
  out_cleanup:
        if (tmp_mem.mm_node) {
 -              spin_lock(&rdev->mman.bdev.lru_lock);
 +              struct ttm_bo_global *glob = rdev->mman.bdev.glob;
 +
 +              spin_lock(&glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
 -              spin_unlock(&rdev->mman.bdev.lru_lock);
 +              spin_unlock(&glob->lru_lock);
                return r;
        }
        return r;
@@@ -342,11 -323,9 +342,11 @@@ static int radeon_move_ram_vram(struct 
        }
  out_cleanup:
        if (tmp_mem.mm_node) {
 -              spin_lock(&rdev->mman.bdev.lru_lock);
 +              struct ttm_bo_global *glob = rdev->mman.bdev.glob;
 +
 +              spin_lock(&glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
 -              spin_unlock(&rdev->mman.bdev.lru_lock);
 +              spin_unlock(&glob->lru_lock);
                return r;
        }
        return r;
@@@ -376,23 -355,26 +376,26 @@@ static int radeon_bo_move(struct ttm_bu
        if (!rdev->cp.ready) {
                /* use memcpy */
                DRM_ERROR("CP is not ready use memcpy.\n");
-               return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+               goto memcpy;
        }
  
        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
-               return radeon_move_vram_ram(bo, evict, interruptible,
+               r = radeon_move_vram_ram(bo, evict, interruptible,
                                            no_wait, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
-               return radeon_move_ram_vram(bo, evict, interruptible,
+               r = radeon_move_ram_vram(bo, evict, interruptible,
                                            no_wait, new_mem);
        } else {
                r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
-               if (unlikely(r)) {
-                       return r;
-               }
        }
+       if (r) {
+ memcpy:
+               r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+       }
        return r;
  }
  
@@@ -450,6 -432,8 +453,8 @@@ static struct ttm_bo_driver radeon_bo_d
        .sync_obj_flush = &radeon_sync_obj_flush,
        .sync_obj_unref = &radeon_sync_obj_unref,
        .sync_obj_ref = &radeon_sync_obj_ref,
+       .move_notify = &radeon_bo_move_notify,
+       .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
  };
  
  int radeon_ttm_init(struct radeon_device *rdev)
        }
        /* No others user of address space so set it to 0 */
        r = ttm_bo_device_init(&rdev->mman.bdev,
 -                             rdev->mman.mem_global_ref.object,
 +                             rdev->mman.bo_global_ref.ref.object,
-                              &radeon_bo_driver, DRM_FILE_PAGE_OFFSET);
+                              &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
+                              rdev->need_dma32);
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                return r;
        }
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
-                          ((rdev->mc.aper_size) >> PAGE_SHIFT));
+                          ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
                return r;
        }
        DRM_INFO("radeon: %uM of VRAM memory ready\n",
-                rdev->mc.vram_size / (1024 * 1024));
+                rdev->mc.real_vram_size / (1024 * 1024));
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
                           ((rdev->mc.gtt_size) >> PAGE_SHIFT));
        if (r) {
  #define TTM_BO_HASH_ORDER 13
  
  static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
- static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
  static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
 +static void ttm_bo_global_kobj_release(struct kobject *kobj);
 +
 +static struct attribute ttm_bo_count = {
 +      .name = "bo_count",
 +      .mode = S_IRUGO
 +};
 +
 +static ssize_t ttm_bo_global_show(struct kobject *kobj,
 +                                struct attribute *attr,
 +                                char *buffer)
 +{
 +      struct ttm_bo_global *glob =
 +              container_of(kobj, struct ttm_bo_global, kobj);
 +
 +      return snprintf(buffer, PAGE_SIZE, "%lu\n",
 +                      (unsigned long) atomic_read(&glob->bo_count));
 +}
 +
 +static struct attribute *ttm_bo_global_attrs[] = {
 +      &ttm_bo_count,
 +      NULL
 +};
 +
 +static struct sysfs_ops ttm_bo_global_ops = {
 +      .show = &ttm_bo_global_show
 +};
 +
 +static struct kobj_type ttm_bo_glob_kobj_type  = {
 +      .release = &ttm_bo_global_kobj_release,
 +      .sysfs_ops = &ttm_bo_global_ops,
 +      .default_attrs = ttm_bo_global_attrs
 +};
 +
  
  static inline uint32_t ttm_bo_type_flags(unsigned type)
  {
@@@ -100,11 -66,10 +99,11 @@@ static void ttm_bo_release_list(struct 
  
        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
 +      atomic_dec(&bo->glob->bo_count);
        if (bo->destroy)
                bo->destroy(bo);
        else {
 -              ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
 +              ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
  }
@@@ -141,7 -106,7 +140,7 @@@ static void ttm_bo_add_to_lru(struct tt
                kref_get(&bo->list_kref);
  
                if (bo->ttm != NULL) {
 -                      list_add_tail(&bo->swap, &bdev->swap_lru);
 +                      list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
@@@ -176,7 -141,7 +175,7 @@@ int ttm_bo_reserve_locked(struct ttm_bu
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
  {
 -      struct ttm_bo_device *bdev = bo->bdev;
 +      struct ttm_bo_global *glob = bo->glob;
        int ret;
  
        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (no_wait)
                        return -EBUSY;
  
 -              spin_unlock(&bdev->lru_lock);
 +              spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
 -              spin_lock(&bdev->lru_lock);
 +              spin_lock(&glob->lru_lock);
  
                if (unlikely(ret))
                        return ret;
@@@ -216,16 -181,16 +215,16 @@@ int ttm_bo_reserve(struct ttm_buffer_ob
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
  {
 -      struct ttm_bo_device *bdev = bo->bdev;
 +      struct ttm_bo_global *glob = bo->glob;
        int put_count = 0;
        int ret;
  
 -      spin_lock(&bdev->lru_lock);
 +      spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
 -      spin_unlock(&bdev->lru_lock);
 +      spin_unlock(&glob->lru_lock);
  
        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);
  
  void ttm_bo_unreserve(struct ttm_buffer_object *bo)
  {
 -      struct ttm_bo_device *bdev = bo->bdev;
 +      struct ttm_bo_global *glob = bo->glob;
  
 -      spin_lock(&bdev->lru_lock);
 +      spin_lock(&glob->lru_lock);
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
 -      spin_unlock(&bdev->lru_lock);
 +      spin_unlock(&glob->lru_lock);
  }
  EXPORT_SYMBOL(ttm_bo_unreserve);
  
  static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
  {
        struct ttm_bo_device *bdev = bo->bdev;
 +      struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;
  
        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;
  
+       if (bdev->need_dma32)
+               page_flags |= TTM_PAGE_FLAG_DMA32;
        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 -                                      page_flags, bdev->dummy_read_page);
 +                                      page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
 -                                      bdev->dummy_read_page);
 +                                      glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
@@@ -339,6 -306,9 +341,9 @@@ static int ttm_bo_handle_move_mem(struc
  
        }
  
+       if (bdev->driver->move_notify)
+               bdev->driver->move_notify(bo, mem);
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
@@@ -390,7 -360,6 +395,7 @@@ out_err
  static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
  {
        struct ttm_bo_device *bdev = bo->bdev;
 +      struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;
  
  
                spin_unlock(&bo->lock);
  
 -              spin_lock(&bdev->lru_lock);
 +              spin_lock(&glob->lru_lock);
                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
                BUG_ON(ret);
                if (bo->ttm)
                        bo->mem.mm_node = NULL;
                }
                put_count = ttm_bo_del_from_lru(bo);
 -              spin_unlock(&bdev->lru_lock);
 +              spin_unlock(&glob->lru_lock);
  
                atomic_set(&bo->reserved, 0);
  
                return 0;
        }
  
 -      spin_lock(&bdev->lru_lock);
 +      spin_lock(&glob->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
                void *sync_obj_arg = bo->sync_obj_arg;
  
                kref_get(&bo->list_kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 -              spin_unlock(&bdev->lru_lock);
 +              spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);
  
                if (sync_obj)
                ret = 0;
  
        } else {
 -              spin_unlock(&bdev->lru_lock);
 +              spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);
                ret = -EBUSY;
        }
  
  static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
  {
 +      struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry, *nentry;
        struct list_head *list, *next;
        int ret;
  
 -      spin_lock(&bdev->lru_lock);
 +      spin_lock(&glob->lru_lock);
        list_for_each_safe(list, next, &bdev->ddestroy) {
                entry = list_entry(list, struct ttm_buffer_object, ddestroy);
                nentry = NULL;
                }
                kref_get(&entry->list_kref);
  
 -              spin_unlock(&bdev->lru_lock);
 +              spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);
  
 -              spin_lock(&bdev->lru_lock);
 +              spin_lock(&glob->lru_lock);
                if (nentry) {
                        bool next_onlist = !list_empty(next);
 -                      spin_unlock(&bdev->lru_lock);
 +                      spin_unlock(&glob->lru_lock);
                        kref_put(&nentry->list_kref, ttm_bo_release_list);
 -                      spin_lock(&bdev->lru_lock);
 +                      spin_lock(&glob->lru_lock);
                        /*
                         * Someone might have raced us and removed the
                         * next entry from the list. We don't bother restarting
                        break;
        }
        ret = !list_empty(&bdev->ddestroy);
 -      spin_unlock(&bdev->lru_lock);
 +      spin_unlock(&glob->lru_lock);
  
        return ret;
  }
@@@ -554,7 -522,6 +559,7 @@@ static int ttm_bo_evict(struct ttm_buff
  {
        int ret = 0;
        struct ttm_bo_device *bdev = bo->bdev;
 +      struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_reg evict_mem;
        uint32_t proposed_placement;
  
                goto out;
        }
  
 -      spin_lock(&bdev->lru_lock);
 +      spin_lock(&glob->lru_lock);
        if (evict_mem.mm_node) {
                drm_mm_put_block(evict_mem.mm_node);
                evict_mem.mm_node = NULL;
        }
 -      spin_unlock(&bdev->lru_lock);
 +      spin_unlock(&glob->lru_lock);
        bo->evicted = true;
  out:
        return ret;
@@@ -623,7 -590,6 +628,7 @@@ static int ttm_bo_mem_force_space(struc
                                  uint32_t mem_type,
                                  bool interruptible, bool no_wait)
  {
 +      struct ttm_bo_global *glob = bdev->glob;
        struct drm_mm_node *node;
        struct ttm_buffer_object *entry;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@@ -637,7 -603,7 +642,7 @@@ retry_pre_get
        if (unlikely(ret != 0))
                return ret;
  
 -      spin_lock(&bdev->lru_lock);
 +      spin_lock(&glob->lru_lock);
        do {
                node = drm_mm_search_free(&man->manager, num_pages,
                                          mem->page_alignment, 1);
                if (likely(ret == 0))
                        put_count = ttm_bo_del_from_lru(entry);
  
 -              spin_unlock(&bdev->lru_lock);
 +              spin_unlock(&glob->lru_lock);
  
                if (unlikely(ret != 0))
                        return ret;
                if (ret)
                        return ret;
  
 -              spin_lock(&bdev->lru_lock);
 +              spin_lock(&glob->lru_lock);
        } while (1);
  
        if (!node) {
 -              spin_unlock(&bdev->lru_lock);
 +              spin_unlock(&glob->lru_lock);
                return -ENOMEM;
        }
  
        node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
        if (unlikely(!node)) {
 -              spin_unlock(&bdev->lru_lock);
 +              spin_unlock(&glob->lru_lock);
                goto retry_pre_get;
        }
  
 -      spin_unlock(&bdev->lru_lock);
 +      spin_unlock(&glob->lru_lock);
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
  }
  
+ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+                                     uint32_t cur_placement,
+                                     uint32_t proposed_placement)
+ {
+       uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
+       uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
+       /**
+        * Keep current caching if possible.
+        */
+       if ((cur_placement & caching) != 0)
+               result |= (cur_placement & caching);
+       else if ((man->default_caching & caching) != 0)
+               result |= man->default_caching;
+       else if ((TTM_PL_FLAG_CACHED & caching) != 0)
+               result |= TTM_PL_FLAG_CACHED;
+       else if ((TTM_PL_FLAG_WC & caching) != 0)
+               result |= TTM_PL_FLAG_WC;
+       else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
+               result |= TTM_PL_FLAG_UNCACHED;
+       return result;
+ }
  static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
-                                uint32_t mask, uint32_t *res_mask)
+                                uint32_t proposed_placement,
+                                uint32_t *masked_placement)
  {
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);
  
        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;
  
-       if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
+       if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;
  
-       if ((mask & man->available_caching) == 0)
+       if ((proposed_placement & man->available_caching) == 0)
                return false;
-       if (mask & man->default_caching)
-               cur_flags |= man->default_caching;
-       else if (mask & TTM_PL_FLAG_CACHED)
-               cur_flags |= TTM_PL_FLAG_CACHED;
-       else if (mask & TTM_PL_FLAG_WC)
-               cur_flags |= TTM_PL_FLAG_WC;
-       else
-               cur_flags |= TTM_PL_FLAG_UNCACHED;
  
-       *res_mask = cur_flags;
+       cur_flags |= (proposed_placement & man->available_caching);
+       *masked_placement = cur_flags;
        return true;
  }
  
@@@ -736,7 -723,6 +762,7 @@@ int ttm_bo_mem_space(struct ttm_buffer_
                     bool interruptible, bool no_wait)
  {
        struct ttm_bo_device *bdev = bo->bdev;
 +      struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_type_manager *man;
  
        uint32_t num_prios = bdev->driver->num_mem_type_prio;
                if (!type_ok)
                        continue;
  
+               cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+                                                 cur_flags);
                if (mem_type == TTM_PL_SYSTEM)
                        break;
  
                                if (unlikely(ret))
                                        return ret;
  
 -                              spin_lock(&bdev->lru_lock);
 +                              spin_lock(&glob->lru_lock);
                                node = drm_mm_search_free(&man->manager,
                                                          mem->num_pages,
                                                          mem->page_alignment,
                                                          1);
                                if (unlikely(!node)) {
 -                                      spin_unlock(&bdev->lru_lock);
 +                                      spin_unlock(&glob->lru_lock);
                                        break;
                                }
                                node = drm_mm_get_block_atomic(node,
                                                               mem->num_pages,
                                                               mem->
                                                               page_alignment);
 -                              spin_unlock(&bdev->lru_lock);
 +                              spin_unlock(&glob->lru_lock);
                        } while (!node);
                }
                if (node)
                                          proposed_placement, &cur_flags))
                        continue;
  
+               cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+                                                 cur_flags);
                ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
                                             interruptible, no_wait);
  
@@@ -856,7 -848,7 +888,7 @@@ int ttm_bo_move_buffer(struct ttm_buffe
                       uint32_t proposed_placement,
                       bool interruptible, bool no_wait)
  {
 -      struct ttm_bo_device *bdev = bo->bdev;
 +      struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        struct ttm_mem_reg mem;
  
  
  out_unlock:
        if (ret && mem.mm_node) {
 -              spin_lock(&bdev->lru_lock);
 +              spin_lock(&glob->lru_lock);
                drm_mm_put_block(mem.mm_node);
 -              spin_unlock(&bdev->lru_lock);
 +              spin_unlock(&glob->lru_lock);
        }
        return ret;
  }
@@@ -1030,7 -1022,6 +1062,7 @@@ int ttm_buffer_object_init(struct ttm_b
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
 +      bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->seq_valid = false;
        bo->persistant_swap_storage = persistant_swap_storage;
        bo->acc_size = acc_size;
 +      atomic_inc(&bo->glob->bo_count);
  
        ret = ttm_bo_check_placement(bo, flags, 0ULL);
        if (unlikely(ret != 0))
@@@ -1082,13 -1072,13 +1114,13 @@@ out_err
  }
  EXPORT_SYMBOL(ttm_buffer_object_init);
  
 -static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
 +static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
                                 unsigned long num_pages)
  {
        size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
            PAGE_MASK;
  
 -      return bdev->ttm_bo_size + 2 * page_array_size;
 +      return glob->ttm_bo_size + 2 * page_array_size;
  }
  
  int ttm_buffer_object_create(struct ttm_bo_device *bdev,
  {
        struct ttm_buffer_object *bo;
        int ret;
 -      struct ttm_mem_global *mem_glob = bdev->mem_glob;
 +      struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
  
        size_t acc_size =
 -          ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
 -      ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
 +          ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
 +      ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;
  
        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
  
        if (unlikely(bo == NULL)) {
 -              ttm_mem_global_free(mem_glob, acc_size, false);
 +              ttm_mem_global_free(mem_glob, acc_size);
                return -ENOMEM;
        }
  
@@@ -1160,7 -1150,6 +1192,7 @@@ static int ttm_bo_force_list_clean(stru
                                   struct list_head *head,
                                   unsigned mem_type, bool allow_errors)
  {
 +      struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry;
        int ret;
        int put_count;
         * Can't use standard list traversal since we're unlocking.
         */
  
 -      spin_lock(&bdev->lru_lock);
 +      spin_lock(&glob->lru_lock);
  
        while (!list_empty(head)) {
                entry = list_first_entry(head, struct ttm_buffer_object, lru);
                kref_get(&entry->list_kref);
                ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
                put_count = ttm_bo_del_from_lru(entry);
 -              spin_unlock(&bdev->lru_lock);
 +              spin_unlock(&glob->lru_lock);
                while (put_count--)
                        kref_put(&entry->list_kref, ttm_bo_ref_bug);
                BUG_ON(ret);
                ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
                ttm_bo_unreserve(entry);
                kref_put(&entry->list_kref, ttm_bo_release_list);
 -              spin_lock(&bdev->lru_lock);
 +              spin_lock(&glob->lru_lock);
        }
  
 -      spin_unlock(&bdev->lru_lock);
 +      spin_unlock(&glob->lru_lock);
  
        return 0;
  }
  
  int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
  {
-       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 +      struct ttm_bo_global *glob = bdev->glob;
+       struct ttm_mem_type_manager *man;
        int ret = -EINVAL;
  
        if (mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
                return ret;
        }
+       man = &bdev->man[mem_type];
  
        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
        if (mem_type > 0) {
                ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
  
 -              spin_lock(&bdev->lru_lock);
 +              spin_lock(&glob->lru_lock);
                if (drm_mm_clean(&man->manager))
                        drm_mm_takedown(&man->manager);
                else
                        ret = -EBUSY;
  
 -              spin_unlock(&bdev->lru_lock);
 +              spin_unlock(&glob->lru_lock);
        }
  
        return ret;
@@@ -1295,83 -1284,11 +1328,83 @@@ int ttm_bo_init_mm(struct ttm_bo_devic
  }
  EXPORT_SYMBOL(ttm_bo_init_mm);
  
 +static void ttm_bo_global_kobj_release(struct kobject *kobj)
 +{
 +      struct ttm_bo_global *glob =
 +              container_of(kobj, struct ttm_bo_global, kobj);
 +
 +      printk(KERN_INFO TTM_PFX "Freeing bo global.\n");
 +      ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
 +      __free_page(glob->dummy_read_page);
 +      kfree(glob);
 +}
 +
 +void ttm_bo_global_release(struct ttm_global_reference *ref)
 +{
 +      struct ttm_bo_global *glob = ref->object;
 +
 +      kobject_del(&glob->kobj);
 +      kobject_put(&glob->kobj);
 +}
 +EXPORT_SYMBOL(ttm_bo_global_release);
 +
 +int ttm_bo_global_init(struct ttm_global_reference *ref)
 +{
 +      struct ttm_bo_global_ref *bo_ref =
 +              container_of(ref, struct ttm_bo_global_ref, ref);
 +      struct ttm_bo_global *glob = ref->object;
 +      int ret;
 +
 +      mutex_init(&glob->device_list_mutex);
 +      spin_lock_init(&glob->lru_lock);
 +      glob->mem_glob = bo_ref->mem_glob;
 +      glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 +
 +      if (unlikely(glob->dummy_read_page == NULL)) {
 +              ret = -ENOMEM;
 +              goto out_no_drp;
 +      }
 +
 +      INIT_LIST_HEAD(&glob->swap_lru);
 +      INIT_LIST_HEAD(&glob->device_list);
 +
 +      ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
 +      ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
 +      if (unlikely(ret != 0)) {
 +              printk(KERN_ERR TTM_PFX
 +                     "Could not register buffer object swapout.\n");
 +              goto out_no_shrink;
 +      }
 +
 +      glob->ttm_bo_extra_size =
 +              ttm_round_pot(sizeof(struct ttm_tt)) +
 +              ttm_round_pot(sizeof(struct ttm_backend));
 +
 +      glob->ttm_bo_size = glob->ttm_bo_extra_size +
 +              ttm_round_pot(sizeof(struct ttm_buffer_object));
 +
 +      atomic_set(&glob->bo_count, 0);
 +
 +      kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
 +      ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
 +      if (unlikely(ret != 0))
 +              kobject_put(&glob->kobj);
 +      return ret;
 +out_no_shrink:
 +      __free_page(glob->dummy_read_page);
 +out_no_drp:
 +      kfree(glob);
 +      return ret;
 +}
 +EXPORT_SYMBOL(ttm_bo_global_init);
 +
 +
  int ttm_bo_device_release(struct ttm_bo_device *bdev)
  {
        int ret = 0;
        unsigned i = TTM_NUM_MEM_TYPES;
        struct ttm_mem_type_manager *man;
 +      struct ttm_bo_global *glob = bdev->glob;
  
        while (i--) {
                man = &bdev->man[i];
                }
        }
  
 +      mutex_lock(&glob->device_list_mutex);
 +      list_del(&bdev->device_list);
 +      mutex_unlock(&glob->device_list_mutex);
 +
        if (!cancel_delayed_work(&bdev->wq))
                flush_scheduled_work();
  
        while (ttm_bo_delayed_delete(bdev, true))
                ;
  
 -      spin_lock(&bdev->lru_lock);
 +      spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                TTM_DEBUG("Delayed destroy list was clean\n");
  
        if (list_empty(&bdev->man[0].lru))
                TTM_DEBUG("Swap list was clean\n");
 -      spin_unlock(&bdev->lru_lock);
 +      spin_unlock(&glob->lru_lock);
  
 -      ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
        BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
        write_lock(&bdev->vm_lock);
        drm_mm_takedown(&bdev->addr_space_mm);
        write_unlock(&bdev->vm_lock);
  
 -      __free_page(bdev->dummy_read_page);
        return ret;
  }
  EXPORT_SYMBOL(ttm_bo_device_release);
  
 -/*
 - * This function is intended to be called on drm driver load.
 - * If you decide to call it from firstopen, you must protect the call
 - * from a potentially racing ttm_bo_driver_finish in lastclose.
 - * (This may happen on X server restart).
 - */
 -
  int ttm_bo_device_init(struct ttm_bo_device *bdev,
 -                     struct ttm_mem_global *mem_glob,
 -                     struct ttm_bo_driver *driver, uint64_t file_page_offset,
 +                     struct ttm_bo_global *glob,
 +                     struct ttm_bo_driver *driver,
-                      uint64_t file_page_offset)
++                     uint64_t file_page_offset,
+                      bool need_dma32)
  {
        int ret = -EINVAL;
  
 -      bdev->dummy_read_page = NULL;
        rwlock_init(&bdev->vm_lock);
 -      spin_lock_init(&bdev->lru_lock);
 +      spin_lock_init(&glob->lru_lock);
  
        bdev->driver = driver;
 -      bdev->mem_glob = mem_glob;
  
        memset(bdev->man, 0, sizeof(bdev->man));
  
 -      bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 -      if (unlikely(bdev->dummy_read_page == NULL)) {
 -              ret = -ENOMEM;
 -              goto out_err0;
 -      }
 -
        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
        if (unlikely(ret != 0))
 -              goto out_err1;
 +              goto out_no_sys;
  
        bdev->addr_space_rb = RB_ROOT;
        ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
        if (unlikely(ret != 0))
 -              goto out_err2;
 +              goto out_no_addr_mm;
  
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        bdev->nice_mode = true;
        INIT_LIST_HEAD(&bdev->ddestroy);
 -      INIT_LIST_HEAD(&bdev->swap_lru);
        bdev->dev_mapping = NULL;
 -      ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
 -      ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
 -      if (unlikely(ret != 0)) {
 -              printk(KERN_ERR TTM_PFX
 -                     "Could not register buffer object swapout.\n");
 -              goto out_err2;
 -      }
 +      bdev->glob = glob;
+       bdev->need_dma32 = need_dma32;
  
 -      bdev->ttm_bo_extra_size =
 -              ttm_round_pot(sizeof(struct ttm_tt)) +
 -              ttm_round_pot(sizeof(struct ttm_backend));
 -
 -      bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
 -              ttm_round_pot(sizeof(struct ttm_buffer_object));
 +      mutex_lock(&glob->device_list_mutex);
 +      list_add_tail(&bdev->device_list, &glob->device_list);
 +      mutex_unlock(&glob->device_list_mutex);
  
        return 0;
 -out_err2:
 +out_no_addr_mm:
        ttm_bo_clean_mm(bdev, 0);
 -out_err1:
 -      __free_page(bdev->dummy_read_page);
 -out_err0:
 +out_no_sys:
        return ret;
  }
  EXPORT_SYMBOL(ttm_bo_device_init);
@@@ -1511,6 -1454,7 +1546,7 @@@ void ttm_bo_unmap_virtual(struct ttm_bu
  
        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
  }
+ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
  
  static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
  {
@@@ -1632,6 -1576,10 +1668,10 @@@ int ttm_bo_wait(struct ttm_buffer_objec
                        driver->sync_obj_unref(&sync_obj);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
+               } else {
+                       spin_unlock(&bo->lock);
+                       driver->sync_obj_unref(&sync_obj);
+                       spin_lock(&bo->lock);
                }
        }
        return 0;
@@@ -1699,21 -1647,21 +1739,21 @@@ void ttm_bo_synccpu_write_release(struc
  
  static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
  {
 -      struct ttm_bo_device *bdev =
 -          container_of(shrink, struct ttm_bo_device, shrink);
 +      struct ttm_bo_global *glob =
 +          container_of(shrink, struct ttm_bo_global, shrink);
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        int put_count;
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
  
 -      spin_lock(&bdev->lru_lock);
 +      spin_lock(&glob->lru_lock);
        while (ret == -EBUSY) {
 -              if (unlikely(list_empty(&bdev->swap_lru))) {
 -                      spin_unlock(&bdev->lru_lock);
 +              if (unlikely(list_empty(&glob->swap_lru))) {
 +                      spin_unlock(&glob->lru_lock);
                        return -EBUSY;
                }
  
 -              bo = list_first_entry(&bdev->swap_lru,
 +              bo = list_first_entry(&glob->swap_lru,
                                      struct ttm_buffer_object, swap);
                kref_get(&bo->list_kref);
  
  
                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
                if (unlikely(ret == -EBUSY)) {
 -                      spin_unlock(&bdev->lru_lock);
 +                      spin_unlock(&glob->lru_lock);
                        ttm_bo_wait_unreserved(bo, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
 -                      spin_lock(&bdev->lru_lock);
 +                      spin_lock(&glob->lru_lock);
                }
        }
  
        BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
 -      spin_unlock(&bdev->lru_lock);
 +      spin_unlock(&glob->lru_lock);
  
        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@@ -1788,6 -1736,6 +1828,6 @@@ out
  
  void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
  {
 -      while (ttm_bo_swapout(&bdev->shrink) == 0)
 +      while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
                ;
  }
@@@ -41,9 -41,9 +41,9 @@@ void ttm_bo_free_old_node(struct ttm_bu
        struct ttm_mem_reg *old_mem = &bo->mem;
  
        if (old_mem->mm_node) {
 -              spin_lock(&bo->bdev->lru_lock);
 +              spin_lock(&bo->glob->lru_lock);
                drm_mm_put_block(old_mem->mm_node);
 -              spin_unlock(&bo->bdev->lru_lock);
 +              spin_unlock(&bo->glob->lru_lock);
        }
        old_mem->mm_node = NULL;
  }
@@@ -136,7 -136,8 +136,8 @@@ static int ttm_copy_io_page(void *dst, 
  }
  
  static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
-                               unsigned long page)
+                               unsigned long page,
+                               pgprot_t prot)
  {
        struct page *d = ttm_tt_get_page(ttm, page);
        void *dst;
                return -ENOMEM;
  
        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-       dst = kmap(d);
+ #ifdef CONFIG_X86
+       dst = kmap_atomic_prot(d, KM_USER0, prot);
+ #else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               dst = vmap(&d, 1, 0, prot);
+       else
+               dst = kmap(d);
+ #endif
        if (!dst)
                return -ENOMEM;
  
        memcpy_fromio(dst, src, PAGE_SIZE);
-       kunmap(d);
+ #ifdef CONFIG_X86
+       kunmap_atomic(dst, KM_USER0);
+ #else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               vunmap(dst);
+       else
+               kunmap(d);
+ #endif
        return 0;
  }
  
  static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
-                               unsigned long page)
+                               unsigned long page,
+                               pgprot_t prot)
  {
        struct page *s = ttm_tt_get_page(ttm, page);
        void *src;
                return -ENOMEM;
  
        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-       src = kmap(s);
+ #ifdef CONFIG_X86
+       src = kmap_atomic_prot(s, KM_USER0, prot);
+ #else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               src = vmap(&s, 1, 0, prot);
+       else
+               src = kmap(s);
+ #endif
        if (!src)
                return -ENOMEM;
  
        memcpy_toio(dst, src, PAGE_SIZE);
-       kunmap(s);
+ #ifdef CONFIG_X86
+       kunmap_atomic(src, KM_USER0);
+ #else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               vunmap(src);
+       else
+               kunmap(s);
+ #endif
        return 0;
  }
  
@@@ -214,11 -249,17 +249,17 @@@ int ttm_bo_move_memcpy(struct ttm_buffe
  
        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
-               if (old_iomap == NULL)
-                       ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
-               else if (new_iomap == NULL)
-                       ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
-               else
+               if (old_iomap == NULL) {
+                       pgprot_t prot = ttm_io_prot(old_mem->placement,
+                                                   PAGE_KERNEL);
+                       ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+                                                  prot);
+               } else if (new_iomap == NULL) {
+                       pgprot_t prot = ttm_io_prot(new_mem->placement,
+                                                   PAGE_KERNEL);
+                       ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+                                                  prot);
+               } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
@@@ -509,8 -550,8 +550,8 @@@ int ttm_bo_move_accel_cleanup(struct tt
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bo->lock);
-               driver->sync_obj_unref(&bo->sync_obj);
+               if (tmp_obj)
+                       driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;
  
  
                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                spin_unlock(&bo->lock);
+               if (tmp_obj)
+                       driver->sync_obj_unref(&tmp_obj);
  
                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
@@@ -86,10 -86,16 +86,16 @@@ void ttm_tt_cache_flush(struct page *pa
        unsigned long i;
  
        for (i = 0; i < num_pages; ++i) {
-               if (pages[i]) {
-                       unsigned long start = (unsigned long)page_address(pages[i]);
-                       flush_dcache_range(start, start + PAGE_SIZE);
-               }
+               struct page *page = pages[i];
+               void *page_virtual;
+               if (unlikely(page == NULL))
+                       continue;
+               page_virtual = kmap_atomic(page, KM_USER0);
+               flush_dcache_range((unsigned long) page_virtual,
+                                  (unsigned long) page_virtual + PAGE_SIZE);
+               kunmap_atomic(page_virtual, KM_USER0);
        }
  #else
        if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
@@@ -131,10 -137,17 +137,17 @@@ static void ttm_tt_free_page_directory(
  
  static struct page *ttm_tt_alloc_page(unsigned page_flags)
  {
+       gfp_t gfp_flags = GFP_USER;
        if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-               return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+               gfp_flags |= __GFP_ZERO;
+       if (page_flags & TTM_PAGE_FLAG_DMA32)
+               gfp_flags |= __GFP_DMA32;
+       else
+               gfp_flags |= __GFP_HIGHMEM;
  
-       return alloc_page(GFP_HIGHUSER);
+       return alloc_page(gfp_flags);
  }
  
  static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
                        set_page_dirty_lock(page);
  
                ttm->pages[i] = NULL;
 -              ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
 +              ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
                put_page(page);
        }
        ttm->state = tt_unpopulated;
  static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
  {
        struct page *p;
 -      struct ttm_bo_device *bdev = ttm->bdev;
 -      struct ttm_mem_global *mem_glob = bdev->mem_glob;
 +      struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        int ret;
  
        while (NULL == (p = ttm->pages[index])) {
                if (!p)
                        return NULL;
  
 -              if (PageHighMem(p)) {
 -                      ret =
 -                          ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
 -                                               false, false, true);
 -                      if (unlikely(ret != 0))
 -                              goto out_err;
 +              ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
 +              if (unlikely(ret != 0))
 +                      goto out_err;
 +
 +              if (PageHighMem(p))
                        ttm->pages[--ttm->first_himem_page] = p;
 -              } else {
 -                      ret =
 -                          ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
 -                                               false, false, false);
 -                      if (unlikely(ret != 0))
 -                              goto out_err;
 +              else
                        ttm->pages[++ttm->last_lomem_page] = p;
 -              }
        }
        return p;
  out_err:
@@@ -347,8 -368,8 +360,8 @@@ static void ttm_tt_free_alloced_pages(s
                                printk(KERN_ERR TTM_PFX
                                       "Erroneous page count. "
                                       "Leaking pages.\n");
 -                      ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
 -                                          PageHighMem(cur_page));
 +                      ttm_mem_global_free_page(ttm->glob->mem_glob,
 +                                               cur_page);
                        __free_page(cur_page);
                }
        }
@@@ -393,7 -414,7 +406,7 @@@ int ttm_tt_set_user(struct ttm_tt *ttm
        struct mm_struct *mm = tsk->mm;
        int ret;
        int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
 -      struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
 +      struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
  
        BUG_ON(num_pages != ttm->num_pages);
        BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
         */
  
        ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
 -                                 false, false, false);
 +                                 false, false);
        if (unlikely(ret != 0))
                return ret;
  
  
        if (ret != num_pages && write) {
                ttm_tt_free_user_pages(ttm);
 -              ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
 +              ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
                return -ENOMEM;
        }
  
@@@ -438,7 -459,8 +451,7 @@@ struct ttm_tt *ttm_tt_create(struct ttm
        if (!ttm)
                return NULL;
  
 -      ttm->bdev = bdev;
 -
 +      ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
@@@ -32,7 -32,6 +32,7 @@@
  
  #include "ttm/ttm_bo_api.h"
  #include "ttm/ttm_memory.h"
 +#include "ttm/ttm_module.h"
  #include "drm_mm.h"
  #include "linux/workqueue.h"
  #include "linux/fs.h"
@@@ -122,6 -121,7 +122,7 @@@ struct ttm_backend 
  #define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
  #define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
  #define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
+ #define TTM_PAGE_FLAG_DMA32           (1 << 7)
  
  enum ttm_caching_state {
        tt_uncached,
@@@ -161,7 -161,7 +162,7 @@@ struct ttm_tt 
        long last_lomem_page;
        uint32_t page_flags;
        unsigned long num_pages;
 -      struct ttm_bo_device *bdev;
 +      struct ttm_bo_global *glob;
        struct ttm_backend *be;
        struct task_struct *tsk;
        unsigned long start;
@@@ -354,75 -354,34 +355,83 @@@ struct ttm_bo_driver 
        int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
        void (*sync_obj_unref) (void **sync_obj);
        void *(*sync_obj_ref) (void *sync_obj);
+       /* hook to notify driver about a driver move so it
+        * can do tiling things */
+       void (*move_notify)(struct ttm_buffer_object *bo,
+                           struct ttm_mem_reg *new_mem);
+       /* notify the driver we are taking a fault on this BO
+        * and have reserved it */
+       void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
  };
  
 -#define TTM_NUM_MEM_TYPES 8
 +/**
 + * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 + */
 +
 +struct ttm_bo_global_ref {
 +      struct ttm_global_reference ref;
 +      struct ttm_mem_global *mem_glob;
 +};
  
 -#define TTM_BO_PRIV_FLAG_MOVING  0    /* Buffer object is moving and needs
 -                                         idling before CPU mapping */
 -#define TTM_BO_PRIV_FLAG_MAX 1
  /**
 - * struct ttm_bo_device - Buffer object driver device-specific data.
 + * struct ttm_bo_global - Buffer object driver global data.
   *
   * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 - * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 - * @count: Current number of buffer object.
 - * @pages: Current number of pinned pages.
   * @dummy_read_page: Pointer to a dummy page used for mapping requests
   * of unpopulated pages.
 - * @shrink: A shrink callback object used for buffre object swap.
 + * @shrink: A shrink callback object used for buffer object swap.
   * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
   * used by a buffer object. This is excluding page arrays and backing pages.
   * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
 + * @device_list_mutex: Mutex protecting the device list.
 + * This mutex is held while traversing the device list for pm options.
 + * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 + * @device_list: List of buffer object devices.
 + * @swap_lru: Lru list of buffer objects used for swapping.
 + */
 +
 +struct ttm_bo_global {
 +
 +      /**
 +       * Constant after init.
 +       */
 +
 +      struct kobject kobj;
 +      struct ttm_mem_global *mem_glob;
 +      struct page *dummy_read_page;
 +      struct ttm_mem_shrink shrink;
 +      size_t ttm_bo_extra_size;
 +      size_t ttm_bo_size;
 +      struct mutex device_list_mutex;
 +      spinlock_t lru_lock;
 +
 +      /**
 +       * Protected by device_list_mutex.
 +       */
 +      struct list_head device_list;
 +
 +      /**
 +       * Protected by the lru_lock.
 +       */
 +      struct list_head swap_lru;
 +
 +      /**
 +       * Internal protection.
 +       */
 +      atomic_t bo_count;
 +};
 +
 +
 +#define TTM_NUM_MEM_TYPES 8
 +
 +#define TTM_BO_PRIV_FLAG_MOVING  0    /* Buffer object is moving and needs
 +                                         idling before CPU mapping */
 +#define TTM_BO_PRIV_FLAG_MAX 1
 +/**
 + * struct ttm_bo_device - Buffer object driver device-specific data.
 + *
 + * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
   * @man: An array of mem_type_managers.
   * @addr_space_mm: Range manager for the device address space.
   * lru_lock: Spinlock that protects the buffer+device lru lists and
@@@ -440,21 -399,32 +449,21 @@@ struct ttm_bo_device 
        /*
         * Constant after bo device init / atomic.
         */
 -
 -      struct ttm_mem_global *mem_glob;
 +      struct list_head device_list;
 +      struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
 -      struct page *dummy_read_page;
 -      struct ttm_mem_shrink shrink;
 -
 -      size_t ttm_bo_extra_size;
 -      size_t ttm_bo_size;
 -
        rwlock_t vm_lock;
 +      struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
        /*
         * Protected by the vm lock.
         */
 -      struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
        struct rb_root addr_space_rb;
        struct drm_mm addr_space_mm;
  
        /*
 -       * Might want to change this to one lock per manager.
 -       */
 -      spinlock_t lru_lock;
 -      /*
 -       * Protected by the lru lock.
 +       * Protected by the global:lru lock.
         */
        struct list_head ddestroy;
 -      struct list_head swap_lru;
  
        /*
         * Protected by load / firstopen / lastclose /unload sync.
         */
  
        struct delayed_work wq;
+       bool need_dma32;
  };
  
  /**
@@@ -668,9 -640,6 +679,9 @@@ extern int ttm_bo_pci_offset(struct ttm
                             unsigned long *bus_offset,
                             unsigned long *bus_size);
  
 +extern void ttm_bo_global_release(struct ttm_global_reference *ref);
 +extern int ttm_bo_global_init(struct ttm_global_reference *ref);
 +
  extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
  
  /**
   * !0: Failure.
   */
  extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
 -                            struct ttm_mem_global *mem_glob,
 +                            struct ttm_bo_global *glob,
                              struct ttm_bo_driver *driver,
-                             uint64_t file_page_offset);
+                             uint64_t file_page_offset, bool need_dma32);
+ /**
+  * ttm_bo_unmap_virtual
+  *
+  * @bo: tear down the virtual mappings for this BO
+  */
+ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
  
  /**
   * ttm_bo_reserve:
@@@ -32,9 -32,8 +32,9 @@@
  #define _TTM_MODULE_H_
  
  #include <linux/kernel.h>
 +struct kobject;
  
- #define TTM_PFX "[TTM]"
+ #define TTM_PFX "[TTM] "
  
  enum ttm_global_types {
        TTM_GLOBAL_TTM_MEM = 0,
@@@ -55,6 -54,5 +55,6 @@@ extern void ttm_global_init(void)
  extern void ttm_global_release(void);
  extern int ttm_global_item_ref(struct ttm_global_reference *ref);
  extern void ttm_global_item_unref(struct ttm_global_reference *ref);
 +extern struct kobject *ttm_get_kobj(void);
  
  #endif /* _TTM_MODULE_H_ */