+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_map);
+ if (ret) {
+ drv_log("DRM_IOCTL_I915_GEM_MMAP failed\n");
+ return MAP_FAILED;
+ }
+
+ addr = (void *)(uintptr_t)gem_map.addr_ptr;
+ } else {
+ struct drm_i915_gem_mmap_gtt gem_map;
+ memset(&gem_map, 0, sizeof(gem_map));
+
+ gem_map.handle = bo->handles[0].u32;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gem_map);
+ if (ret) {
+ drv_log("DRM_IOCTL_I915_GEM_MMAP_GTT failed\n");
+ return MAP_FAILED;
+ }
+
+ addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ gem_map.offset);
+ }
+
+ if (addr == MAP_FAILED) {
+ drv_log("i915 GEM mmap failed\n");
+ return addr;
+ }
+
+ vma->length = bo->total_size;
+ return addr;
+}
+
+/*
+ * Move the BO into the GEM domain appropriate for CPU access before the
+ * caller touches the mapping.  Linear (untiled) BOs are accessed through
+ * the CPU domain; tiled BOs go through the GTT so the hardware handles
+ * detiling.  The write domain is only claimed when the mapping was
+ * created with write access.
+ *
+ * Returns 0 on success, or the drmIoctl() error code on failure.
+ */
+static int i915_bo_invalidate(struct bo *bo, struct mapping *mapping)
+{
+ int ret;
+ uint32_t domain;
+ struct drm_i915_gem_set_domain set_domain;
+
+ /* CPU domain for linear BOs, GTT domain for tiled BOs. */
+ domain = (bo->tiling == I915_TILING_NONE) ? I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT;
+
+ memset(&set_domain, 0, sizeof(set_domain));
+ set_domain.handle = bo->handles[0].u32;
+ set_domain.read_domains = domain;
+ if (mapping->vma->map_flags & BO_MAP_WRITE)
+ set_domain.write_domain = domain;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+ if (ret) {
+ drv_log("DRM_IOCTL_I915_GEM_SET_DOMAIN failed with %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int i915_bo_flush(struct bo *bo, struct mapping *mapping)
+{
+ struct i915_device *i915 = bo->drv->priv;
+ if (!i915->has_llc && bo->tiling == I915_TILING_NONE)
+ i915_clflush(mapping->vma->addr, mapping->vma->length);
+
+ return 0;