LOCAL_CFLAGS += \
-DENABLE_SHADER_CACHE \
+ -D__STDC_CONSTANT_MACROS \
-D__STDC_LIMIT_MACROS \
-DHAVE___BUILTIN_EXPECT \
-DHAVE___BUILTIN_FFS \
-DHAVE___BUILTIN_UNREACHABLE \
-DHAVE_PTHREAD=1 \
-DHAVE_DLOPEN \
+ -DTEXTURE_FLOAT_ENABLED \
-fvisibility=hidden \
-Wno-sign-compare
LOCAL_CONLYFLAGS += \
-std=c99
+x86_flags := \
+ -DUSE_SSE41 \
+
+x86_64_flags := \
+ -DUSE_SSE41 \
+
ifeq ($(strip $(MESA_ENABLE_ASM)),true)
-ifeq ($(TARGET_ARCH),x86)
-LOCAL_CFLAGS += \
+x86_flags += \
-DUSE_X86_ASM \
+ -DUSE_MMX_ASM \
+ -DUSE_3DNOW_ASM \
+ -DUSE_SSE_ASM \
+
+x86_64_flags += \
+ -DUSE_X86_64_ASM \
endif
-endif
+
+LOCAL_ASFLAGS_x86 += $(x86_flags)
+LOCAL_ASFLAGS_x86_64 += $(x86_64_flags)
+LOCAL_CFLAGS_x86 += $(x86_flags)
+LOCAL_CFLAGS_x86_64 += $(x86_64_flags)
ifeq ($(MESA_ENABLE_LLVM),true)
LOCAL_CFLAGS += \
endif
LOCAL_CPPFLAGS += \
- $(if $(filter true,$(MESA_LOLLIPOP_BUILD)),-D_USING_LIBCXX) \
+ $(if $(filter true,$(MESA_LOLLIPOP_BUILD)),-std=c++11) \
-Wno-error=non-virtual-dtor \
-Wno-non-virtual-dtor
MESA_GPU_DRIVERS := $(filter-out $(invalid_drivers), $(MESA_GPU_DRIVERS))
endif
-# host and target must be the same arch to generate matypes.h
-ifeq ($(TARGET_ARCH),$(HOST_ARCH))
MESA_ENABLE_ASM := true
-else
-MESA_ENABLE_ASM := false
-endif
ifneq ($(filter $(classic_drivers), $(MESA_GPU_DRIVERS)),)
MESA_BUILD_CLASSIC := true
MESA_BUILD_GALLIUM := false
endif
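+# llvmpipe, now built as part of swrast, needs LLVM just like radeonsi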
-MESA_ENABLE_LLVM := $(if $(filter radeonsi,$(MESA_GPU_DRIVERS)),true,false)
+MESA_ENABLE_LLVM := $(if $(filter radeonsi swrast,$(MESA_GPU_DRIVERS)),true,false)
# add subdirectories
ifneq ($(strip $(MESA_GPU_DRIVERS)),)
$(call add-clean-step, rm -rf $(HOST_OUT_release)/*/EXECUTABLES/glsl_compiler_intermediates)
$(call add-clean-step, rm -rf $(HOST_OUT_release)/*/STATIC_LIBRARIES/libmesa_*_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/*/SHARED_LIBRARIES/*_dri_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/*/SHARED_LIBRARIES/*_dri_intermediates)
typedef struct __DRIconfigRec __DRIconfig;
typedef struct __DRIframebufferRec __DRIframebuffer;
typedef struct __DRIversionRec __DRIversion;
+typedef struct __DRIimageRec __DRIimage;
typedef struct __DRIcoreExtensionRec __DRIcoreExtension;
typedef struct __DRIextensionRec __DRIextension;
* conjunction with the core extension.
*/
#define __DRI_SWRAST "DRI_SWRast"
-#define __DRI_SWRAST_VERSION 4
+#define __DRI_SWRAST_VERSION 5
+struct winsys_handle;
struct __DRIswrastExtensionRec {
__DRIextension base;
const __DRIconfig ***driver_configs,
void *loaderPrivate);
+ /**
+ * Create a DRI image from a native window system handle.
+ *
+ * \since version 5
+ */
+ __DRIimage *(*createImageFromWinsys)(__DRIscreen *_screen,
+ int width, int height, int format,
+ int num_handles, struct winsys_handle *whandle,
+ void *loaderPrivate);
};
/** Common DRI function definitions, shared among DRI2 and Image extensions
#define __BLIT_FLAG_FLUSH 0x0001
#define __BLIT_FLAG_FINISH 0x0002
-typedef struct __DRIimageRec __DRIimage;
typedef struct __DRIimageExtensionRec __DRIimageExtension;
struct __DRIimageExtensionRec {
__DRIextension base;
!state->EXT_gpu_shader5_enable &&
!state->OES_gpu_shader5_enable) {
if (state->is_version(130, 300))
- _mesa_glsl_error(&loc, state,
+ _mesa_glsl_warning(&loc, state,
"sampler arrays indexed with non-constant "
"expressions are forbidden in GLSL %s "
"and later",
LOCAL_C_INCLUDES := \
$(MESA_TOP)/src/egl/main \
$(MESA_TOP)/src/egl/drivers/dri2 \
+ $(MESA_TOP)/src/gallium/include
LOCAL_STATIC_LIBRARIES := \
libmesa_loader
static const struct dri2_extension_match swrast_core_extensions[] = {
{ __DRI_TEX_BUFFER, 2, offsetof(struct dri2_egl_display, tex_buffer) },
+ { __DRI_IMAGE, 1, offsetof(struct dri2_egl_display, image) },
{ NULL, 0, 0 }
};
#include "loader.h"
#include "egl_dri2.h"
#include "egl_dri2_fallbacks.h"
+#include "state_tracker/drm_driver.h"
#include "gralloc_drm.h"
+#include "gralloc_drm_handle.h"
#define ALIGN(val, align) (((val) + (align) - 1) & ~((align) - 1))
static int
get_native_buffer_name(struct ANativeWindowBuffer *buf)
{
- return gralloc_drm_get_gem_handle(buf->handle);
+ struct gralloc_drm_handle_t *handle = gralloc_drm_handle(buf->handle);
+ return (handle) ? handle->name : 0;
}
static EGLBoolean
if (!config)
goto cleanup_surface;
- dri2_surf->dri_drawable =
- dri2_dpy->dri2->createNewDrawable(dri2_dpy->dri_screen, config,
- dri2_surf);
+ if (dri2_dpy->dri2) {
+ dri2_surf->dri_drawable =
+ dri2_dpy->dri2->createNewDrawable(dri2_dpy->dri_screen, config,
+ dri2_surf);
+ } else {
+ dri2_surf->dri_drawable =
+ dri2_dpy->swrast->createNewDrawable(dri2_dpy->dri_screen, config,
+ dri2_surf);
+ }
+
if (dri2_surf->dri_drawable == NULL) {
_eglError(EGL_BAD_ALLOC, "dri2->createNewDrawable");
goto cleanup_surface;
return (count != 0);
}
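+/* Returns 0 when a back buffer is available and the surface size is up to
+ * date; nonzero on failure. Note the errno-style return convention, unlike
+ * the EGLBoolean helpers around it. */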
+static int swrastUpdateBuffer(struct dri2_egl_surface *dri2_surf)
+{
+ if (dri2_surf->base.Type == EGL_WINDOW_BIT) {
+ if (!dri2_surf->buffer && !droid_window_dequeue_buffer(dri2_surf)) {
+ _eglLog(_EGL_WARNING, "failed to dequeue buffer for window");
+ return 1;
+ }
+ dri2_surf->base.Width = dri2_surf->buffer->width;
+ dri2_surf->base.Height = dri2_surf->buffer->height;
+ return 0;
+ }
+ return !dri2_surf->buffer;
+}
+
+static void
+swrastGetDrawableInfo(__DRIdrawable * draw,
+ int *x, int *y, int *w, int *h,
+ void *loaderPrivate)
+{
+ struct dri2_egl_surface *dri2_surf = loaderPrivate;
+
+ swrastUpdateBuffer(dri2_surf);
+
+ *x = 0;
+ *y = 0;
+ *w = dri2_surf->base.Width;
+ *h = dri2_surf->base.Height;
+}
+
+static void
+swrastPutImage2(__DRIdrawable * draw, int op,
+ int x, int y, int w, int h, int stride,
+ char *data, void *loaderPrivate)
+{
+ struct dri2_egl_surface *dri2_surf = loaderPrivate;
+ _EGLDisplay *egl_dpy = dri2_surf->base.Resource.Display;
+ char *dstPtr, *srcPtr;
+ size_t BPerPixel, dstStride, copyWidth, xOffset;
+
+ if (swrastUpdateBuffer(dri2_surf)) {
+ return;
+ }
+
+ BPerPixel = get_format_bpp(dri2_surf->buffer->format);
+ dstStride = BPerPixel * dri2_surf->buffer->stride;
+ copyWidth = BPerPixel * w;
+ xOffset = BPerPixel * x;
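+ /* a stride of 0 (as passed by the putImage wrapper below) means the
+ * source rows are tightly packed */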
+ if (stride == 0)
+ stride = copyWidth;
+
+ /* drivers expect us to do these checks (and some rely on them) */
+ if (copyWidth > dstStride - xOffset)
+ copyWidth = dstStride - xOffset;
+ if (h > dri2_surf->base.Height - y)
+ h = dri2_surf->base.Height - y;
+
+ struct dri2_egl_display *dri2_dpy = dri2_egl_display(egl_dpy);
+ if (dri2_dpy->gralloc->lock(dri2_dpy->gralloc, dri2_surf->buffer->handle,
+                             GRALLOC_USAGE_SW_READ_OFTEN |
+                             GRALLOC_USAGE_SW_WRITE_OFTEN,
+                             0, 0, dri2_surf->buffer->width,
+                             dri2_surf->buffer->height, (void **)&dstPtr)) {
+ _eglLog(_EGL_WARNING, "can not lock window buffer");
+ return;
+ }
+
+ dstPtr += y * dstStride + xOffset;
+ srcPtr = data;
+
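+ /* if both source and destination rows are contiguous, the whole
+ * rectangle can be copied with a single memcpy; otherwise go row by
+ * row */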
+ if (xOffset == 0 && copyWidth == stride && copyWidth == dstStride) {
+ memcpy(dstPtr, srcPtr, copyWidth * h);
+ } else {
+ for (; h > 0; h--) {
+ memcpy(dstPtr, srcPtr, copyWidth);
+ srcPtr += stride;
+ dstPtr += dstStride;
+ }
+ }
+
+ if (dri2_dpy->gralloc->unlock(dri2_dpy->gralloc, dri2_surf->buffer->handle)) {
+ _eglLog(_EGL_WARNING, "unlock buffer failed");
+ }
+
+ droid_window_enqueue_buffer(egl_dpy, dri2_surf);
+}
+
+static void
+swrastPutImage(__DRIdrawable * draw, int op,
+ int x, int y, int w, int h,
+ char *data, void *loaderPrivate)
+{
+ swrastPutImage2(draw, op, x, y, w, h, 0, data, loaderPrivate);
+}
+
+static void
+swrastGetImage(__DRIdrawable * read,
+ int x, int y, int w, int h,
+ char *data, void *loaderPrivate)
+{
+ struct dri2_egl_surface *dri2_surf = loaderPrivate;
+ size_t BPerPixel, srcStride, copyWidth, xOffset;
+ char *dstPtr, *srcPtr;
+
+ _eglLog(_EGL_WARNING, "calling swrastGetImage with read=%p, private=%p, w=%d, h=%d", read, loaderPrivate, w, h);
+
+ if (swrastUpdateBuffer(dri2_surf)) {
+ _eglLog(_EGL_WARNING, "swrastGetImage failed data unchanged");
+ return;
+ }
+
+ BPerPixel = get_format_bpp(dri2_surf->buffer->format);
+ srcStride = BPerPixel * dri2_surf->buffer->stride;
+ copyWidth = BPerPixel * w;
+ xOffset = BPerPixel * x;
+
+ struct dri2_egl_display *dri2_dpy =
+    dri2_egl_display(dri2_surf->base.Resource.Display);
+ if (dri2_dpy->gralloc->lock(dri2_dpy->gralloc, dri2_surf->buffer->handle,
+                             GRALLOC_USAGE_SW_READ_OFTEN |
+                             GRALLOC_USAGE_SW_WRITE_OFTEN,
+                             0, 0, dri2_surf->buffer->width,
+                             dri2_surf->buffer->height, (void **)&srcPtr)) {
+ _eglLog(_EGL_WARNING, "can not lock window buffer");
+ memset(data, 0, copyWidth * h);
+ return;
+ }
+
+ srcPtr += y * srcStride + xOffset;
+ dstPtr = data;
+
+ if (xOffset == 0 && copyWidth == srcStride) {
+ memcpy(dstPtr, srcPtr, copyWidth * h);
+ } else {
+ for (; h > 0; h--) {
+ memcpy(dstPtr, srcPtr, copyWidth);
+ srcPtr += srcStride;
+ dstPtr += copyWidth;
+ }
+ }
+
+ if (dri2_dpy->gralloc->unlock(dri2_dpy->gralloc, dri2_surf->buffer->handle)) {
+ _eglLog(_EGL_WARNING, "unlock buffer failed");
+ }
+}
+
+static EGLBoolean
+swrast_swap_buffers(_EGLDriver *drv, _EGLDisplay *disp, _EGLSurface *draw)
+{
+ struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
+ struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
+
+ dri2_dpy->core->swapBuffers(dri2_surf->dri_drawable);
+
+ return EGL_TRUE;
+}
+
+static _EGLImage *
+swrast_create_image_android_native_buffer(_EGLDisplay *disp, _EGLContext *ctx,
+ struct ANativeWindowBuffer *buf)
+{
+ struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
+ struct dri2_egl_image *dri2_img;
+ struct winsys_handle whandle;
+ EGLint format;
+
+ if (ctx != NULL) {
+ /* From the EGL_ANDROID_image_native_buffer spec:
+ *
+ * * If <target> is EGL_NATIVE_BUFFER_ANDROID and <ctx> is not
+ * EGL_NO_CONTEXT, the error EGL_BAD_CONTEXT is generated.
+ */
+ _eglError(EGL_BAD_CONTEXT, "eglCreateEGLImageKHR: for "
+ "EGL_NATIVE_BUFFER_ANDROID, the context must be "
+ "EGL_NO_CONTEXT");
+ return NULL;
+ }
+
+ if (!buf || buf->common.magic != ANDROID_NATIVE_BUFFER_MAGIC ||
+ buf->common.version != sizeof(*buf)) {
+ _eglError(EGL_BAD_PARAMETER, "eglCreateEGLImageKHR");
+ return NULL;
+ }
+
+ /* see the table in droid_add_configs_for_visuals */
+ format = get_format(buf->format);
+ if (format < 0)
+ return NULL;
+
+ dri2_img = calloc(1, sizeof(*dri2_img));
+ if (!dri2_img) {
+ _eglError(EGL_BAD_ALLOC, "droid_create_image_mesa_drm");
+ return NULL;
+ }
+
+ if (!_eglInitImage(&dri2_img->base, disp)) {
+ free(dri2_img);
+ return NULL;
+ }
+
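+ /* hand the gralloc buffer straight through to the winsys, using the
+ * DRM_API_HANDLE_TYPE_BUFFER handle type added by this patch */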
+ memset(&whandle, 0, sizeof(whandle));
+ whandle.type = DRM_API_HANDLE_TYPE_BUFFER;
+ whandle.external_buffer = buf;
+ whandle.stride = buf->stride * get_format_bpp(buf->format);
+
+ dri2_img->dri_image =
+ dri2_dpy->swrast->createImageFromWinsys(dri2_dpy->dri_screen,
+ buf->width,
+ buf->height,
+ format,
+ 1, &whandle,
+ dri2_img);
+
+ if (!dri2_img->dri_image) {
+ free(dri2_img);
+ _eglError(EGL_BAD_ALLOC, "droid_create_image_mesa_drm");
+ return NULL;
+ }
+
+ return &dri2_img->base;
+}
+
+static _EGLImage *
+swrast_create_image_khr(_EGLDriver *drv, _EGLDisplay *disp,
+ _EGLContext *ctx, EGLenum target,
+ EGLClientBuffer buffer, const EGLint *attr_list)
+{
+ switch (target) {
+ case EGL_NATIVE_BUFFER_ANDROID:
+ return swrast_create_image_android_native_buffer(disp, ctx,
+ (struct ANativeWindowBuffer *) buffer);
+ default:
+ return dri2_create_image_khr(drv, disp, ctx, target, buffer, attr_list);
+ }
+}
+
static int
droid_open_device(struct dri2_egl_display *dri2_dpy)
{
.flushFrontBuffer = droid_flush_front_buffer,
};
+static const __DRIswrastLoaderExtension droid_swrast_loader_extension = {
+ .base = { __DRI_SWRAST_LOADER, 2 },
+
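+ /* version 2 is advertised because the stride-aware putImage2 hook is
+ * implemented */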
+ .getDrawableInfo = swrastGetDrawableInfo,
+ .putImage = swrastPutImage,
+ .getImage = swrastGetImage,
+ .putImage2 = swrastPutImage2,
+};
+
static const __DRIextension *droid_dri2_loader_extensions[] = {
&droid_dri2_loader_extension.base,
&image_lookup_extension.base,
NULL,
};
-EGLBoolean
-dri2_initialize_android(_EGLDriver *drv, _EGLDisplay *dpy)
+static const __DRIextension *droid_swrast_loader_extensions[] = {
+ &droid_swrast_loader_extension.base,
+ &image_lookup_extension.base,
+ NULL,
+};
+
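+/* The hardware (DRI2) path requires a gralloc module that implements the
+ * perform() hook (gralloc_drm); the swrast path works with any gralloc. */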
+static struct dri2_egl_display *alloc_dri2_egl_display(bool is_swrast)
+{
+ struct dri2_egl_display *dri2_dpy = NULL;
+ const hw_module_t *mod;
+
+ if (!hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &mod)) {
+ gralloc_module_t *gr_mod = (gralloc_module_t *) mod;
+ if (is_swrast || gr_mod->perform) {
+ dri2_dpy = calloc(1, sizeof(*dri2_dpy));
+ if (dri2_dpy)
+ dri2_dpy->gralloc = gr_mod;
+ else
+ _eglError(EGL_BAD_ALLOC, "eglInitialize");
+ }
+ } else {
+ _eglLog(_EGL_FATAL, "DRI2: failed to get gralloc module");
+ }
+
+ return dri2_dpy;
+}
+
+static EGLBoolean
+dri2_initialize_android_drm(_EGLDriver *drv, _EGLDisplay *dpy)
{
struct dri2_egl_display *dri2_dpy;
const char *err;
int ret;
- _eglSetLogProc(droid_log);
-
- loader_set_logger(_eglLog);
-
- dri2_dpy = calloc(1, sizeof(*dri2_dpy));
+ dri2_dpy = alloc_dri2_egl_display(false);
if (!dri2_dpy)
- return _eglError(EGL_BAD_ALLOC, "eglInitialize");
-
- ret = hw_get_module(GRALLOC_HARDWARE_MODULE_ID,
- (const hw_module_t **)&dri2_dpy->gralloc);
- if (ret) {
- err = "DRI2: failed to get gralloc module";
- goto cleanup_display;
- }
+ return _eglError(EGL_NOT_INITIALIZED, "eglInitialize");
dpy->DriverData = (void *) dri2_dpy;
return _eglError(EGL_NOT_INITIALIZED, err);
}
+
+/* differs from droid_display_vtbl in create_image and swap_buffers */
+static struct dri2_egl_display_vtbl swrast_display_vtbl = {
+ .authenticate = NULL,
+ .create_window_surface = droid_create_window_surface,
+ .create_pixmap_surface = dri2_fallback_create_pixmap_surface,
+ .create_pbuffer_surface = droid_create_pbuffer_surface,
+ .destroy_surface = droid_destroy_surface,
+ .create_image = swrast_create_image_khr,
+ .swap_interval = dri2_fallback_swap_interval,
+ .swap_buffers = swrast_swap_buffers,
+ .swap_buffers_with_damage = dri2_fallback_swap_buffers_with_damage,
+ .swap_buffers_region = dri2_fallback_swap_buffers_region,
+ .post_sub_buffer = dri2_fallback_post_sub_buffer,
+ .copy_buffers = dri2_fallback_copy_buffers,
+ .query_buffer_age = dri2_fallback_query_buffer_age,
+ .create_wayland_buffer_from_image = dri2_fallback_create_wayland_buffer_from_image,
+ .get_sync_values = dri2_fallback_get_sync_values,
+ .get_dri_drawable = dri2_surface_get_dri_drawable,
+};
+
+static EGLBoolean
+dri2_initialize_android_swrast(_EGLDriver *drv, _EGLDisplay *dpy)
+{
+ struct dri2_egl_display *dri2_dpy;
+ const char *err = "";
+ const hw_module_t *mod;
+
+ dri2_dpy = alloc_dri2_egl_display(true);
+ if (!dri2_dpy)
+ return _eglError(EGL_BAD_ALLOC, "eglInitialize");
+
+ dpy->DriverData = (void *) dri2_dpy;
+
+ dri2_dpy->driver_name = strdup("swrast");
+ if (!dri2_load_driver_swrast(dpy)) {
+ err = "DRISW: failed to load swrast driver";
+ goto cleanup_driver_name;
+ }
+
+ dri2_dpy->loader_extensions = droid_swrast_loader_extensions;
+
+ if (!dri2_create_screen(dpy)) {
+ err = "DRISW: failed to create screen";
+ goto cleanup_driver;
+ }
+
+ if (!droid_add_configs_for_visuals(drv, dpy)) {
+ err = "DRISW: failed to add configs";
+ goto cleanup_screen;
+ }
+
+ dpy->Extensions.ANDROID_framebuffer_target = EGL_TRUE;
+ dpy->Extensions.ANDROID_image_native_buffer = EGL_TRUE;
+ dpy->Extensions.ANDROID_recordable = EGL_TRUE;
+ dpy->Extensions.KHR_image_base = EGL_TRUE;
+
+ /* Fill vtbl last to prevent accidentally calling a virtual function
+ * during initialization.
+ */
+ dri2_dpy->vtbl = &swrast_display_vtbl;
+
+ return EGL_TRUE;
+
+cleanup_screen:
+ dri2_dpy->core->destroyScreen(dri2_dpy->dri_screen);
+cleanup_driver:
+ dlclose(dri2_dpy->driver);
+cleanup_driver_name:
+ free(dri2_dpy->driver_name);
+ free(dri2_dpy);
+
+ return _eglError(EGL_NOT_INITIALIZED, err);
+}
+
+EGLBoolean
+dri2_initialize_android(_EGLDriver *drv, _EGLDisplay *dpy)
+{
+ EGLBoolean initialized = EGL_FALSE;
+
+ _eglSetLogProc(droid_log);
+
+ loader_set_logger(_eglLog);
+
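+ /* Prefer the hardware path unless LIBGL_ALWAYS_SOFTWARE is set, and
+ * fall back to swrast whenever hardware initialization fails. */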
+ if (!getenv("LIBGL_ALWAYS_SOFTWARE"))
+ initialized = dri2_initialize_android_drm(drv, dpy);
+
+ if (!initialized)
+ initialized = dri2_initialize_android_swrast(drv, dpy);
+
+ return initialized;
+}
# swrast
ifneq ($(filter swrast,$(MESA_GPU_DRIVERS)),)
-SUBDIRS += winsys/sw/dri drivers/softpipe
+SUBDIRS += winsys/sw/dri drivers/llvmpipe drivers/softpipe
endif
# freedreno
bool
pipe_loader_drm_probe_fd(struct pipe_loader_device **dev, int fd);
+struct pipe_screen *
+load_pipe_screen(struct pipe_loader_device **dev, int fd);
+
#ifdef __cplusplus
}
#endif
.configuration = pipe_loader_drm_configuration,
.release = pipe_loader_drm_release
};
+
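+/* Convenience entry point for callers that only have a DRM fd: probe the
+ * device and create a pipe_screen for it in one call. */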
+PUBLIC struct pipe_screen *load_pipe_screen(struct pipe_loader_device **dev, int fd)
+{
+ struct pipe_screen *pscreen = NULL;
+ if (pipe_loader_drm_probe_fd(dev, fd)) {
+ pscreen = pipe_loader_create_screen(*dev);
+ }
+ return pscreen;
+}
--- /dev/null
+# Mesa 3-D graphics library
+#
+# Copyright (C) 2015-2016 Zhen Wu <wuzhen@jidemail.com>
+# Copyright (C) 2015-2016 Jide Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+LOCAL_PATH := $(call my-dir)
+
+# get C_SOURCES
+include $(LOCAL_PATH)/Makefile.sources
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := $(C_SOURCES)
+
+LOCAL_MODULE := libmesa_pipe_llvmpipe
+
+include $(GALLIUM_COMMON_MK)
+include $(BUILD_STATIC_LIBRARY)
inline void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
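+ /* Only the fenced path below touches the pushbuf, so take push_mutex
+ * only when a fence is present. */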
+ if (buf->fence)
+ pipe_mutex_lock(buf->fence->screen->push_mutex);
if (buf->fence && buf->fence->state < NOUVEAU_FENCE_STATE_FLUSHED) {
nouveau_fence_work(buf->fence, nouveau_fence_unref_bo, buf->bo);
buf->bo = NULL;
if (buf->mm)
release_allocation(&buf->mm, buf->fence);
+ if (buf->fence)
+ pipe_mutex_unlock(buf->fence->screen->push_mutex);
if (buf->domain == NOUVEAU_BO_VRAM)
NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_vid, -(uint64_t)buf->base.width0);
struct pipe_transfer **ptransfer)
{
struct nouveau_context *nv = nouveau_context(pipe);
+ struct nouveau_screen *screen = nv->screen;
struct nv04_resource *buf = nv04_resource(resource);
struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
uint8_t *map;
buf->data = NULL;
}
nouveau_transfer_staging(nv, tx, false);
+ pipe_mutex_lock(screen->push_mutex);
nouveau_transfer_read(nv, tx);
+ pipe_mutex_unlock(screen->push_mutex);
} else {
/* The buffer is currently idle. Create a staging area for writes,
* and make sure that the cached data is up-to-date. */
if (usage & PIPE_TRANSFER_WRITE)
nouveau_transfer_staging(nv, tx, true);
- if (!buf->data)
+ if (!buf->data) {
+ pipe_mutex_lock(screen->push_mutex);
nouveau_buffer_cache(nv, buf);
+ pipe_mutex_unlock(screen->push_mutex);
+ }
}
}
return buf->data ? (buf->data + box->x) : tx->map;
PIPE_TRANSFER_PERSISTENT))) {
/* Discarding was not possible, must sync because
* subsequent transfers might use UNSYNCHRONIZED. */
+ pipe_mutex_lock(screen->push_mutex);
nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
+ pipe_mutex_unlock(screen->push_mutex);
} else
if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
/* The whole range is being discarded, so it doesn't matter what was
map = tx->map;
} else
if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
- if (usage & PIPE_TRANSFER_DONTBLOCK)
+ if (usage & PIPE_TRANSFER_DONTBLOCK) {
map = NULL;
- else
+ } else {
+ pipe_mutex_lock(screen->push_mutex);
nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
+ pipe_mutex_unlock(screen->push_mutex);
+ }
} else {
/* It is expected that the returned buffer be a representation of the
* data in question, so we must copy it over from the buffer. */
{
struct nouveau_transfer *tx = nouveau_transfer(transfer);
struct nv04_resource *buf = nv04_resource(transfer->resource);
+ struct nouveau_screen *screen = nouveau_context(pipe)->screen;
- if (tx->map)
+ if (tx->map) {
+ pipe_mutex_lock(screen->push_mutex);
nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);
+ pipe_mutex_unlock(screen->push_mutex);
+ }
util_range_add(&buf->valid_buffer_range,
tx->base.box.x + box->x,
struct nouveau_context *nv = nouveau_context(pipe);
struct nouveau_transfer *tx = nouveau_transfer(transfer);
struct nv04_resource *buf = nv04_resource(transfer->resource);
+ struct nouveau_screen *screen = nouveau_context(pipe)->screen;
if (tx->base.usage & PIPE_TRANSFER_WRITE) {
if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
- if (tx->map)
+ if (tx->map) {
+ pipe_mutex_lock(screen->push_mutex);
nouveau_transfer_write(nv, tx, 0, tx->base.box.width);
+ pipe_mutex_unlock(screen->push_mutex);
+ }
util_range_add(&buf->valid_buffer_range,
tx->base.box.x, tx->base.box.x + tx->base.box.width);
++fence->ref;
+ pipe_mutex_lock(screen->fence.list_mutex);
if (screen->fence.tail)
screen->fence.tail->next = fence;
else
screen->fence.head = fence;
screen->fence.tail = fence;
+ pipe_mutex_unlock(screen->fence.list_mutex);
screen->fence.emit(&screen->base, &fence->sequence);
struct nouveau_fence *it;
struct nouveau_screen *screen = fence->screen;
+ /* XXX This can race against fence_update. But fence_update can also call
+ * into this, so we have to be careful.
+ */
if (fence->state == NOUVEAU_FENCE_STATE_EMITTED ||
fence->state == NOUVEAU_FENCE_STATE_FLUSHED) {
if (fence == screen->fence.head) {
return;
screen->fence.sequence_ack = sequence;
+ pipe_mutex_lock(screen->fence.list_mutex);
for (fence = screen->fence.head; fence; fence = next) {
next = fence->next;
sequence = fence->sequence;
if (fence->state == NOUVEAU_FENCE_STATE_EMITTED)
fence->state = NOUVEAU_FENCE_STATE_FLUSHED;
}
+ pipe_mutex_unlock(screen->fence.list_mutex);
}
#define NOUVEAU_FENCE_MAX_SPINS (1 << 31)
uint32_t spins = 0;
int64_t start = 0;
+ /* Fast-path for the case where the fence is already signaled to avoid
+ * messing around with mutexes and timing.
+ */
+ if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
+ return true;
+
if (debug && debug->debug_message)
start = os_time_get_nano();
if (!nouveau_fence_kick(fence))
return false;
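+ /* Drop push_mutex while spinning so other threads can keep submitting
+ * work (and thereby advance the fence) while we wait. */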
+ pipe_mutex_unlock(screen->push_mutex);
+
do {
if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
if (debug && debug->debug_message)
pipe_debug_message(debug, PERF_INFO,
"stalled %.3f ms waiting for fence",
(os_time_get_nano() - start) / 1000000.f);
+ pipe_mutex_lock(screen->push_mutex);
return true;
}
if (!spins)
fence->sequence,
screen->fence.sequence_ack, screen->fence.sequence);
+ pipe_mutex_lock(screen->push_mutex);
+
return false;
}
#ifndef __NOUVEAU_FENCE_H__
#define __NOUVEAU_FENCE_H__
+#include "util/u_atomic.h"
#include "util/u_inlines.h"
#include "util/list.h"
nouveau_fence_ref(struct nouveau_fence *fence, struct nouveau_fence **ref)
{
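+ /* The refcount is maintained atomically so fences can be referenced
+ * and released without holding push_mutex. */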
if (fence)
- ++fence->ref;
+ p_atomic_inc(&fence->ref);
if (*ref) {
- if (--(*ref)->ref == 0)
+ if (p_atomic_dec_zero(&(*ref)->ref))
nouveau_fence_del(*ref);
}
struct pipe_fence_handle *pfence,
uint64_t timeout)
{
+ bool ret;
if (!timeout)
return nouveau_fence_signalled(nouveau_fence(pfence));
- return nouveau_fence_wait(nouveau_fence(pfence), NULL);
+ pipe_mutex_lock(nouveau_screen(screen)->push_mutex);
+ ret = nouveau_fence_wait(nouveau_fence(pfence), NULL);
+ pipe_mutex_unlock(nouveau_screen(screen)->push_mutex);
+ return ret;
}
if (nv_dbg)
nouveau_mesa_debug = atoi(nv_dbg);
+ pipe_mutex_init(screen->push_mutex);
+ pipe_mutex_init(screen->fence.list_mutex);
+
/* These must be set before any failure is possible, as the cleanup
* paths assume they're responsible for deleting them.
*/
nouveau_device_del(&screen->device);
nouveau_drm_del(&screen->drm);
close(fd);
+
+ pipe_mutex_destroy(screen->push_mutex);
+ pipe_mutex_destroy(screen->fence.list_mutex);
}
static void
#include "pipe/p_screen.h"
#include "util/u_memory.h"
+#include "os/os_thread.h"
#ifdef DEBUG
# define NOUVEAU_ENABLE_DRIVER_STATISTICS
struct nouveau_object *channel;
struct nouveau_client *client;
struct nouveau_pushbuf *pushbuf;
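+ /* serializes pushbuf submission between the contexts sharing this
+ * screen */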
+ pipe_mutex push_mutex;
int refcount;
struct nouveau_fence *head;
struct nouveau_fence *tail;
struct nouveau_fence *current;
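+ /* protects the fence list (head/tail) above */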
+ pipe_mutex list_mutex;
u32 sequence;
u32 sequence_ack;
void (*emit)(struct pipe_screen *, u32 *sequence);
struct pipe_framebuffer_state *fb = &nv30->framebuffer;
uint32_t colr = 0, zeta = 0, mode = 0;
- if (!nv30_state_validate(nv30, NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR, true))
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
+ if (!nv30_state_validate(nv30, NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR, true)) {
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
return;
+ }
if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) {
colr = pack_rgba(fb->cbufs[0]->format, color->f);
PUSH_DATA (push, mode);
nv30_state_release(nv30);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
}
static void
rt_format |= NV30_3D_RT_FORMAT_TYPE_LINEAR;
}
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
+
refn.bo = mt->base.bo;
refn.flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_WR;
if (nouveau_pushbuf_space(push, 32, 1, 0) ||
- nouveau_pushbuf_refn (push, &refn, 1))
+ nouveau_pushbuf_refn (push, &refn, 1)) {
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
return;
+ }
BEGIN_NV04(push, NV30_3D(RT_ENABLE), 1);
PUSH_DATA (push, NV30_3D_RT_ENABLE_COLOR0);
NV30_3D_CLEAR_BUFFERS_COLOR_B |
NV30_3D_CLEAR_BUFFERS_COLOR_A);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
+
nv30->dirty |= NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR;
}
if (buffers & PIPE_CLEAR_STENCIL)
mode |= NV30_3D_CLEAR_BUFFERS_STENCIL;
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
+
refn.bo = mt->base.bo;
refn.flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_WR;
if (nouveau_pushbuf_space(push, 32, 1, 0) ||
- nouveau_pushbuf_refn (push, &refn, 1))
+ nouveau_pushbuf_refn (push, &refn, 1)) {
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
return;
+ }
BEGIN_NV04(push, NV30_3D(RT_ENABLE), 1);
PUSH_DATA (push, 0);
BEGIN_NV04(push, NV30_3D(CLEAR_BUFFERS), 1);
PUSH_DATA (push, mode);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
+
nv30->dirty |= NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR;
}
if (!nv30)
return NULL;
+ pipe_mutex_lock(screen->base.push_mutex);
+
nv30->screen = screen;
nv30->base.screen = &screen->base;
nv30->base.copy_data = nv30_transfer_copy_data;
ret = nouveau_bufctx_new(nv30->base.client, 64, &nv30->bufctx);
if (ret) {
nv30_context_destroy(pipe);
+ pipe_mutex_unlock(screen->base.push_mutex);
return NULL;
}
nv30->blitter = util_blitter_create(pipe);
if (!nv30->blitter) {
nv30_context_destroy(pipe);
+ pipe_mutex_unlock(screen->base.push_mutex);
return NULL;
}
nouveau_context_init_vdec(&nv30->base);
+ pipe_mutex_unlock(screen->base.push_mutex);
+
return pipe;
}
struct nouveau_pushbuf *push = nv30->screen->base.pushbuf;
unsigned i;
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
+
BEGIN_NV04(push, NV30_3D(VTXBUF(0)), r->vertex_info.num_attribs);
for (i = 0; i < r->vertex_info.num_attribs; i++) {
PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP,
NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, NV30_3D_VTXBUF_DMA1);
}
- if (!nv30_state_validate(nv30, ~0, false))
+ if (!nv30_state_validate(nv30, ~0, false)) {
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
return;
+ }
BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
PUSH_DATA (push, r->prim);
BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
PUSH_RESET(push, BUFCTX_VTXTMP);
+
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
}
static void
unsigned ps = fn + (pn ? 1 : 0);
unsigned i;
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
+
BEGIN_NV04(push, NV30_3D(VTXBUF(0)), r->vertex_info.num_attribs);
for (i = 0; i < r->vertex_info.num_attribs; i++) {
PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP,
NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, NV30_3D_VTXBUF_DMA1);
}
- if (!nv30_state_validate(nv30, ~0, false))
+ if (!nv30_state_validate(nv30, ~0, false)) {
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
return;
+ }
BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
PUSH_DATA (push, r->prim);
BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
PUSH_RESET(push, BUFCTX_VTXTMP);
+
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
}
static void
nv30_render_validate(nv30);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
+
if (nv30->draw_dirty & NV30_NEW_VIEWPORT)
draw_set_viewport_states(draw, 0, 1, &nv30->viewport);
if (nv30->draw_dirty & NV30_NEW_RASTERIZER)
if (transfer[i])
pipe_buffer_unmap(pipe, transfer[i]);
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
+
nv30->draw_dirty = 0;
nv30_state_release(nv30);
}
struct nv30_fragprog *fp = nv30->fragprog.program;
struct pipe_context *pipe = &nv30->base.pipe;
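+ /* The buffer helpers below may take push_mutex themselves, so drop it
+ * across the calls and retake it before returning. */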
+ pipe_mutex_unlock(nv->screen->push_mutex);
+
if (unlikely(!fp->buffer))
fp->buffer = pipe_buffer_create(pipe->screen, 0, 0, fp->insn_len * 4);
if (nv04_resource(fp->buffer)->domain != NOUVEAU_BO_VRAM)
nouveau_buffer_migrate(nv, nv04_resource(fp->buffer), NOUVEAU_BO_VRAM);
+
+ pipe_mutex_lock(nv->screen->push_mutex);
}
void
struct nv30_context *nv30 = nv30_context(pipe);
struct nv30_rect src, dst;
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
if (dstres->target == PIPE_BUFFER && srcres->target == PIPE_BUFFER) {
nouveau_copy_buffer(&nv30->base,
nv04_resource(dstres), dstx,
nv04_resource(srcres), src_box->x, src_box->width);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
return;
}
src_box->width, src_box->height, &dst);
nv30_transfer_rect(nv30, NEAREST, &src, &dst);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
}
static void
y1 = src.y1;
/* On nv3x we must use sifm which is restricted to 1024x1024 tiles */
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
for (y = src.y0; y < y1; y += h) {
h = y1 - y;
if (h > 1024)
nv30_transfer_rect(nv30, BILINEAR, &src, &dst);
}
}
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
}
void
tx->tmp.y1 = tx->tmp.h;
tx->tmp.z = 0;
- if (usage & PIPE_TRANSFER_READ)
+ if (usage & PIPE_TRANSFER_READ) {
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
nv30_transfer_rect(nv30, NEAREST, &tx->img, &tx->tmp);
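+ /* submit the read blit now so the CPU map below sees its results
+ * without retaking the lock */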
+ PUSH_KICK(nv30->base.pushbuf);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
+ }
if (tx->tmp.bo->map) {
*ptransfer = &tx->base;
struct nv30_transfer *tx = nv30_transfer(ptx);
if (ptx->usage & PIPE_TRANSFER_WRITE) {
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
nv30_transfer_rect(nv30, NEAREST, &tx->tmp, &tx->img);
/* Allow the copies above to finish executing before freeing the source */
nouveau_fence_work(nv30->screen->base.fence.current,
nouveau_fence_unref_bo, tx->tmp.bo);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
} else {
nouveau_bo_ref(NULL, &tx->tmp.bo);
}
struct nv30_query *q = nv30_query(pq);
struct nouveau_pushbuf *push = nv30->base.pushbuf;
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
switch (q->type) {
case PIPE_QUERY_TIME_ELAPSED:
q->qo[0] = nv30_query_object_new(nv30->screen);
}
break;
case PIPE_QUERY_TIMESTAMP:
- return true;
+ break;
default:
BEGIN_NV04(push, NV30_3D(QUERY_RESET), 1);
PUSH_DATA (push, q->report);
BEGIN_NV04(push, SUBC_3D(q->enable), 1);
PUSH_DATA (push, 1);
}
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
return true;
}
struct nv30_query *q = nv30_query(pq);
struct nouveau_pushbuf *push = nv30->base.pushbuf;
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
q->qo[1] = nv30_query_object_new(screen);
if (q->qo[1]) {
BEGIN_NV04(push, NV30_3D(QUERY_GET), 1);
PUSH_DATA (push, 0);
}
PUSH_KICK (push);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
return true;
}
nv30->render_cond_mode = mode;
nv30->render_cond_cond = condition;
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
if (!pq) {
BEGIN_NV04(push, SUBC_3D(0x1e98), 1);
PUSH_DATA (push, 0x01000000);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
return;
}
BEGIN_NV04(push, SUBC_3D(0x1e98), 1);
PUSH_DATA (push, 0x02000000 | q->qo[1]->hw->start);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
}
static void
if (nv30->vbo_push_hint != !!nv30->vbo_fifo)
nv30->dirty |= NV30_NEW_ARRAYS;
+ pipe_mutex_lock(nv30->screen->base.push_mutex);
+
push->user_priv = &nv30->bufctx;
if (nv30->vbo_user && !(nv30->dirty & (NV30_NEW_VERTEX | NV30_NEW_ARRAYS)))
nv30_update_user_vbufs(nv30);
nv30_state_validate(nv30, ~0, true);
if (nv30->draw_flags) {
nv30_render_vbo(pipe, info);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
return;
} else
if (nv30->vbo_fifo) {
nv30_push_vbo(nv30, info);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
return;
}
nv30_state_release(nv30);
nv30_release_user_vbufs(nv30);
+ pipe_mutex_unlock(nv30->screen->base.push_mutex);
}
void
struct nv50_program *cp = nv50->compprog;
bool ret;
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
ret = !nv50_state_validate_cp(nv50, ~0);
if (ret) {
NOUVEAU_ERR("Failed to launch grid !\n");
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
return;
}
BEGIN_NV04(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 1);
PUSH_DATA (push, 0);
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
+
/* bind a compute shader clobbers fragment shader state */
nv50->dirty_3d |= NV50_NEW_3D_FRAGPROG;
}
if (fence)
nouveau_fence_ref(screen->fence.current, (struct nouveau_fence **)fence);
+ pipe_mutex_lock(screen->push_mutex);
PUSH_KICK(screen->pushbuf);
+ pipe_mutex_unlock(screen->push_mutex);
nouveau_context_update_frame_stats(nouveau_context(pipe));
}
{
struct nouveau_pushbuf *push = nv50_context(pipe)->base.pushbuf;
+ pipe_mutex_lock(nouveau_context(pipe)->screen->push_mutex);
BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
PUSH_DATA (push, 0);
BEGIN_NV04(push, NV50_3D(TEX_CACHE_CTL), 1);
PUSH_DATA (push, 0x20);
+ pipe_mutex_unlock(nouveau_context(pipe)->screen->push_mutex);
}
static void
data_words = string_words;
else
data_words = string_words + !!(len & 3);
+ pipe_mutex_lock(nouveau_context(pipe)->screen->push_mutex);
BEGIN_NI04(push, SUBC_3D(NV04_GRAPH_NOP), data_words);
if (string_words)
PUSH_DATAp(push, str, string_words);
memcpy(&data, &str[string_words * 4], len & 3);
PUSH_DATA (push, data);
}
+ pipe_mutex_unlock(nouveau_context(pipe)->screen->push_mutex);
}
void
return NULL;
pipe = &nv50->base.pipe;
+ pipe_mutex_lock(screen->base.push_mutex);
+
if (!nv50_blitctx_create(nv50))
goto out_err;
util_dynarray_init(&nv50->global_residents);
+ pipe_mutex_unlock(screen->base.push_mutex);
+
return pipe;
out_err:
+ pipe_mutex_unlock(screen->base.push_mutex);
if (nv50->bufctx_3d)
nouveau_bufctx_del(&nv50->bufctx_3d);
if (nv50->bufctx_cp)
/* nv50_draw.c */
extern struct draw_stage *nv50_draw_render_stage(struct nv50_context *);
+/* nv50_query.c */
+void nv50_render_condition(struct pipe_context *pipe,
+ struct pipe_query *pq,
+ boolean condition, uint mode);
+
/* nv50_shader_state.c */
void nv50_vertprog_validate(struct nv50_context *);
void nv50_gmtyprog_validate(struct nv50_context *);
{
struct nv50_miptree *mt = nv50_miptree(pt);
- if (mt->base.fence && mt->base.fence->state < NOUVEAU_FENCE_STATE_FLUSHED)
+ if (mt->base.fence && mt->base.fence->state < NOUVEAU_FENCE_STATE_FLUSHED) {
+ pipe_mutex_lock(nouveau_screen(pscreen)->push_mutex);
nouveau_fence_work(mt->base.fence, nouveau_fence_unref_bo, mt->base.bo);
- else
+ pipe_mutex_unlock(nouveau_screen(pscreen)->push_mutex);
+ } else {
nouveau_bo_ref(NULL, &mt->base.bo);
+ }
nouveau_fence_ref(NULL, &mt->base.fence);
nouveau_fence_ref(NULL, &mt->base.fence_wr);
return q->funcs->get_query_result(nv50_context(pipe), q, wait, result);
}
-static void
+void
nv50_render_condition(struct pipe_context *pipe,
struct pipe_query *pq,
boolean condition, uint mode)
}
static void
+nv50_render_condition_locked(struct pipe_context *pipe,
+ struct pipe_query *pq,
+ boolean condition, uint mode)
+{
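+ /* pipe->render_condition entry point: wraps the unlocked variant,
+ * which internal callers invoke with push_mutex already held. */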
+ pipe_mutex_lock(nouveau_context(pipe)->screen->push_mutex);
+ nv50_render_condition(pipe, pq, condition, mode);
+ pipe_mutex_unlock(nouveau_context(pipe)->screen->push_mutex);
+}
+
+static void
nv50_set_active_query_state(struct pipe_context *pipe, boolean enable)
{
}
pipe->end_query = nv50_end_query;
pipe->get_query_result = nv50_get_query_result;
pipe->set_active_query_state = nv50_set_active_query_state;
- pipe->render_condition = nv50_render_condition;
+ pipe->render_condition = nv50_render_condition_locked;
nv50->cond_condmode = NV50_3D_COND_MODE_ALWAYS;
}
if (hq->mm) {
if (hq->state == NV50_HW_QUERY_STATE_READY)
nouveau_mm_free(hq->mm);
- else
+ else {
+ pipe_mutex_lock(screen->base.push_mutex);
nouveau_fence_work(screen->base.fence.current,
nouveau_mm_free_work, hq->mm);
+ pipe_mutex_unlock(screen->base.push_mutex);
+ }
}
}
if (size) {
{
struct nouveau_pushbuf *push = nv50->base.pushbuf;
struct nv50_hw_query *hq = nv50_hw_query(q);
+ bool ret = true;
if (hq->funcs && hq->funcs->begin_query)
return hq->funcs->begin_query(nv50, hq);
if (!hq->is64bit)
hq->data[0] = hq->sequence++; /* the previously used one */
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
break;
default:
assert(0);
- return false;
+ ret = false;
+ break;
}
- hq->state = NV50_HW_QUERY_STATE_ACTIVE;
- return true;
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
+ if (ret)
+ hq->state = NV50_HW_QUERY_STATE_ACTIVE;
+ return ret;
}
static void
hq->state = NV50_HW_QUERY_STATE_ENDED;
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
assert(0);
break;
}
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
if (hq->is64bit)
nouveau_fence_ref(nv50->screen->base.fence.current, &hq->fence);
}
nv50_hw_query_update(q);
if (hq->state != NV50_HW_QUERY_STATE_READY) {
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
if (!wait) {
/* for broken apps that spin on GL_QUERY_RESULT_AVAILABLE */
if (hq->state != NV50_HW_QUERY_STATE_FLUSHED) {
hq->state = NV50_HW_QUERY_STATE_FLUSHED;
PUSH_KICK(nv50->base.pushbuf);
}
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
return false;
}
- if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nv50->screen->base.client))
+ if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nv50->screen->base.client)) {
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
return false;
+ }
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
}
hq->state = NV50_HW_QUERY_STATE_READY;
return false;
}
+ pipe_mutex_lock(screen->base.push_mutex);
assert(cfg->num_counters <= 4);
PUSH_SPACE(push, 4 * 4);
BEGIN_NV04(push, NV50_CP(MP_PM_SET(c)), 1);
PUSH_DATA (push, 0);
}
+ pipe_mutex_unlock(screen->base.push_mutex);
return true;
}
screen->pm.prog = prog;
}
+ pipe_mutex_lock(screen->base.push_mutex);
/* disable all counting */
PUSH_SPACE(push, 8);
for (c = 0; c < 4; c++) {
PUSH_SPACE(push, 2);
BEGIN_NV04(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 1);
PUSH_DATA (push, 0);
+ pipe_mutex_unlock(screen->base.push_mutex);
pipe->bind_compute_state(pipe, screen->pm.prog);
input[0] = hq->bo->offset + hq->base_offset;
nouveau_bufctx_reset(nv50->bufctx_cp, NV50_BIND_CP_QUERY);
+ pipe_mutex_lock(screen->base.push_mutex);
+ /* re-activate other counters */
PUSH_SPACE(push, 8);
mask = 0;
| cfg->ctr[i].unit | cfg->ctr[i].mode);
}
}
+ pipe_mutex_unlock(screen->base.push_mutex);
}
static inline bool
cfg = nv50_hw_sm_query_get_cfg(nv50, hq);
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
ret = nv50_hw_sm_query_read_data(count, nv50, wait, hq, cfg, mp_count);
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
if (!ret)
return false;
bool m2mf;
unsigned dst_layer = dstz, src_layer = src_box->z;
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
+
if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
nouveau_copy_buffer(&nv50->base,
nv04_resource(dst), dstx,
nv04_resource(src), src_box->x, src_box->width);
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
return;
}
else
srect.base += src_mt->layer_stride;
}
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
return;
}
break;
}
nouveau_bufctx_reset(nv50->bufctx, NV50_BIND_2D);
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
}
static void
assert(dst->texture->target != PIPE_BUFFER);
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
+
BEGIN_NV04(push, NV50_3D(CLEAR_COLOR(0)), 4);
PUSH_DATAf(push, color->f[0]);
PUSH_DATAf(push, color->f[1]);
PUSH_DATAf(push, color->f[2]);
PUSH_DATAf(push, color->f[3]);
- if (nouveau_pushbuf_space(push, 64 + sf->depth, 1, 0))
+ if (nouveau_pushbuf_space(push, 64 + sf->depth, 1, 0)) {
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
return;
+ }
PUSH_REFN(push, bo, mt->base.domain | NOUVEAU_BO_WR);
PUSH_DATA (push, nv50->cond_condmode);
}
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
+
nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER | NV50_NEW_3D_SCISSOR;
}
assert(dst->texture->target != PIPE_BUFFER);
assert(nouveau_bo_memtype(bo)); /* ZETA cannot be linear */
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
+
if (clear_flags & PIPE_CLEAR_DEPTH) {
BEGIN_NV04(push, NV50_3D(CLEAR_DEPTH), 1);
PUSH_DATAf(push, depth);
mode |= NV50_3D_CLEAR_BUFFERS_S;
}
- if (nouveau_pushbuf_space(push, 64 + sf->depth, 1, 0))
+ if (nouveau_pushbuf_space(push, 64 + sf->depth, 1, 0)) {
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
return;
+ }
PUSH_REFN(push, bo, mt->base.domain | NOUVEAU_BO_WR);
PUSH_DATA (push, nv50->cond_condmode);
}
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
+
nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER | NV50_NEW_3D_SCISSOR;
}
unsigned i, j, k;
uint32_t mode = 0;
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
/* don't need NEW_BLEND, COLOR_MASK doesn't affect CLEAR_BUFFERS */
- if (!nv50_state_validate_3d(nv50, NV50_NEW_3D_FRAMEBUFFER))
+ if (!nv50_state_validate_3d(nv50, NV50_NEW_3D_FRAMEBUFFER)) {
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
return;
+ }
/* We have to clear ALL of the layers, not up to the min number of layers
* of any attachment. */
/* restore the array mode */
BEGIN_NV04(push, NV50_3D(RT_ARRAY_MODE), 1);
PUSH_DATA (push, nv50->rt_array_mode);
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
}
static void
assert(size % data_size == 0);
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
+
if (offset & 0xff) {
unsigned fixup_size = MIN2(size, align(offset, 0x100) - offset);
assert(fixup_size % data_size == 0);
nv50_clear_buffer_push(pipe, res, offset, fixup_size, data, data_size);
offset += fixup_size;
size -= fixup_size;
- if (!size)
+ if (!size) {
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
return;
+ }
}
elements = size / data_size;
PUSH_DATAf(push, color.f[2]);
PUSH_DATAf(push, color.f[3]);
- if (nouveau_pushbuf_space(push, 64, 1, 0))
+ if (nouveau_pushbuf_space(push, 64, 1, 0)) {
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
return;
+ }
PUSH_REFN(push, buf->bo, buf->domain | NOUVEAU_BO_WR);
data, data_size);
}
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
+
nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER | NV50_NEW_3D_SCISSOR;
}
info->src.box.height != -info->dst.box.height))
eng3d = true;
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
+
if (nv50->screen->num_occlusion_queries_active) {
BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
PUSH_DATA (push, 0);
BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
PUSH_DATA (push, 1);
}
+
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
}
static void
unsigned base = tx->rect[0].base;
unsigned z = tx->rect[0].z;
unsigned i;
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
for (i = 0; i < box->depth; ++i) {
nv50_m2mf_transfer_rect(nv50, &tx->rect[1], &tx->rect[0],
tx->nblocksx, tx->nblocksy);
tx->rect[0].base += mt->layer_stride;
tx->rect[1].base += size;
}
+ /* Kick these reads out so we don't have to reacquire a lock below */
+ PUSH_KICK(nv50->base.pushbuf);
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
tx->rect[0].z = z;
tx->rect[0].base = base;
tx->rect[1].base = 0;
unsigned i;
if (tx->base.usage & PIPE_TRANSFER_WRITE) {
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
for (i = 0; i < tx->base.box.depth; ++i) {
nv50_m2mf_transfer_rect(nv50, &tx->rect[0], &tx->rect[1],
tx->nblocksx, tx->nblocksy);
/* Allow the copies above to finish executing before freeing the source */
nouveau_fence_work(nv50->screen->base.fence.current,
nouveau_fence_unref_bo, tx->rect[1].bo);
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
} else {
nouveau_bo_ref(NULL, &tx->rect[1].bo);
}
bool tex_dirty = false;
int s;
+ pipe_mutex_lock(nv50->screen->base.push_mutex);
+
/* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
nv50->vb_elt_first = info->min_index + info->index_bias;
nv50->vb_elt_limit = info->max_index - info->min_index;
nv50_push_vbo(nv50, info);
push->kick_notify = nv50_default_kick_notify;
nouveau_pushbuf_bufctx(push, NULL);
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
return;
}
nv50_release_user_vbufs(nv50);
nouveau_pushbuf_bufctx(push, NULL);
+
+ pipe_mutex_unlock(nv50->screen->base.push_mutex);
}
nvc0_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
{
struct nvc0_context *nvc0 = nvc0_context(pipe);
+ struct nvc0_screen *screen = nvc0->screen;
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
struct nvc0_program *cp = nvc0->compprog;
int ret;
+ pipe_mutex_lock(screen->base.push_mutex);
+
ret = !nvc0_state_validate_cp(nvc0, ~0);
if (ret) {
NOUVEAU_ERR("Failed to launch grid !\n");
+ pipe_mutex_unlock(screen->base.push_mutex);
return;
}
nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
nvc0->dirty_cp |= NVC0_NEW_CP_SURFACES;
nvc0->images_dirty[5] |= nvc0->images_valid[5];
+
+ pipe_mutex_unlock(screen->base.push_mutex);
}
if (fence)
nouveau_fence_ref(screen->fence.current, (struct nouveau_fence **)fence);
+ pipe_mutex_lock(screen->push_mutex);
PUSH_KICK(nvc0->base.pushbuf); /* fencing handled in kick_notify */
+ pipe_mutex_unlock(screen->push_mutex);
nouveau_context_update_frame_stats(&nvc0->base);
}
{
struct nouveau_pushbuf *push = nvc0_context(pipe)->base.pushbuf;
+ pipe_mutex_lock(nvc0_context(pipe)->screen->base.push_mutex);
IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);
+ pipe_mutex_unlock(nvc0_context(pipe)->screen->base.push_mutex);
}
static void
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
int i, s;
+ pipe_mutex_lock(nvc0_context(pipe)->screen->base.push_mutex);
+
if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
for (i = 0; i < nvc0->num_vtxbufs; ++i) {
if (!nvc0->vtxbuf[i].buffer)
nvc0->cb_dirty = true;
if (flags & (PIPE_BARRIER_VERTEX_BUFFER | PIPE_BARRIER_INDEX_BUFFER))
nvc0->base.vbo_dirty = true;
+
+ pipe_mutex_unlock(nvc0_context(pipe)->screen->base.push_mutex);
}
static void
data_words = string_words;
else
data_words = string_words + !!(len & 3);
+ pipe_mutex_lock(nvc0_context(pipe)->screen->base.push_mutex);
BEGIN_NIC0(push, SUBC_3D(NV04_GRAPH_NOP), data_words);
if (string_words)
PUSH_DATAp(push, str, string_words);
memcpy(&data, &str[string_words * 4], len & 3);
PUSH_DATA (push, data);
}
+ pipe_mutex_unlock(nvc0_context(pipe)->screen->base.push_mutex);
}
static void
return NULL;
pipe = &nvc0->base.pipe;
+ pipe_mutex_lock(screen->base.push_mutex);
+
if (!nvc0_blitctx_create(nvc0))
goto out_err;
util_dynarray_init(&nvc0->global_residents);
+ pipe_mutex_unlock(screen->base.push_mutex);
+
return pipe;
out_err:
+ pipe_mutex_unlock(screen->base.push_mutex);
if (nvc0) {
if (nvc0->bufctx_3d)
nouveau_bufctx_del(&nvc0->bufctx_3d);
uint32_t label);
void nvc0_program_init_tcp_empty(struct nvc0_context *);
+/* nvc0_query.c */
+void nvc0_render_condition(struct pipe_context *pipe,
+ struct pipe_query *pq,
+ boolean condition, uint mode);
+
/* nvc0_shader_state.c */
void nvc0_vertprog_validate(struct nvc0_context *);
void nvc0_tctlprog_validate(struct nvc0_context *);
index, resource, offset);
}
-static void
+void
nvc0_render_condition(struct pipe_context *pipe,
struct pipe_query *pq,
boolean condition, uint mode)
}
}
+static void
+nvc0_render_condition_locked(struct pipe_context *pipe,
+ struct pipe_query *pq,
+ boolean condition, uint mode)
+{
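+ /* pipe->render_condition entry point: wraps the unlocked variant,
+ * which the blitter calls directly with push_mutex already held. */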
+ pipe_mutex_lock(nouveau_context(pipe)->screen->push_mutex);
+ nvc0_render_condition(pipe, pq, condition, mode);
+ pipe_mutex_unlock(nouveau_context(pipe)->screen->push_mutex);
+}
+
int
nvc0_screen_get_driver_query_info(struct pipe_screen *pscreen,
unsigned id,
pipe->get_query_result = nvc0_get_query_result;
pipe->get_query_result_resource = nvc0_get_query_result_resource;
pipe->set_active_query_state = nvc0_set_active_query_state;
- pipe->render_condition = nvc0_render_condition;
+ pipe->render_condition = nvc0_render_condition_locked;
nvc0->cond_condmode = NVC0_3D_COND_MODE_ALWAYS;
}
if (hq->mm) {
if (hq->state == NVC0_HW_QUERY_STATE_READY)
nouveau_mm_free(hq->mm);
- else
+ else {
+ pipe_mutex_lock(screen->base.push_mutex);
nouveau_fence_work(screen->base.fence.current,
nouveau_mm_free_work, hq->mm);
+ pipe_mutex_unlock(screen->base.push_mutex);
+ }
}
}
if (size) {
}
hq->sequence++;
+ pipe_mutex_lock(nvc0->screen->base.push_mutex);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
default:
break;
}
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
hq->state = NVC0_HW_QUERY_STATE_ACTIVE;
return ret;
}
}
hq->state = NVC0_HW_QUERY_STATE_ENDED;
+ pipe_mutex_lock(nvc0->screen->base.push_mutex);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
default:
break;
}
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
if (hq->is64bit)
nouveau_fence_ref(nvc0->screen->base.fence.current, &hq->fence);
}
nvc0_hw_query_update(nvc0->screen->base.client, q);
if (hq->state != NVC0_HW_QUERY_STATE_READY) {
+ pipe_mutex_lock(nvc0->screen->base.push_mutex);
if (!wait) {
if (hq->state != NVC0_HW_QUERY_STATE_FLUSHED) {
hq->state = NVC0_HW_QUERY_STATE_FLUSHED;
/* flush for silly apps that spin on GL_QUERY_RESULT_AVAILABLE */
PUSH_KICK(nvc0->base.pushbuf);
}
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
return false;
}
- if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nvc0->screen->base.client))
+ if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nvc0->screen->base.client)) {
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
return false;
+ }
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
NOUVEAU_DRV_STAT(&nvc0->screen->base, query_sync_count, 1);
}
hq->state = NVC0_HW_QUERY_STATE_READY;
assert(!hq->funcs || !hq->funcs->get_query_result);
+ pipe_mutex_lock(nvc0->screen->base.push_mutex);
+
if (index == -1) {
/* TODO: Use a macro to write the availability of the query */
if (hq->state != NVC0_HW_QUERY_STATE_READY)
nvc0->base.push_cb(&nvc0->base, buf, offset,
result_type >= PIPE_QUERY_TYPE_I64 ? 2 : 1,
ready);
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
return;
}
4 | NVC0_IB_ENTRY_1_NO_PREFETCH);
}
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
+
if (buf->mm) {
nouveau_fence_ref(nvc0->screen->base.fence.current, &buf->fence);
nouveau_fence_ref(nvc0->screen->base.fence.current, &buf->fence_wr);
return false;
}
+ pipe_mutex_lock(screen->base.push_mutex);
assert(cfg->num_counters <= 4);
PUSH_SPACE(push, 4 * 8 * + 6);
PUSH_DATA (push, 0xff);
}
+ pipe_mutex_unlock(screen->base.push_mutex);
return true;
}
return false;
}
+ pipe_mutex_lock(screen->base.push_mutex);
assert(cfg->num_counters <= 8);
PUSH_SPACE(push, 8 * 8 + 2);
BEGIN_NVC0(push, NVC0_CP(MP_PM_SET(c)), 1);
PUSH_DATA (push, 0);
}
+ pipe_mutex_unlock(screen->base.push_mutex);
return true;
}
if (unlikely(!screen->pm.prog))
screen->pm.prog = nvc0_hw_sm_get_program(screen);
+ pipe_mutex_lock(screen->base.push_mutex);
/* disable all counting */
PUSH_SPACE(push, 8);
for (c = 0; c < 8; ++c)
/* upload input data for the compute shader which reads MP counters */
nvc0_hw_sm_upload_input(nvc0, hq);
+ pipe_mutex_unlock(screen->base.push_mutex);
pipe->bind_compute_state(pipe, screen->pm.prog);
for (i = 0; i < 3; i++) {
nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_QUERY);
+ pipe_mutex_lock(screen->base.push_mutex);
/* re-activate other counters */
PUSH_SPACE(push, 16);
mask = 0;
PUSH_DATA (push, (cfg->ctr[i].func << 4) | cfg->ctr[i].mode);
}
}
+ pipe_mutex_unlock(screen->base.push_mutex);
}
static inline bool
* _current_ one, and remove both.
*/
nouveau_fence_ref(screen->base.fence.current, &current);
+ pipe_mutex_lock(screen->base.push_mutex);
nouveau_fence_wait(current, NULL);
+ pipe_mutex_unlock(screen->base.push_mutex);
nouveau_fence_ref(NULL, &current);
nouveau_fence_ref(NULL, &screen->base.fence.current);
}
bool m2mf;
unsigned dst_layer = dstz, src_layer = src_box->z;
+ pipe_mutex_lock(nvc0->screen->base.push_mutex);
+
if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
nouveau_copy_buffer(&nvc0->base,
nv04_resource(dst), dstx,
nv04_resource(src), src_box->x, src_box->width);
NOUVEAU_DRV_STAT(&nvc0->screen->base, buf_copy_bytes, src_box->width);
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
return;
}
NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_copy_count, 1);
else
srect.base += src_mt->layer_stride;
}
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
return;
}
break;
}
nouveau_bufctx_reset(nvc0->bufctx, 0);
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
}
static void
assert(dst->texture->target != PIPE_BUFFER);
- if (!PUSH_SPACE(push, 32 + sf->depth))
+ pipe_mutex_lock(nvc0->screen->base.push_mutex);
+
+ if (!PUSH_SPACE(push, 32 + sf->depth)) {
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
return;
+ }
PUSH_REFN (push, res->bo, res->domain | NOUVEAU_BO_WR);
IMMED_NVC0(push, NVC0_3D(COND_MODE), nvc0->cond_condmode);
nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
+
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
}
static void
assert(size % data_size == 0);
+ pipe_mutex_lock(nvc0->screen->base.push_mutex);
+
if (data_size == 12) {
nvc0_clear_buffer_push(pipe, res, offset, size, data, data_size);
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
return;
}
nvc0_clear_buffer_push(pipe, res, offset, fixup_size, data, data_size);
offset += fixup_size;
size -= fixup_size;
- if (!size)
+ if (!size) {
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
return;
+ }
}
elements = size / data_size;
width &= ~0xff;
assert(width > 0);
- if (!PUSH_SPACE(push, 40))
+ if (!PUSH_SPACE(push, 40)) {
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
return;
+ }
PUSH_REFN (push, buf->bo, buf->domain | NOUVEAU_BO_WR);
}
nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
+
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
}
static void
assert(dst->texture->target != PIPE_BUFFER);
- if (!PUSH_SPACE(push, 32 + sf->depth))
+ pipe_mutex_lock(nvc0->screen->base.push_mutex);
+ if (!PUSH_SPACE(push, 32 + sf->depth)) {
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
return;
+ }
PUSH_REFN (push, mt->base.bo, mt->base.domain | NOUVEAU_BO_WR);
IMMED_NVC0(push, NVC0_3D(COND_MODE), nvc0->cond_condmode);
nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
+
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
}
void
unsigned i, j, k;
uint32_t mode = 0;
+ pipe_mutex_lock(nvc0->screen->base.push_mutex);
+
/* don't need NEW_BLEND, COLOR_MASK doesn't affect CLEAR_BUFFERS */
- if (!nvc0_state_validate_3d(nvc0, NVC0_NEW_3D_FRAMEBUFFER))
+ if (!nvc0_state_validate_3d(nvc0, NVC0_NEW_3D_FRAMEBUFFER)) {
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
return;
+ }
if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) {
BEGIN_NVC0(push, NVC0_3D(CLEAR_COLOR(0)), 4);
(j << NVC0_3D_CLEAR_BUFFERS_LAYER__SHIFT));
}
}
+
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
}
nvc0->samplers_dirty[4] |= 3;
if (nvc0->cond_query && !blit->render_condition_enable)
- nvc0->base.pipe.render_condition(&nvc0->base.pipe, nvc0->cond_query,
- nvc0->cond_cond, nvc0->cond_mode);
+ nvc0_render_condition(&nvc0->base.pipe, nvc0->cond_query,
+ nvc0->cond_cond, nvc0->cond_mode);
nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX_TMP);
nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_FB);
if (info->num_window_rectangles > 0 || info->window_rectangle_include)
eng3d = true;
+ pipe_mutex_lock(nvc0->screen->base.push_mutex);
+
if (nvc0->screen->num_occlusion_queries_active)
IMMED_NVC0(push, NVC0_3D(SAMPLECNT_ENABLE), 0);
if (nvc0->screen->num_occlusion_queries_active)
IMMED_NVC0(push, NVC0_3D(SAMPLECNT_ENABLE), 1);
+ pipe_mutex_unlock(nvc0->screen->base.push_mutex);
+
NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_blit_count, 1);
}
return !mt->base.fence_wr || nouveau_fence_wait(mt->base.fence_wr, &nvc0->base.debug);
}
-void *
-nvc0_miptree_transfer_map(struct pipe_context *pctx,
- struct pipe_resource *res,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box,
- struct pipe_transfer **ptransfer)
+static void *
+nvc0_miptree_transfer_map_unlocked(
+ struct pipe_context *pctx,
+ struct pipe_resource *res,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
{
struct nvc0_context *nvc0 = nvc0_context(pctx);
- struct nouveau_device *dev = nvc0->screen->base.device;
+ struct nvc0_screen *screen = nvc0->screen;
+ struct nouveau_device *dev = screen->base.device;
struct nv50_miptree *mt = nv50_miptree(res);
struct nvc0_transfer *tx;
uint32_t size;
return tx->rect[1].bo->map;
}
-void
-nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
- struct pipe_transfer *transfer)
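+/* Locked wrappers: the public transfer entry points take the screen's
+ * push_mutex around the _unlocked implementations. */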
+void *
+nvc0_miptree_transfer_map(
+ struct pipe_context *pctx,
+ struct pipe_resource *res,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
+{
+ struct nvc0_context *nvc0 = nvc0_context(pctx);
+ struct nvc0_screen *screen = nvc0->screen;
+
+ pipe_mutex_lock(screen->base.push_mutex);
+ void *ret = nvc0_miptree_transfer_map_unlocked(
+ pctx, res, level, usage, box, ptransfer);
+ pipe_mutex_unlock(screen->base.push_mutex);
+
+ return ret;
+}
+
+static void
+nvc0_miptree_transfer_unmap_unlocked(struct pipe_context *pctx,
+ struct pipe_transfer *transfer)
{
struct nvc0_context *nvc0 = nvc0_context(pctx);
struct nvc0_transfer *tx = (struct nvc0_transfer *)transfer;
FREE(tx);
}
+void
+nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
+ struct pipe_transfer *transfer)
+{
+ struct nvc0_context *nvc0 = nvc0_context(pctx);
+ struct nvc0_screen *screen = nvc0->screen;
+
+ pipe_mutex_lock(screen->base.push_mutex);
+ nvc0_miptree_transfer_unmap_unlocked(pctx, transfer);
+ pipe_mutex_unlock(screen->base.push_mutex);
+}
+
/* This happens rather often with D3D9/st. */
static void
nvc0_cb_push(struct nouveau_context *nv,
struct nvc0_screen *screen = nvc0->screen;
int s;
+ pipe_mutex_lock(screen->base.push_mutex);
+
/* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
nvc0->vb_elt_first = info->min_index + info->index_bias;
nvc0->vb_elt_limit = info->max_index - info->min_index;
nvc0_push_vbo(nvc0, info);
push->kick_notify = nvc0_default_kick_notify;
nouveau_pushbuf_bufctx(push, NULL);
+ pipe_mutex_unlock(screen->base.push_mutex);
return;
}
nvc0_release_user_vbufs(nvc0);
nouveau_pushbuf_bufctx(push, NULL);
+ pipe_mutex_unlock(screen->base.push_mutex);
}
nve4_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
{
struct nvc0_context *nvc0 = nvc0_context(pipe);
+ struct nvc0_screen *screen = nvc0->screen;
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
struct nve4_cp_launch_desc *desc;
uint64_t desc_gpuaddr;
struct nouveau_bo *desc_bo;
int ret;
+ pipe_mutex_lock(screen->base.push_mutex);
+
desc = nve4_compute_alloc_launch_desc(&nvc0->base, &desc_bo, &desc_gpuaddr);
if (!desc) {
ret = -1;
NOUVEAU_ERR("Failed to launch grid!\n");
nouveau_scratch_done(&nvc0->base);
nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_DESC);
+ pipe_mutex_unlock(screen->base.push_mutex);
}
#define DRM_API_HANDLE_TYPE_SHARED 0
#define DRM_API_HANDLE_TYPE_KMS 1
#define DRM_API_HANDLE_TYPE_FD 2
+#define DRM_API_HANDLE_TYPE_BUFFER 3
/**
{
/**
* Input for texture_from_handle, valid values are
- * DRM_API_HANDLE_TYPE_SHARED or DRM_API_HANDLE_TYPE_FD.
+ * DRM_API_HANDLE_TYPE_SHARED, DRM_API_HANDLE_TYPE_FD or
+ * DRM_API_HANDLE_TYPE_BUFFER.
* Input to texture_get_handle,
* to select handle for kms, flink, or prime.
*/
* of a specific layer of an array texture.
*/
unsigned layer;
+
+ /**
+ * Input to texture_from_handle, output from texture_get_handle:
+ * the native buffer (e.g. an ANativeWindowBuffer) backing this
+ * handle when type is DRM_API_HANDLE_TYPE_BUFFER.
+ */
+ void *external_buffer;
+
/**
* Input to texture_from_handle.
* Output for texture_get_handle.
* only needed for exporting dmabufs, so I think I won't lose much
* sleep over it.
*/
-static int convert_to_fourcc(int format)
+int convert_to_fourcc(int format)
{
switch(format) {
case __DRI_IMAGE_FORMAT_RGB565:
/* no-op */
}
-static __DRIimage *
+__DRIimage *
dri2_lookup_egl_image(struct dri_screen *screen, void *handle)
{
const __DRIimageLookupExtension *loader = screen->sPriv->dri2.image;
return img;
}
-static __DRIimage *
+__DRIimage *
dri2_create_image_from_winsys(__DRIscreen *_screen,
int width, int height, int format,
int num_handles, struct winsys_handle *whandle,
return img;
}
-static __DRIimage *
+__DRIimage *
dri2_create_from_texture(__DRIcontext *context, int target, unsigned texture,
int depth, int level, unsigned *error,
void *loaderPrivate)
pipe_transfer_unmap(pipe, (struct pipe_transfer *)data);
}
-static void
+void
dri2_destroy_image(__DRIimage *img)
{
pipe_resource_reference(&img->texture, NULL);
ctx->fence_server_sync(ctx, fence->pipe_fence);
}
-static __DRI2fenceExtension dri2FenceExtension = {
+__DRI2fenceExtension dri2FenceExtension = {
.base = { __DRI2_FENCE, 2 },
.create_fence = dri2_create_fence,
if (screen->fd < 0 || (fd = fcntl(screen->fd, F_DUPFD_CLOEXEC, 3)) < 0)
goto free_screen;
- if (pipe_loader_drm_probe_fd(&screen->dev, fd))
- pscreen = pipe_loader_create_screen(screen->dev);
-
+ pscreen = load_pipe_screen(&screen->dev, screen->fd);
if (!pscreen)
goto release_pipe;
pipe_transfer_unmap(pipe, transfer);
}
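+/* Helpers implemented in dri2.c and made non-static earlier in this series
+ * so the swrast path can reuse them. */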
+extern __DRIimage *dri2_create_from_texture(__DRIcontext *context, int target, unsigned texture,
+ int depth, int level, unsigned *error,
+ void *loaderPrivate);
+extern __DRIimage *dri2_lookup_egl_image(struct dri_screen *screen, void *handle);
+extern void dri2_destroy_image(__DRIimage *img);
+extern int convert_to_fourcc(int format);
+extern __DRIimage *dri2_create_image_from_winsys(__DRIscreen *_screen,
+ int width, int height, int format,
+ int num_handles, struct winsys_handle *whandle,
+ void *loaderPrivate);
+extern __DRI2fenceExtension dri2FenceExtension;
+
+
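+/* queryImage for the swrast image extension: report basic attributes of the
+ * pipe resource backing a __DRIimage. */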
+static GLboolean
+drisw_query_image(__DRIimage *image, int attrib, int *value)
+{
+ switch (attrib) {
+ case __DRI_IMAGE_ATTRIB_FORMAT:
+ *value = image->dri_format;
+ return GL_TRUE;
+ case __DRI_IMAGE_ATTRIB_WIDTH:
+ *value = image->texture->width0;
+ return GL_TRUE;
+ case __DRI_IMAGE_ATTRIB_HEIGHT:
+ *value = image->texture->height0;
+ return GL_TRUE;
+ case __DRI_IMAGE_ATTRIB_COMPONENTS:
+ if (image->dri_components == 0)
+ return GL_FALSE;
+ *value = image->dri_components;
+ return GL_TRUE;
+ case __DRI_IMAGE_ATTRIB_NUM_PLANES:
+ *value = 1;
+ return GL_TRUE;
+ case __DRI_IMAGE_ATTRIB_FOURCC:
+ *value = convert_to_fourcc(image->dri_format);
+ return GL_TRUE;
+ default:
+ return GL_FALSE;
+ }
+}
+
/*
* Backend function for init_screen.
*/
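+/* Image extension entry points that swrast can service without a DRM device. */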
+static const __DRIimageExtension driswImageExtension = {
+ .base = { __DRI_IMAGE, 11 },
+
+ .createImageFromTexture = dri2_create_from_texture,
+ .destroyImage = dri2_destroy_image,
+ .queryImage = drisw_query_image,
+};
+
static const __DRIextension *drisw_screen_extensions[] = {
&driTexBufferExtension.base,
&dri2RendererQueryExtension.base,
&dri2ConfigQueryExtension.base,
+ &driswImageExtension.base,
+ &dri2FenceExtension.base,
NULL
};
if (!configs)
goto fail;
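+ /* driSWRastExtension is mutable so the gallium backend can install its
+  * winsys-image hook at screen creation. */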
+ screen->lookup_egl_image = dri2_lookup_egl_image;
+ driSWRastExtension.createImageFromWinsys = dri2_create_image_from_winsys;
+
return configs;
fail:
dri_destroy_screen_helper(screen);
LOCAL_MODULE := gallium_dri
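+# Export gallium include paths to modules that build against gallium_dri.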
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+ $(MESA_TOP)/src \
+ $(MESA_TOP)/include \
+ $(MESA_TOP)/src/gallium/auxiliary \
+ $(MESA_TOP)/src/gallium/include
+
ifeq ($(MESA_LOLLIPOP_BUILD),true)
LOCAL_MODULE_RELATIVE_PATH := $(MESA_DRI_MODULE_REL_PATH)
else
libdl \
libglapi \
libexpat \
+ libhardware \
ifneq ($(filter freedreno,$(MESA_GPU_DRIVERS)),)
LOCAL_CFLAGS += -DGALLIUM_FREEDRENO
LOCAL_SHARED_LIBRARIES += libdrm_radeon
endif
ifneq ($(filter swrast,$(MESA_GPU_DRIVERS)),)
-gallium_DRIVERS += libmesa_pipe_softpipe libmesa_winsys_sw_dri
-LOCAL_CFLAGS += -DGALLIUM_SOFTPIPE
+gallium_DRIVERS += libmesa_pipe_llvmpipe libmesa_pipe_softpipe libmesa_winsys_sw_dri
+LOCAL_CFLAGS += -DGALLIUM_LLVMPIPE -DGALLIUM_SOFTPIPE
+LOCAL_SHARED_LIBRARIES += libLLVM
endif
ifneq ($(filter vc4,$(MESA_GPU_DRIVERS)),)
LOCAL_CFLAGS += -DGALLIUM_VC4
LOCAL_STATIC_LIBRARIES :=
ifeq ($(MESA_ENABLE_LLVM),true)
-LOCAL_STATIC_LIBRARIES += \
- libLLVMR600CodeGen \
- libLLVMR600Desc \
- libLLVMR600Info \
- libLLVMR600AsmPrinter \
- libelf
+LOCAL_STATIC_LIBRARIES += libelf libz
LOCAL_LDLIBS += $(if $(filter true,$(MESA_LOLLIPOP_BUILD)),-lgcc)
endif
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
include $(GALLIUM_COMMON_MK)
include $(BUILD_SHARED_LIBRARY)
global:
__driDriverExtensions;
__driDriverGetExtensions*;
+ load_pipe_screen;
nouveau_drm_screen_create;
radeon_drm_winsys_create;
amdgpu_winsys_create;
#include "util/u_memory.h"
#include "state_tracker/sw_winsys.h"
+#include "state_tracker/drm_driver.h"
#include "dri_sw_winsys.h"
+#ifdef ANDROID
+#include <system/graphics.h>
+#include <system/window.h>
+#include <hardware/gralloc.h>
+#endif
+
struct dri_sw_displaytarget
{
void *data;
void *mapped;
const void *front_private;
+#ifdef ANDROID
+ struct ANativeWindowBuffer *androidBuffer;
+#endif
};
+#ifdef ANDROID
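+/* Resolve the gralloc HAL module once and cache it for the process
+ * lifetime; returns NULL if the module cannot be loaded. */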
+static const struct gralloc_module_t *get_gralloc(void)
+{
+ static const struct gralloc_module_t* gr_module = NULL;
+ const hw_module_t *mod;
+ int err;
+
+ if (!gr_module) {
+ err = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &mod);
+ if (!err) {
+ gr_module = (const gralloc_module_t *) mod;
+ }
+ }
+ return gr_module;
+}
+#endif
+
struct dri_sw_winsys
{
struct sw_winsys base;
{
struct dri_sw_displaytarget *dri_sw_dt = dri_sw_displaytarget(dt);
+#ifdef ANDROID
+ if (dri_sw_dt->androidBuffer) {
+ dri_sw_dt->androidBuffer->common.decRef(&dri_sw_dt->androidBuffer->common);
+ }
+#endif
+
align_free(dri_sw_dt->data);
FREE(dri_sw_dt);
unsigned flags)
{
struct dri_sw_displaytarget *dri_sw_dt = dri_sw_displaytarget(dt);
+#ifdef ANDROID
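+ /* gralloc-backed buffers must be locked for CPU access; gralloc's lock()
+  * returns 0 on success and yields the CPU-visible mapping. */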
+ if (dri_sw_dt->androidBuffer && get_gralloc()) {
+ if (!get_gralloc()->lock(get_gralloc(), dri_sw_dt->androidBuffer->handle,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ 0, 0, dri_sw_dt->androidBuffer->width, dri_sw_dt->androidBuffer->height,
+ (void**)&dri_sw_dt->mapped)) {
+ dri_sw_dt->map_flags = flags;
+ return dri_sw_dt->mapped;
+ }
+ }
+#endif
dri_sw_dt->mapped = dri_sw_dt->data;
if (dri_sw_dt->front_private && (flags & PIPE_TRANSFER_READ)) {
dri_sw_ws->lf->put_image2((void *)dri_sw_dt->front_private, dri_sw_dt->data, 0, 0, dri_sw_dt->width, dri_sw_dt->height, dri_sw_dt->stride);
}
dri_sw_dt->map_flags = 0;
+#ifdef ANDROID
+ if (dri_sw_dt->androidBuffer && get_gralloc()) {
+ get_gralloc()->unlock(get_gralloc(), dri_sw_dt->androidBuffer->handle);
+ }
+#endif
dri_sw_dt->mapped = NULL;
}
struct winsys_handle *whandle,
unsigned *stride)
{
+#ifdef ANDROID
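+ /* Wrap an ANativeWindowBuffer handed in by the loader as a displaytarget
+  * and hold a reference on it for the displaytarget's lifetime. */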
+ struct dri_sw_displaytarget *dri_sw_dt;
+
+ if (whandle->type == DRM_API_HANDLE_TYPE_BUFFER) {
+ dri_sw_dt = CALLOC_STRUCT(dri_sw_displaytarget);
+ if (!dri_sw_dt)
+ return NULL;
+
+ dri_sw_dt->width = templ->width0;
+ dri_sw_dt->height = templ->height0;
+ dri_sw_dt->androidBuffer = whandle->external_buffer;
+ dri_sw_dt->stride = whandle->stride;
+
+ dri_sw_dt->androidBuffer->common.incRef(&dri_sw_dt->androidBuffer->common);
+ *stride = dri_sw_dt->stride;
+
+ return dri_sw_dt;
+ }
+#endif
assert(0);
return NULL;
}
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
struct virgl_hw_res *res)
{
+ if (!res)
+ return false;
unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
int i;
struct virgl_drm_cmd_buf *cbuf,
struct virgl_hw_res *res)
{
+ if (!res)
+ return;
unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
if (cbuf->cres > cbuf->nres) {
abi_header := $(intermediates)/$(abi_header)
LOCAL_GENERATED_SOURCES := $(abi_header)
+# Work around linker warnings about text relocations in x86 assembly.
+LOCAL_LDFLAGS_x86 += -Wl,--no-warn-shared-textrel
+
$(abi_header): PRIVATE_PRINTER := shared-glapi
mapi_abi_headers += $(abi_header)
LOCAL_C_INCLUDES += $(intermediates)/main
ifeq ($(strip $(MESA_ENABLE_ASM)),true)
-ifeq ($(TARGET_ARCH),x86)
-sources += x86/matypes.h
-LOCAL_C_INCLUDES += $(intermediates)/x86
-endif
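+# Generate matypes.h per target arch so 32- and 64-bit builds each get
+# matching structure offsets for the assembly paths.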
+LOCAL_GENERATED_SOURCES_x86 += $(addprefix $(intermediates)/, x86/matypes.h)
+LOCAL_GENERATED_SOURCES_x86_64 += $(addprefix $(intermediates)/, x86_64/matypes.h)
+
+LOCAL_C_INCLUDES_x86 += $(intermediates)/x86
+LOCAL_C_INCLUDES_x86_64 += $(intermediates)/x86_64
endif
sources += main/git_sha1.h
$(hide) $(PRIVATE_SCRIPT) $(1) $(PRIVATE_XML) > $@
endef
-$(intermediates)/main/git_sha1.h:
+$(intermediates)/main/git_sha1.h: $(wildcard $(MESA_TOP)/.git/logs/HEAD)
@mkdir -p $(dir $@)
@echo "GIT-SHA1: $(PRIVATE_MODULE) <= git"
$(hide) touch $@
$(hide) if which git > /dev/null; then \
- git --git-dir $(PRIVATE_PATH)/../../.git log -n 1 --oneline | \
+ git --git-dir $(MESA_TOP)/.git log -n 1 --oneline | \
sed 's/^\([^ ]*\) .*/#define MESA_GIT_SHA1 "git-\1"/' \
> $@; \
fi
-matypes_deps := \
- $(BUILD_OUT_EXECUTABLES)/mesa_gen_matypes$(BUILD_EXECUTABLE_SUFFIX) \
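+# The host generator is built for both bitnesses; depend on the one that
+# matches the target ABI.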
+matypes_deps32 := \
+ $(BUILD_OUT_EXECUTABLES)/mesa_gen_matypes32$(BUILD_EXECUTABLE_SUFFIX) \
+ $(LOCAL_PATH)/main/mtypes.h \
+ $(LOCAL_PATH)/tnl/t_context.h
+
+matypes_deps64 := \
+ $(BUILD_OUT_EXECUTABLES)/mesa_gen_matypes64$(BUILD_EXECUTABLE_SUFFIX) \
$(LOCAL_PATH)/main/mtypes.h \
$(LOCAL_PATH)/tnl/t_context.h
-$(intermediates)/x86/matypes.h: $(matypes_deps)
+$(intermediates)/x86/matypes.h: $(matypes_deps32)
+ @mkdir -p $(dir $@)
+ @echo "MATYPES: $(PRIVATE_MODULE) <= $(notdir $@)"
+ $(hide) $< > $@
+
+$(intermediates)/x86_64/matypes.h: $(matypes_deps64)
@mkdir -p $(dir $@)
@echo "MATYPES: $(PRIVATE_MODULE) <= $(notdir $@)"
$(hide) $< > $@
# Import the following variables:
# MESA_FILES
# X86_FILES
+# X86_64_FILES
include $(LOCAL_PATH)/Makefile.sources
include $(CLEAR_VARS)
$(MESA_FILES)
ifeq ($(strip $(MESA_ENABLE_ASM)),true)
-ifeq ($(TARGET_ARCH),x86)
- LOCAL_SRC_FILES += $(X86_FILES)
-endif # x86
+ LOCAL_SRC_FILES_x86 += $(X86_FILES)
+ LOCAL_SRC_FILES_x86_64 += $(X86_64_FILES)
endif # MESA_ENABLE_ASM
-ifeq ($(ARCH_X86_HAVE_SSE4_1),true)
-LOCAL_WHOLE_STATIC_LIBRARIES := \
- libmesa_sse41
-LOCAL_CFLAGS := \
- -msse4.1 \
- -DUSE_SSE41
-endif
-
LOCAL_C_INCLUDES := \
$(MESA_TOP)/src/mapi \
$(MESA_TOP)/src/mesa/main \
LOCAL_WHOLE_STATIC_LIBRARIES += \
libmesa_program
+LOCAL_WHOLE_STATIC_LIBRARIES_x86 += \
+ libmesa_sse41 \
+
+LOCAL_WHOLE_STATIC_LIBRARIES_x86_64 += \
+ libmesa_sse41 \
+
include $(LOCAL_PATH)/Android.gen.mk
include $(MESA_COMMON_MK)
include $(BUILD_STATIC_LIBRARY)
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
-ifeq ($(ARCH_X86_HAVE_SSE4_1),true)
+ifneq ($(filter x86 x86_64,$(TARGET_ARCH)),)
LOCAL_PATH := $(call my-dir)
LOCAL_SRC_FILES += \
$(X86_SSE41_FILES)
+LOCAL_CFLAGS += \
+ -msse4.1 \
+
LOCAL_C_INCLUDES := \
$(MESA_TOP)/src/mapi \
$(MESA_TOP)/src/gallium/include \
$(MESA_GEN_NIR_H)
ifeq ($(strip $(MESA_ENABLE_ASM)),true)
-ifeq ($(TARGET_ARCH),x86)
- LOCAL_SRC_FILES += $(X86_FILES)
-endif # x86
+ LOCAL_SRC_FILES_x86 += $(X86_FILES)
+ LOCAL_SRC_FILES_x86_64 += $(X86_64_FILES)
endif # MESA_ENABLE_ASM
-ifeq ($(ARCH_X86_HAVE_SSE4_1),true)
-LOCAL_WHOLE_STATIC_LIBRARIES := \
- libmesa_sse41
-LOCAL_CFLAGS := \
- -DUSE_SSE41
-endif
-
LOCAL_C_INCLUDES := \
$(MESA_TOP)/src/mapi \
$(MESA_TOP)/src/mesa/main \
LOCAL_WHOLE_STATIC_LIBRARIES += \
libmesa_program
+LOCAL_WHOLE_STATIC_LIBRARIES_x86 += \
+ libmesa_sse41 \
+
+LOCAL_WHOLE_STATIC_LIBRARIES_x86_64 += \
+ libmesa_sse41 \
+
LOCAL_STATIC_LIBRARIES += libmesa_nir libmesa_glsl
+ifeq ($(MESA_LOLLIPOP_BUILD),true)
+LOCAL_C_INCLUDES += external/libcxx/include
+LOCAL_CXX_STL := libc++
+else
+include external/stlport/libstlport.mk
+endif # MESA_LOLLIPOP_BUILD
+
include $(LOCAL_PATH)/Android.gen.mk
include $(MESA_COMMON_MK)
include $(BUILD_STATIC_LIBRARY)
# ---------------------------------------------------------------------
ifeq ($(strip $(MESA_ENABLE_ASM)),true)
-ifeq ($(TARGET_ARCH),x86)
+ifneq ($(filter x86 x86_64,$(TARGET_ARCH)),)
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := mesa_gen_matypes
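+# Build 32- and 64-bit host generators so the emitted offsets match the
+# target ABI.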
+LOCAL_MULTILIB := both
+LOCAL_MODULE_STEM_32 := $(LOCAL_MODULE)32
+LOCAL_MODULE_STEM_64 := $(LOCAL_MODULE)64
LOCAL_IS_HOST_MODULE := true
LOCAL_C_INCLUDES := \
include $(MESA_COMMON_MK)
include $(BUILD_HOST_EXECUTABLE)
-endif # x86
+endif # x86 x86_64
endif # MESA_ENABLE_ASM
.createNewScreen2 = driCreateNewScreen2,
};
-const __DRIswrastExtension driSWRastExtension = {
- .base = { __DRI_SWRAST, 4 },
+__DRIswrastExtension driSWRastExtension = {
+ .base = { __DRI_SWRAST, __DRI_SWRAST_VERSION },
.createNewScreen = driSWRastCreateNewScreen,
.createNewDrawable = driCreateNewDrawable,
* Extensions.
*/
extern const __DRIcoreExtension driCoreExtension;
-extern const __DRIswrastExtension driSWRastExtension;
+extern __DRIswrastExtension driSWRastExtension;
extern const __DRIdri2Extension driDRI2Extension;
extern const __DRI2configQueryExtension dri2ConfigQueryExtension;
extern const __DRIcopySubBufferExtension driCopySubBufferExtension;
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
case GL_TEXTURE_CUBE_MAP:
+ case GL_TEXTURE_EXTERNAL_OES:
ok = i830_update_tex_unit(intel, i, TEXCOORDS_ARE_NORMAL);
break;
case GL_TEXTURE_RECTANGLE:
ctx->TextureFormatSupported[MESA_FORMAT_B4G4R4A4_UNORM] = true;
ctx->TextureFormatSupported[MESA_FORMAT_B5G5R5A1_UNORM] = true;
ctx->TextureFormatSupported[MESA_FORMAT_B5G6R5_UNORM] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_R8G8B8X8_UNORM] = true;
ctx->TextureFormatSupported[MESA_FORMAT_L_UNORM8] = true;
if (intel->gen == 3)
ctx->TextureFormatSupported[MESA_FORMAT_A_UNORM8] = true;
case TEXTURE_1D_INDEX:
return D0_SAMPLE_TYPE_2D;
case TEXTURE_2D_INDEX:
+ case TEXTURE_EXTERNAL_INDEX:
return D0_SAMPLE_TYPE_2D;
case TEXTURE_RECT_INDEX:
return D0_SAMPLE_TYPE_2D;
switch (cap) {
case GL_TEXTURE_2D:
+ case GL_TEXTURE_EXTERNAL_OES:
break;
case GL_LIGHTING:
break;
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
+ case GL_TEXTURE_EXTERNAL_OES:
case GL_TEXTURE_RECTANGLE_ARB:
i915_miptree_layout_2d(mt);
break;
break;
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
+ case GL_TEXTURE_EXTERNAL_OES:
case GL_TEXTURE_RECTANGLE_ARB:
i945_miptree_layout_2d(mt);
break;
return MAPSURF_32BIT | MT_32BIT_ARGB8888;
case MESA_FORMAT_B8G8R8X8_UNORM:
return MAPSURF_32BIT | MT_32BIT_XRGB8888;
+ case MESA_FORMAT_R8G8B8X8_UNORM:
+ return MAPSURF_32BIT | MT_32BIT_XBGR8888;
case MESA_FORMAT_R8G8B8A8_UNORM:
return MAPSURF_32BIT | MT_32BIT_ABGR8888;
case MESA_FORMAT_YCBCR_REV:
case GL_TEXTURE_2D:
case GL_TEXTURE_CUBE_MAP:
case GL_TEXTURE_3D:
+ case GL_TEXTURE_EXTERNAL_OES:
ok = i915_update_tex_unit(intel, i, SS3_NORMALIZED_COORDS);
break;
case GL_TEXTURE_RECTANGLE:
{
[MESA_FORMAT_B8G8R8A8_UNORM] = DV_PF_8888,
[MESA_FORMAT_B8G8R8X8_UNORM] = DV_PF_8888,
+ [MESA_FORMAT_R8G8B8X8_UNORM] = DV_PF_8888,
[MESA_FORMAT_B5G6R5_UNORM] = DV_PF_565 | DITHER_FULL_ALWAYS,
[MESA_FORMAT_B5G5R5A1_UNORM] = DV_PF_1555 | DITHER_FULL_ALWAYS,
[MESA_FORMAT_B4G4R4A4_UNORM] = DV_PF_4444 | DITHER_FULL_ALWAYS,
ctx->Extensions.TDFX_texture_compression_FXT1 = true;
ctx->Extensions.OES_EGL_image = true;
ctx->Extensions.OES_draw_texture = true;
+ ctx->Extensions.OES_EGL_image_external = true;
ctx->Const.GLSLVersion = 120;
_mesa_override_glsl_version(&ctx->Const);
LOCAL_CFLAGS := \
$(MESA_DRI_CFLAGS)
-ifeq ($(ARCH_X86_HAVE_SSE4_1),true)
-LOCAL_CFLAGS += \
- -DUSE_SSE41
-endif
-
LOCAL_C_INCLUDES := \
$(MESA_DRI_C_INCLUDES)
if (image == NULL)
return;
+#ifndef ANDROID
/* We support external textures only for EGLImages created with
* EGL_EXT_image_dma_buf_import. We may lift that restriction in the future.
*/
"for images created with EGL_EXT_image_dma_buf_import");
return;
}
+#endif
/* Disallow depth/stencil textures: we don't have a way to pass the
* separate stencil miptree of a GL_DEPTH_STENCIL texture through.
EXT(APPLE_object_purgeable , APPLE_object_purgeable , GLL, GLC, x , x , 2006)
EXT(APPLE_packed_pixels , dummy_true , GLL, x , x , x , 2002)
+EXT(APPLE_texture_2D_limited_npot , ARB_texture_non_power_of_two , x , x , ES1, x , 2011)
EXT(APPLE_texture_max_level , dummy_true , x , x , ES1, ES2, 2009)
EXT(APPLE_vertex_array_object , dummy_true , GLL, x , x , x , 2002)
EXT(ARB_texture_mirror_clamp_to_edge , ARB_texture_mirror_clamp_to_edge , GLL, GLC, x , x , 2013)
EXT(ARB_texture_mirrored_repeat , dummy_true , GLL, x , x , x , 2001)
EXT(ARB_texture_multisample , ARB_texture_multisample , GLL, GLC, x , x , 2009)
-EXT(ARB_texture_non_power_of_two , ARB_texture_non_power_of_two , GLL, GLC, x , x , 2003)
+EXT(ARB_texture_non_power_of_two , ARB_texture_non_power_of_two , GLL, GLC, ES1, x , 2003)
EXT(ARB_texture_query_levels , ARB_texture_query_levels , GLL, GLC, x , x , 2012)
EXT(ARB_texture_query_lod , ARB_texture_query_lod , GLL, GLC, x , x , 2009)
EXT(ARB_texture_rectangle , NV_texture_rectangle , GLL, GLC, x , x , 2004)