"description": "egl/android: fix buffer_count for applications setting max count",
"nominated": true,
"nomination_type": 1,
- "resolution": 1,
+ "resolution": 0,
"master_sha": null,
"because_sha": "be08e6a4496aad219df1fd829fca3e4f7b322538"
},
MESA_VERSION := $(shell cat $(MESA_TOP)/VERSION)
LOCAL_CFLAGS += \
+ -O3 \
-Wno-error \
-Werror=incompatible-pointer-types \
-Wno-unused-parameter \
-DHAVE_ZLIB \
-DMAJOR_IN_SYSMACROS \
-DVK_USE_PLATFORM_ANDROID_KHR \
+ -DTEXTURE_FLOAT_ENABLED \
-fvisibility=hidden \
-fno-math-errno \
-fno-trapping-math \
endif
define mesa-build-with-llvm
- $(if $(filter $(MESA_ANDROID_MAJOR_VERSION), 4 5 6 7), \
+ $(if $(filter $(MESA_ANDROID_MAJOR_VERSION), 4 5 6), \
$(warning Unsupported LLVM version in Android $(MESA_ANDROID_MAJOR_VERSION)),) \
- $(eval LOCAL_CFLAGS += -DLLVM_AVAILABLE -DMESA_LLVM_VERSION_STRING=\"3.9\") \
- $(eval LOCAL_SHARED_LIBRARIES += libLLVM)
+ $(eval LOCAL_CFLAGS += -DLLVM_AVAILABLE -DMESA_LLVM_VERSION_STRING=\"9.0\") \
+ $(eval LOCAL_SHARED_LIBRARIES += libLLVM90)
endef
# add subdirectories
$(call add-clean-step, rm -rf $(HOST_OUT)/*/EXECUTABLES/glsl_compiler_intermediates)
$(call add-clean-step, rm -rf $(HOST_OUT)/*/STATIC_LIBRARIES/libmesa_*_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/*/SHARED_LIBRARIES/*_dri_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/*/SHARED_LIBRARIES/*_dri_intermediates)
ahb_usage |= AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
if (vk_create & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
- ahb_usage |= AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP;
+ ahb_usage |= 1UL << 25; /* AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP, spelled out for older NDK headers that may lack the enum */
if (vk_create & VK_IMAGE_CREATE_PROTECTED_BIT)
ahb_usage |= AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
!state->OES_gpu_shader5_enable &&
!state->has_bindless()) {
if (state->is_version(130, 300))
- _mesa_glsl_error(&loc, state,
+ _mesa_glsl_warning(&loc, state,
"sampler arrays indexed with non-constant "
"expressions are forbidden in GLSL %s "
"and later",
LOCAL_SHARED_LIBRARIES += libgralloc_drm
endif
+ifeq ($(strip $(BOARD_USES_GRALLOC1)),true)
+LOCAL_CFLAGS += -DHAVE_GRALLOC1
+endif
+
ifeq ($(filter $(MESA_ANDROID_MAJOR_VERSION), 4 5 6 7),)
LOCAL_SHARED_LIBRARIES += libnativewindow
endif
#include <system/window.h>
#include <hardware/gralloc.h>
+
+#ifdef HAVE_GRALLOC1
+#include <hardware/gralloc1.h>
+#endif
#endif /* HAVE_ANDROID_PLATFORM */
#include "eglconfig.h"
#endif
#ifdef HAVE_ANDROID_PLATFORM
+#ifdef HAVE_GRALLOC1
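+ /* gralloc1 state: the raw module handle plus the opened device and the
+ * entry points (lockFlex/getFormat/unlock) used below for format
+ * resolution and YUV buffer import.
+ */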
+ const hw_module_t *gralloc;
+ uint16_t gralloc_version;
+ gralloc1_device_t *gralloc1_dev;
+ GRALLOC1_PFN_LOCK_FLEX pfn_lockflex;
+ GRALLOC1_PFN_GET_FORMAT pfn_getFormat;
+ GRALLOC1_PFN_UNLOCK pfn_unlock;
+#else
const gralloc_module_t *gralloc;
#endif
+#endif
bool is_render_node;
bool is_different_gpu;
#ifdef HAVE_DRM_GRALLOC
#include <gralloc_drm_handle.h>
#include "gralloc_drm.h"
+#define GRALLOC_DRM_GET_FORMAT 1 /* perform() op used by the runtime format query below */
+#else
+/* GRALLOC_DRM_GET_FORMAT is a drm_gralloc perform() op; define it here as
+ * well so the runtime format-query fallback works without depending on the
+ * full gralloc_drm header.
+ */
+#define GRALLOC_DRM_GET_FORMAT 1
#endif /* HAVE_DRM_GRALLOC */
#define ALIGN(val, align) (((val) + (align) - 1) & ~((align) - 1))
int bpp;
switch (native) {
- case HAL_PIXEL_FORMAT_RGBA_FP16:
- bpp = 8;
- break;
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
/*
*/
case HAL_PIXEL_FORMAT_RGBX_8888:
case HAL_PIXEL_FORMAT_BGRA_8888:
- case HAL_PIXEL_FORMAT_RGBA_1010102:
bpp = 4;
break;
case HAL_PIXEL_FORMAT_RGB_565:
* TODO: Remove this once https://issuetracker.google.com/32077885 is fixed.
*/
case HAL_PIXEL_FORMAT_RGBX_8888: return DRM_FORMAT_XBGR8888;
- case HAL_PIXEL_FORMAT_RGBA_FP16: return DRM_FORMAT_ABGR16161616F;
- case HAL_PIXEL_FORMAT_RGBA_1010102: return DRM_FORMAT_ABGR2101010;
default:
_eglLog(_EGL_WARNING, "unsupported native buffer format 0x%x", native);
}
* TODO: Revert this once https://issuetracker.google.com/32077885 is fixed.
*/
case HAL_PIXEL_FORMAT_RGBX_8888: return __DRI_IMAGE_FORMAT_XBGR8888;
- case HAL_PIXEL_FORMAT_RGBA_FP16: return __DRI_IMAGE_FORMAT_ABGR16161616F;
- case HAL_PIXEL_FORMAT_RGBA_1010102: return __DRI_IMAGE_FORMAT_ABGR2101010;
default:
_eglLog(_EGL_WARNING, "unsupported native buffer format 0x%x", format);
}
}
#endif /* HAVE_DRM_GRALLOC */
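+/* Return the HAL pixel format of the buffer. When the buffer reports
+ * HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, ask gralloc for the real format:
+ * gralloc1 via getFormat, gralloc0 via the GRALLOC_DRM_GET_FORMAT perform op.
+ * Returns -1 on failure.
+ */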
+static int
+droid_resolve_format(struct dri2_egl_display *dri2_dpy,
+ struct ANativeWindowBuffer *buf)
+{
+ int format = -1;
+ int ret;
+
+ if (buf->format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)
+ return buf->format;
+#ifdef HAVE_GRALLOC1
+ if (dri2_dpy->gralloc_version == HARDWARE_MODULE_API_VERSION(1, 0)) {
+ if (!dri2_dpy->pfn_getFormat) {
+ _eglLog(_EGL_WARNING, "gralloc does not support getFormat");
+ return -1;
+ }
+ ret = dri2_dpy->pfn_getFormat(dri2_dpy->gralloc1_dev, buf->handle,
+ &format);
+ if (ret) {
+ _eglLog(_EGL_WARNING, "gralloc->getFormat failed: %d", ret);
+ return -1;
+ }
+ } else
+#endif
+ {
+ const gralloc_module_t *gralloc0 =
+ (const struct gralloc_module_t *) dri2_dpy->gralloc;
+
+ if (!gralloc0->perform) {
+ _eglLog(_EGL_WARNING, "gralloc->perform not supported");
+ return -1;
+ }
+ if (!strcmp(gralloc0->common.name, "CrOS Gralloc")) {
+ ret = gralloc0->perform(gralloc0,
+ GRALLOC_DRM_GET_FORMAT, buf->handle, &format);
+ if (ret) {
+ _eglLog(_EGL_WARNING, "gralloc->perform failed with error: %d", ret);
+ return -1;
+ }
+ } else {
+ /* Other gralloc0 modules do not implement GRALLOC_DRM_GET_FORMAT;
+ * fall back to RGBX_8888 (see https://issuetracker.google.com/32077885).
+ */
+ format = HAL_PIXEL_FORMAT_RGBX_8888;
+ }
+ }
+ return format;
+}
+
static EGLBoolean
droid_window_dequeue_buffer(struct dri2_egl_surface *dri2_surf)
{
if (type == EGL_WINDOW_BIT) {
int format;
int buffer_count;
- int min_buffer_count, max_buffer_count;
-
- /* Prefer triple buffering for performance reasons. */
- const int preferred_buffer_count = 3;
if (window->common.magic != ANDROID_NATIVE_WINDOW_MAGIC) {
_eglError(EGL_BAD_NATIVE_WINDOW, "droid_create_surface");
goto cleanup_surface;
}
- /* Query ANativeWindow for MIN_UNDEQUEUED_BUFFER, minimum amount
- * of undequeued buffers.
+ /* Query ANativeWindow for MIN_UNDEQUEUED_BUFFER, set buffer count
+ * and allocate color_buffers.
*/
if (window->query(window, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
- &min_buffer_count)) {
+ &buffer_count)) {
_eglError(EGL_BAD_NATIVE_WINDOW, "droid_create_surface");
goto cleanup_surface;
}
-
- /* Query for maximum buffer count, application can set this
- * to limit the total amount of buffers.
- */
- if (window->query(window, NATIVE_WINDOW_MAX_BUFFER_COUNT,
- &max_buffer_count)) {
+ if (native_window_set_buffer_count(window, buffer_count+1)) {
_eglError(EGL_BAD_NATIVE_WINDOW, "droid_create_surface");
goto cleanup_surface;
}
-
- /* Clamp preferred between minimum (min undequeued + 1 dequeued)
- * and maximum.
- */
- buffer_count = CLAMP(preferred_buffer_count, min_buffer_count + 1,
- max_buffer_count);
-
- if (native_window_set_buffer_count(window, buffer_count)) {
- _eglError(EGL_BAD_NATIVE_WINDOW, "droid_create_surface");
- goto cleanup_surface;
- }
- dri2_surf->color_buffers = calloc(buffer_count,
+ dri2_surf->color_buffers = calloc(buffer_count+1,
sizeof(*dri2_surf->color_buffers));
if (!dri2_surf->color_buffers) {
_eglError(EGL_BAD_ALLOC, "droid_create_surface");
goto cleanup_surface;
}
- dri2_surf->color_buffers_count = buffer_count;
+ dri2_surf->color_buffers_count = buffer_count+1;
if (format != dri2_conf->base.NativeVisualID) {
_eglLog(_EGL_WARNING, "Native format mismatch: 0x%x != 0x%x",
struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surf);
struct ANativeWindow *window = dri2_surf->window;
- if (window->setSwapInterval(window, interval))
+ if (window && window->setSwapInterval(window, interval))
return EGL_FALSE;
surf->SwapInterval = interval;
{
struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);
+ /* To avoid blocking other EGL calls, release the display mutex before
+ * we enter droid_window_dequeue_buffer() and re-acquire the mutex upon
+ * return.
+ */
+ mtx_unlock(&disp->Mutex);
if (update_buffers(dri2_surf) < 0) {
_eglError(EGL_BAD_ALLOC, "droid_query_buffer_age");
+ mtx_lock(&disp->Mutex);
return -1;
}
+ mtx_lock(&disp->Mutex);
return dri2_surf->back ? dri2_surf->back->age : 0;
}
return EGL_TRUE;
}
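+/* Translate the android_flex_layout returned by gralloc1's lockFlex into the
+ * android_ycbcr layout expected by the YUV import path below.
+ */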
+static bool
+get_ycbcr_from_flexlayout(struct android_flex_layout *outFlexLayout,
+ struct android_ycbcr *ycbcr)
+{
+ for (int i = 0; i < outFlexLayout->num_planes; i++) {
+ switch(outFlexLayout->planes[i].component){
+ case FLEX_COMPONENT_Y:
+ ycbcr->y = outFlexLayout->planes[i].top_left;
+ ycbcr->ystride = outFlexLayout->planes[i].v_increment;
+ break;
+ case FLEX_COMPONENT_Cb:
+ ycbcr->cb = outFlexLayout->planes[i].top_left;
+ ycbcr->cstride = outFlexLayout->planes[i].v_increment;
+ break;
+ case FLEX_COMPONENT_Cr:
+ ycbcr->cr = outFlexLayout->planes[i].top_left;
+ ycbcr->chroma_step = outFlexLayout->planes[i].h_increment;
+ break;
+ default:
+ _eglLog(_EGL_WARNING, "unknown component 0x%x", __func__,
+ outFlexLayout->planes[i].component);
+ return false;
+ }
+ }
+ return true;
+}
+
static _EGLImage *
droid_create_image_from_prime_fds_yuv(_EGLDisplay *disp, _EGLContext *ctx,
struct ANativeWindowBuffer *buf,
int fourcc;
int ret;
- if (!dri2_dpy->gralloc->lock_ycbcr) {
- _eglLog(_EGL_WARNING, "Gralloc does not support lock_ycbcr");
+ int format = droid_resolve_format(dri2_dpy, buf);
+ if (format < 0) {
+ _eglError(EGL_BAD_PARAMETER, "eglCreateEGLImageKHR");
return NULL;
}
memset(&ycbcr, 0, sizeof(ycbcr));
- ret = dri2_dpy->gralloc->lock_ycbcr(dri2_dpy->gralloc, buf->handle,
- 0, 0, 0, 0, 0, &ycbcr);
- if (ret) {
- /* HACK: See droid_create_image_from_prime_fds() and
- * https://issuetracker.google.com/32077885.*/
- if (buf->format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)
+#ifdef HAVE_GRALLOC1
+ if (dri2_dpy->gralloc_version == HARDWARE_MODULE_API_VERSION(1, 0)) {
+ struct android_flex_layout outFlexLayout;
+ int ignored_outReleaseFence = 0;
+ gralloc1_rect_t accessRegion;
+
+ if (!dri2_dpy->pfn_lockflex) {
+ _eglLog(_EGL_WARNING, "gralloc does not support lockflex");
+ return NULL;
+ }
+
+ ret = dri2_dpy->pfn_lockflex(dri2_dpy->gralloc1_dev, buf->handle,
+ 0, 0, &accessRegion, &outFlexLayout, -1);
+ if (ret) {
+ _eglLog(_EGL_WARNING, "gralloc->lockflex failed: %d", ret);
+ return NULL;
+ }
+ if (!get_ycbcr_from_flexlayout(&outFlexLayout, &ycbcr)) {
+ _eglLog(_EGL_WARNING, "get_ycbcr_from_flexlayout failed");
+ dri2_dpy->pfn_unlock(dri2_dpy->gralloc1_dev, buf->handle,
+ &ignored_outReleaseFence);
+ return NULL;
+ }
+ dri2_dpy->pfn_unlock(dri2_dpy->gralloc1_dev, buf->handle,
+ &ignored_outReleaseFence);
+ } else
+#endif
+ {
+ const gralloc_module_t *gralloc0 =
+ (const gralloc_module_t *) dri2_dpy->gralloc;
+
+ if (!gralloc0->lock_ycbcr) {
+ _eglLog(_EGL_WARNING, "gralloc does not support lock_ycbcr");
return NULL;
+ }
- _eglLog(_EGL_WARNING, "gralloc->lock_ycbcr failed: %d", ret);
- return NULL;
+ ret = gralloc0->lock_ycbcr(gralloc0, buf->handle,
+ 0, 0, 0, 0, 0, &ycbcr);
+
+ if (ret) {
+ /* HACK: See droid_create_image_from_prime_fd() and
+ * https://issuetracker.google.com/32077885.
+ */
+ if (buf->format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)
+ return NULL;
+
+ _eglLog(_EGL_WARNING, "gralloc->lock_ycbcr failed: %d", ret);
+ return NULL;
+ }
+
+ gralloc0->unlock((const struct gralloc_module_t *) dri2_dpy->gralloc,
+ buf->handle);
}
- dri2_dpy->gralloc->unlock(dri2_dpy->gralloc, buf->handle);
/* When lock_ycbcr's usage argument contains no SW_READ/WRITE flags
* it will return the .y/.cb/.cr pointers based on a NULL pointer,
/* .chroma_step is the byte distance between the same chroma channel
* values of subsequent pixels, assumed to be the same for Cb and Cr. */
- fourcc = get_fourcc_yuv(buf->format, chroma_order, ycbcr.chroma_step);
+ fourcc = get_fourcc_yuv(format, chroma_order, ycbcr.chroma_step);
if (fourcc == -1) {
_eglLog(_EGL_WARNING, "unsupported YUV format, native = %x, chroma_order = %s, chroma_step = %d",
- buf->format, chroma_order == YCbCr ? "YCbCr" : "YCrCb", ycbcr.chroma_step);
+ format, chroma_order == YCbCr ? "YCbCr" : "YCrCb", ycbcr.chroma_step);
return NULL;
}
droid_create_image_from_prime_fds(_EGLDisplay *disp, _EGLContext *ctx,
struct ANativeWindowBuffer *buf, int num_fds, int fds[3])
{
+ struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
unsigned int pitch;
- if (is_yuv(buf->format)) {
+ int format = droid_resolve_format(dri2_dpy, buf);
+ if (format < 0) {
+ _eglLog(_EGL_WARNING, "Could not resolve buffer format");
+ return NULL;
+ }
+
+ if (is_yuv(format)) {
_EGLImage *image;
image = droid_create_image_from_prime_fds_yuv(disp, ctx, buf, num_fds, fds);
*/
assert(num_fds == 1);
- const int fourcc = get_fourcc(buf->format);
+ const int fourcc = get_fourcc(format);
if (fourcc == -1) {
_eglError(EGL_BAD_PARAMETER, "eglCreateEGLImageKHR");
return NULL;
}
- pitch = buf->stride * get_format_bpp(buf->format);
+ pitch = buf->stride * get_format_bpp(format);
if (pitch == 0) {
_eglError(EGL_BAD_PARAMETER, "eglCreateEGLImageKHR");
return NULL;
{ HAL_PIXEL_FORMAT_RGBA_8888, { 0, 8, 16, 24 }, { 8, 8, 8, 8 } },
{ HAL_PIXEL_FORMAT_RGBX_8888, { 0, 8, 16, -1 }, { 8, 8, 8, 0 } },
{ HAL_PIXEL_FORMAT_RGB_565, { 11, 5, 0, -1 }, { 5, 6, 5, 0 } },
- /* This must be after HAL_PIXEL_FORMAT_RGBA_8888, we only keep BGRA
- * visual if it turns out RGBA visual is not available.
- */
{ HAL_PIXEL_FORMAT_BGRA_8888, { 16, 8, 0, 24 }, { 8, 8, 8, 8 } },
};
* (chadversary) testing on Android Nougat, this was good enough to pacify
* the buggy clients.
*/
- bool has_rgba = false;
for (int i = 0; i < ARRAY_SIZE(visuals); i++) {
- /* Only enable BGRA configs when RGBA is not available. BGRA configs are
- * buggy on stock Android.
- */
- if (visuals[i].format == HAL_PIXEL_FORMAT_BGRA_8888 && has_rgba)
- continue;
for (int j = 0; dri2_dpy->driver_configs[j]; j++) {
const EGLint surface_type = EGL_WINDOW_BIT | EGL_PBUFFER_BIT;
format_count[i]++;
}
}
- if (visuals[i].format == HAL_PIXEL_FORMAT_RGBA_8888 && format_count[i])
- has_rgba = true;
}
for (int i = 0; i < ARRAY_SIZE(format_count); i++) {
return false;
#ifdef HAVE_DRM_GRALLOC
- /* Handle control nodes using __DRI_DRI2_LOADER extension and GEM names
- * for backwards compatibility with drm_gralloc. (Do not use on new
- * systems.) */
- dri2_dpy->loader_extensions = droid_dri2_loader_extensions;
- if (!dri2_load_driver(disp)) {
- err = "DRI2: failed to load driver";
- goto error;
+ dri2_dpy->is_render_node = drmGetNodeTypeFromFd(dri2_dpy->fd) == DRM_NODE_RENDER;
+
+ if (!dri2_dpy->is_render_node) {
+ /* Handle control nodes using __DRI_DRI2_LOADER extension and GEM names
+ * for backwards compatibility with drm_gralloc. (Do not use on new
+ * systems.) */
+ dri2_dpy->loader_extensions = droid_dri2_loader_extensions;
+ if (!dri2_load_driver(disp)) {
+ err = "DRI2: failed to load driver";
+ goto error;
+ }
+ } else {
+ dri2_dpy->loader_extensions = droid_image_loader_extensions;
+ if (!dri2_load_driver_dri3(disp)) {
+ err = "DRI3: failed to load driver";
+ goto error;
+ }
}
#else
if (swrast) {
{
struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
int fd = -1, err = -EINVAL;
+ char buf[PROPERTY_VALUE_MAX];
if (swrast)
return EGL_FALSE;
+#ifdef HAVE_GRALLOC1
+ const gralloc_module_t *gralloc0 =
+ (const struct gralloc_module_t *) dri2_dpy->gralloc;
+ if (gralloc0->perform)
+ err = gralloc0->perform(gralloc0,
+ GRALLOC_MODULE_PERFORM_GET_DRM_FD,
+ &fd);
+#else
if (dri2_dpy->gralloc->perform)
err = dri2_dpy->gralloc->perform(dri2_dpy->gralloc,
GRALLOC_MODULE_PERFORM_GET_DRM_FD,
&fd);
+#endif
if (err || fd < 0) {
_eglLog(_EGL_WARNING, "fail to get drm fd");
return EGL_FALSE;
}
- dri2_dpy->fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
+#ifdef HAVE_GRALLOC1
+ if (!strcmp(gralloc0->common.name, "DRM Memory Allocator") ||
+ property_get("ro.hardware.hwcomposer", buf, NULL) > 0) {
+ dri2_dpy->fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
+#else
+ if (!strcmp(dri2_dpy->gralloc->common.name, "DRM Memory Allocator") ||
+ property_get("ro.hardware.hwcomposer", buf, NULL) > 0) {
+ dri2_dpy->fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
+#endif
+ } else {
+ char *device_name = drmGetRenderDeviceNameFromFd(fd);
+ dri2_dpy->fd = loader_open_device(device_name);
+ free(device_name);
+ }
if (dri2_dpy->fd < 0)
return EGL_FALSE;
- if (drmGetNodeTypeFromFd(dri2_dpy->fd) == DRM_NODE_RENDER)
- return EGL_FALSE;
-
return droid_probe_device(disp, swrast);
}
#else
err = "DRI2: failed to get gralloc module";
goto cleanup;
}
+#ifdef HAVE_GRALLOC1
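+ /* If the module is gralloc1 (API 1.0), open the gralloc1 device and cache
+ * the lockFlex/getFormat/unlock hooks used elsewhere in this file.
+ */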
+ dri2_dpy->gralloc_version = dri2_dpy->gralloc->module_api_version;
+
+ if (dri2_dpy->gralloc_version == HARDWARE_MODULE_API_VERSION(1, 0)) {
+ hw_device_t *device;
+ ret = dri2_dpy->gralloc->methods->open(
+ dri2_dpy->gralloc, GRALLOC_HARDWARE_MODULE_ID, &device);
+ if (ret) {
+ err = "Failed to open hw_device device";
+ goto cleanup;
+ }
+
+ gralloc1_device_t *dev = (gralloc1_device_t *) device;
+
+ dri2_dpy->gralloc1_dev = dev;
+
+ dri2_dpy->pfn_lockflex = (GRALLOC1_PFN_LOCK_FLEX) \
+ dev->getFunction(dev, GRALLOC1_FUNCTION_LOCK_FLEX);
+
+ dri2_dpy->pfn_getFormat = (GRALLOC1_PFN_GET_FORMAT) \
+ dev->getFunction(dev, GRALLOC1_FUNCTION_GET_FORMAT);
+
+ dri2_dpy->pfn_unlock = (GRALLOC1_PFN_UNLOCK) \
+ dev->getFunction(dev, GRALLOC1_FUNCTION_UNLOCK);
+ }
+#endif
disp->DriverData = (void *) dri2_dpy;
if (!disp->Options.ForceSoftware)
#include "eglconfig.h"
#include "eglimage.h"
#include "eglsync.h"
+#include "egllog.h"
#include "GL/mesa_glinterop.h"
if (!share && share_list != EGL_NO_CONTEXT)
RETURN_EGL_ERROR(disp, EGL_BAD_CONTEXT, EGL_NO_CONTEXT);
+ if (share)
+ _eglLog(_EGL_WARNING, "Application is using shared EGL context\n");
+
context = drv->API.CreateContext(drv, disp, conf, share, attrib_list);
ret = (context) ? _eglLinkContext(context) : EGL_NO_CONTEXT;
extern const char gallium_driinfo_xml[];
+struct pipe_screen *
+load_pipe_screen(struct pipe_loader_device **dev, int fd);
+
#ifdef __cplusplus
}
#endif
.get_driconf_xml = pipe_loader_drm_get_driconf_xml,
.release = pipe_loader_drm_release
};
+
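+/* Probe the given DRM fd with the pipe loader and, on success, create a
+ * pipe_screen for it. Exported so callers outside the pipe loader can reuse
+ * the probing logic.
+ */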
+PUBLIC struct pipe_screen *load_pipe_screen(struct pipe_loader_device **dev, int fd)
+{
+ struct pipe_screen *pscreen = NULL;
+ if (pipe_loader_drm_probe_fd(dev, fd)) {
+ pscreen = pipe_loader_create_screen(*dev);
+ }
+ return pscreen;
+}
C_SOURCES := \
nouveau_buffer.c \
nouveau_buffer.h \
+ nouveau_context.c \
nouveau_context.h \
nouveau_debug.h \
nouveau_fence.c \
bool canCommuteDefSrc(const Instruction *) const;
void print() const;
+ void print(std::ostringstream &) const;
inline CmpInstruction *asCmp();
inline TexInstruction *asTex();
#include <inttypes.h>
+#include <iomanip>
+#include <ostream>
+#include <sstream>
+
namespace nv50_ir {
enum TextStyle
void Instruction::print() const
{
+ std::ostringstream buffer;
+ print(buffer);
+ INFO("%s", buffer.str().c_str());
+}
+
+void Instruction::print(std::ostringstream &buffer) const
+{
#define BUFSZ 512
const size_t size = BUFSZ;
buf[MIN2(pos, BUFSZ - 1)] = 0;
- INFO("%s (%u)\n", buf, encSize);
+ buffer << buf << " (" << encSize << ")" << std::endl;
}
class PrintPass : public Pass
{
public:
- PrintPass(bool omitLineNum) : serial(0), omit_serial(omitLineNum) { }
+ PrintPass(std::ostringstream &buffer, bool omitLineNum)
+ : serial(0), omit_serial(omitLineNum), buffer(buffer) { }
+private:
virtual bool visit(Function *);
virtual bool visit(BasicBlock *);
virtual bool visit(Instruction *);
-private:
int serial;
bool omit_serial;
+
+ std::ostringstream &buffer;
};
bool
{
char str[16];
- INFO("\n%s:%i (", fn->getName(), fn->getLabel());
+ buffer << std::endl << fn->getName() << ':' << (int32_t)fn->getLabel() << " (";
if (!fn->outs.empty())
- INFO("out");
+ buffer << "out";
for (std::deque<ValueRef>::iterator it = fn->outs.begin();
it != fn->outs.end();
++it) {
it->get()->print(str, sizeof(str), typeOfSize(it->get()->reg.size));
- INFO(" %s", str);
+ buffer << ' ' << str;
}
- if (!fn->ins.empty())
- INFO("%s%sin", colour[TXT_DEFAULT], fn->outs.empty() ? "" : ", ");
+ if (!fn->ins.empty()) {
+ buffer << colour[TXT_DEFAULT];
+ if (!fn->outs.empty())
+ buffer << ", ";
+ buffer << "in";
+ }
for (std::deque<ValueDef>::iterator it = fn->ins.begin();
it != fn->ins.end();
++it) {
it->get()->print(str, sizeof(str), typeOfSize(it->get()->reg.size));
- INFO(" %s", str);
+ buffer << ' ' << str;
}
- INFO("%s)\n", colour[TXT_DEFAULT]);
+ buffer << colour[TXT_DEFAULT] << ')' << std::endl;
return true;
}
BasicBlock::get(ei.getNode())->getId(),
ei.getEdge()->typeStr());
#endif
- INFO("BB:%i (%u instructions) - ", bb->getId(), bb->getInsnCount());
+ buffer << "BB:" << bb->getId() << " (" << bb->getInsnCount() << " instructions) - ";
if (bb->idom())
- INFO("idom = BB:%i, ", bb->idom()->getId());
+ buffer << "idom = BB:" << bb->idom()->getId() << ", ";
- INFO("df = { ");
+ buffer << "df = { ";
for (DLList::Iterator df = bb->getDF().iterator(); !df.end(); df.next())
- INFO("BB:%i ", BasicBlock::get(df)->getId());
+ buffer << "BB:" << BasicBlock::get(df)->getId() << ' ';
- INFO("}\n");
+ buffer << '}' << std::endl;
for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next())
- INFO(" -> BB:%i (%s)\n",
- BasicBlock::get(ei.getNode())->getId(),
- ei.getEdge()->typeStr());
+ buffer << " -> BB:" << BasicBlock::get(ei.getNode())->getId() << " ("
+ << ei.getEdge()->typeStr() << ')' << std::endl;
return true;
}
PrintPass::visit(Instruction *insn)
{
if (omit_serial)
- INFO(" ");
+ buffer << " ";
else
- INFO("%3i: ", serial);
+ buffer << std::setw(3) << serial << ": ";
serial++;
- insn->print();
+ insn->print(buffer);
return true;
}
void
Function::print()
{
- PrintPass pass(prog->driver->omitLineNum);
+ std::ostringstream buffer;
+ PrintPass pass(buffer, prog->driver->omitLineNum);
pass.run(this, true, false);
+ MSG(buffer.str().c_str());
}
void
Program::print()
{
- PrintPass pass(driver->omitLineNum);
+ std::ostringstream buffer;
+ PrintPass pass(buffer, driver->omitLineNum);
init_colours();
pass.run(this, true, false);
+ MSG(buffer.str().c_str());
}
void
#define ERROR(args...) _debug_printf("ERROR: " args)
#define WARN(args...) _debug_printf("WARNING: " args)
#define INFO(args...) _debug_printf(args)
+#define MSG(msg) os_log_message(msg)
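+/* MSG() takes an already-formatted string; the IR printer uses it to flush
+ * its buffered output in a single call.
+ */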
#define INFO_DBG(m, f, args...) \
do { \
files_libnouveau = files(
'nouveau_buffer.c',
'nouveau_buffer.h',
+ 'nouveau_context.c',
'nouveau_context.h',
'nouveau_debug.h',
'nouveau_fence.c',
static inline void
release_allocation(struct nouveau_mm_allocation **mm,
- struct nouveau_fence *fence)
+ struct nouveau_fence *fence,
+ struct nouveau_pushbuf *push)
{
- nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
+ nouveau_fence_work(fence, push, nouveau_mm_free_work, *mm);
(*mm) = NULL;
}
inline void
-nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
+nouveau_buffer_release_gpu_storage(struct nouveau_pushbuf *push, struct nv04_resource *buf)
{
if (buf->fence && buf->fence->state < NOUVEAU_FENCE_STATE_FLUSHED) {
- nouveau_fence_work(buf->fence, nouveau_fence_unref_bo, buf->bo);
+ nouveau_fence_work(buf->fence, push, nouveau_fence_unref_bo, buf->bo);
buf->bo = NULL;
} else {
nouveau_bo_ref(NULL, &buf->bo);
}
if (buf->mm)
- release_allocation(&buf->mm, buf->fence);
+ release_allocation(&buf->mm, buf->fence, push);
if (buf->domain == NOUVEAU_BO_VRAM)
NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_vid, -(uint64_t)buf->base.width0);
}
static inline bool
-nouveau_buffer_reallocate(struct nouveau_screen *screen,
+nouveau_buffer_reallocate(struct nouveau_screen *screen, struct nouveau_pushbuf *push,
struct nv04_resource *buf, unsigned domain)
{
- nouveau_buffer_release_gpu_storage(buf);
+ nouveau_buffer_release_gpu_storage(push, buf);
nouveau_fence_ref(NULL, &buf->fence);
nouveau_fence_ref(NULL, &buf->fence_wr);
{
struct nv04_resource *res = nv04_resource(presource);
- nouveau_buffer_release_gpu_storage(res);
+ nouveau_buffer_release_gpu_storage(nouveau_screen(pscreen)->pushbuf, res);
if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
align_free(res->data);
else
nv->push_data(nv, buf->bo, buf->offset + base, buf->domain, size, data);
- nouveau_fence_ref(nv->screen->fence.current, &buf->fence);
- nouveau_fence_ref(nv->screen->fence.current, &buf->fence_wr);
+ nouveau_fence_ref(nv->fence.current, &buf->fence);
+ nouveau_fence_ref(nv->fence.current, &buf->fence_wr);
}
/* Does a CPU wait for the buffer's backing data to become reliably accessible
return true;
NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
!nouveau_fence_signalled(buf->fence_wr));
- if (!nouveau_fence_wait(buf->fence_wr, &nv->debug))
+ if (!nouveau_fence_wait(buf->fence_wr, nv->pushbuf, &nv->debug))
return false;
} else {
if (!buf->fence)
return true;
NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
!nouveau_fence_signalled(buf->fence));
- if (!nouveau_fence_wait(buf->fence, &nv->debug))
+ if (!nouveau_fence_wait(buf->fence, nv->pushbuf, &nv->debug))
return false;
nouveau_fence_ref(NULL, &buf->fence);
{
if (tx->map) {
if (likely(tx->bo)) {
- nouveau_fence_work(nv->screen->fence.current,
+ nouveau_fence_work(nv->fence.current, nv->pushbuf,
nouveau_fence_unref_bo, tx->bo);
if (tx->mm)
- release_allocation(&tx->mm, nv->screen->fence.current);
+ release_allocation(&tx->mm, nv->fence.current, nv->pushbuf);
} else {
align_free(tx->map -
(tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK));
if (nouveau_buffer_should_discard(buf, usage)) {
int ref = buf->base.reference.count - 1;
- nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
+ nouveau_buffer_reallocate(nv->screen, nv->pushbuf, buf, buf->domain);
if (ref > 0) /* any references inside context possible ? */
nv->invalidate_resource_storage(nv, &buf->base, ref);
}
src->bo, src->offset + srcx, src->domain, size);
dst->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
- nouveau_fence_ref(nv->screen->fence.current, &dst->fence);
- nouveau_fence_ref(nv->screen->fence.current, &dst->fence_wr);
+ nouveau_fence_ref(nv->fence.current, &dst->fence);
+ nouveau_fence_ref(nv->fence.current, &dst->fence_wr);
src->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
- nouveau_fence_ref(nv->screen->fence.current, &src->fence);
+ nouveau_fence_ref(nv->fence.current, &src->fence);
} else {
struct pipe_box src_box;
src_box.x = srcx;
nv->copy_data(nv, buf->bo, buf->offset, new_domain,
bo, offset, old_domain, buf->base.width0);
- nouveau_fence_work(screen->fence.current, nouveau_fence_unref_bo, bo);
+ nouveau_fence_work(nv->fence.current, nv->pushbuf, nouveau_fence_unref_bo, bo);
if (mm)
- release_allocation(&mm, screen->fence.current);
+ release_allocation(&mm, nv->fence.current, nv->pushbuf);
} else
if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
struct nouveau_transfer tx;
assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);
buf->base.width0 = base + size;
- if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
+ if (!nouveau_buffer_reallocate(screen, nv->pushbuf, buf, NOUVEAU_BO_GART))
return false;
ret = nouveau_bo_map(buf->bo, 0, nv->client);
if (buf->mm && !nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE)) {
util_range_set_empty(&buf->valid_buffer_range);
} else {
- nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
+ nouveau_buffer_reallocate(nv->screen, nv->pushbuf, buf, buf->domain);
if (ref > 0) /* any references inside context possible ? */
nv->invalidate_resource_storage(nv, &buf->base, ref);
}
if (!nv->scratch.runout)
return;
- if (!nouveau_fence_work(nv->screen->fence.current, nouveau_scratch_unref_bos,
+ if (!nouveau_fence_work(nv->fence.current, nv->pushbuf, nouveau_scratch_unref_bos,
nv->scratch.runout))
return;
};
void
-nouveau_buffer_release_gpu_storage(struct nv04_resource *);
+nouveau_buffer_release_gpu_storage(struct nouveau_pushbuf *push, struct nv04_resource *);
void
nouveau_copy_buffer(struct nouveau_context *,
--- /dev/null
+#include "nouveau_context.h"
+
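+/* Per-context resources: each nouveau_context now creates its own client and
+ * pushbuf and carries its own fence list, instead of sharing the screen's.
+ */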
+static void
+nouveau_set_debug_callback(struct pipe_context *pipe,
+ const struct pipe_debug_callback *cb)
+{
+ struct nouveau_context *context = nouveau_context(pipe);
+
+ if (cb)
+ context->debug = *cb;
+ else
+ memset(&context->debug, 0, sizeof(context->debug));
+}
+
+int
+nouveau_context_init(struct nouveau_context *context)
+{
+ struct nouveau_screen *screen = context->screen;
+ int ret;
+
+ context->pipe.set_debug_callback = nouveau_set_debug_callback;
+
+ ret = nouveau_client_new(screen->device, &context->client);
+ if (ret)
+ return ret;
+
+ ret = nouveau_pushbuf_new(context->client, screen->channel,
+ 4, 512 * 1024, 1,
+ &context->pushbuf);
+ if (ret)
+ return ret;
+
+ nouveau_fence_list_init(&context->fence, screen, context->pushbuf);
+ return ret;
+}
+
+void
+nouveau_context_destroy(struct nouveau_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < NOUVEAU_MAX_SCRATCH_BUFS; ++i)
+ if (ctx->scratch.bo[i])
+ nouveau_bo_ref(NULL, &ctx->scratch.bo[i]);
+
+ nouveau_pushbuf_del(&ctx->pushbuf);
+ nouveau_client_del(&ctx->client);
+
+ FREE(ctx);
+}
#include "pipe/p_context.h"
#include "pipe/p_state.h"
+
+#include "util/u_memory.h"
+
#include <nouveau.h>
+#include "nouveau_screen.h"
+
#define NOUVEAU_MAX_SCRATCH_BUFS 4
struct nv04_resource;
struct nouveau_pushbuf *pushbuf;
struct pipe_debug_callback debug;
+ struct nouveau_fence_list fence;
+
bool vbo_dirty;
void (*copy_data)(struct nouveau_context *,
void
nouveau_context_init_vdec(struct nouveau_context *);
-void
+int
nouveau_context_init(struct nouveau_context *);
void
nouveau_scratch_get(struct nouveau_context *, unsigned size, uint64_t *gpu_addr,
struct nouveau_bo **);
-static inline void
-nouveau_context_destroy(struct nouveau_context *ctx)
-{
- int i;
-
- for (i = 0; i < NOUVEAU_MAX_SCRATCH_BUFS; ++i)
- if (ctx->scratch.bo[i])
- nouveau_bo_ref(NULL, &ctx->scratch.bo[i]);
-
- FREE(ctx);
-}
+void
+nouveau_context_destroy(struct nouveau_context *);
static inline void
nouveau_context_update_frame_stats(struct nouveau_context *nv)
#endif
bool
-nouveau_fence_new(struct nouveau_screen *screen, struct nouveau_fence **fence)
+nouveau_fence_new(struct nouveau_fence_list *list, struct nouveau_fence **fence)
{
*fence = CALLOC_STRUCT(nouveau_fence);
if (!*fence)
return false;
- (*fence)->screen = screen;
+ (*fence)->list = list;
(*fence)->ref = 1;
list_inithead(&(*fence)->work);
}
void
-nouveau_fence_emit(struct nouveau_fence *fence)
+nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_pushbuf *push)
{
- struct nouveau_screen *screen = fence->screen;
+ struct nouveau_fence_list *list = fence->list;
assert(fence->state == NOUVEAU_FENCE_STATE_AVAILABLE);
++fence->ref;
- if (screen->fence.tail)
- screen->fence.tail->next = fence;
+ if (list->tail)
+ list->tail->next = fence;
else
- screen->fence.head = fence;
+ list->head = fence;
- screen->fence.tail = fence;
+ list->tail = fence;
- screen->fence.emit(&screen->base, &fence->sequence);
+ list->emit(list, push, &fence->sequence);
assert(fence->state == NOUVEAU_FENCE_STATE_EMITTING);
fence->state = NOUVEAU_FENCE_STATE_EMITTED;
nouveau_fence_del(struct nouveau_fence *fence)
{
struct nouveau_fence *it;
- struct nouveau_screen *screen = fence->screen;
+ struct nouveau_fence_list *list = fence->list;
if (fence->state == NOUVEAU_FENCE_STATE_EMITTED ||
fence->state == NOUVEAU_FENCE_STATE_FLUSHED) {
- if (fence == screen->fence.head) {
- screen->fence.head = fence->next;
- if (!screen->fence.head)
- screen->fence.tail = NULL;
+ if (fence == list->head) {
+ list->head = fence->next;
+ if (!list->head)
+ list->tail = NULL;
} else {
- for (it = screen->fence.head; it && it->next != fence; it = it->next);
+ for (it = list->head; it && it->next != fence; it = it->next);
it->next = fence->next;
- if (screen->fence.tail == fence)
- screen->fence.tail = it;
+ if (list->tail == fence)
+ list->tail = it;
}
}
}
void
-nouveau_fence_update(struct nouveau_screen *screen, bool flushed)
+nouveau_fence_update(struct nouveau_fence_list *list, bool flushed)
{
struct nouveau_fence *fence;
struct nouveau_fence *next = NULL;
- u32 sequence = screen->fence.update(&screen->base);
+ u32 sequence = list->update(list);
- if (screen->fence.sequence_ack == sequence)
+ if (list->sequence_ack == sequence)
return;
- screen->fence.sequence_ack = sequence;
+ list->sequence_ack = sequence;
- for (fence = screen->fence.head; fence; fence = next) {
+ for (fence = list->head; fence; fence = next) {
next = fence->next;
sequence = fence->sequence;
nouveau_fence_trigger_work(fence);
nouveau_fence_ref(NULL, &fence);
- if (sequence == screen->fence.sequence_ack)
+ if (sequence == list->sequence_ack)
break;
}
- screen->fence.head = next;
+ list->head = next;
if (!next)
- screen->fence.tail = NULL;
+ list->tail = NULL;
if (flushed) {
for (fence = next; fence; fence = fence->next)
bool
nouveau_fence_signalled(struct nouveau_fence *fence)
{
- struct nouveau_screen *screen = fence->screen;
-
if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
return true;
if (fence->state >= NOUVEAU_FENCE_STATE_EMITTED)
- nouveau_fence_update(screen, false);
+ nouveau_fence_update(fence->list, false);
return fence->state == NOUVEAU_FENCE_STATE_SIGNALLED;
}
static bool
-nouveau_fence_kick(struct nouveau_fence *fence)
+nouveau_fence_kick(struct nouveau_fence *fence, struct nouveau_pushbuf *push)
{
- struct nouveau_screen *screen = fence->screen;
-
/* wtf, someone is waiting on a fence in flush_notify handler? */
assert(fence->state != NOUVEAU_FENCE_STATE_EMITTING);
if (fence->state < NOUVEAU_FENCE_STATE_EMITTED) {
- PUSH_SPACE(screen->pushbuf, 8);
+ PUSH_SPACE(push, 8);
/* The space allocation might trigger a flush, which could emit the
* current fence. So check again.
*/
if (fence->state < NOUVEAU_FENCE_STATE_EMITTED)
- nouveau_fence_emit(fence);
+ nouveau_fence_emit(fence, push);
}
if (fence->state < NOUVEAU_FENCE_STATE_FLUSHED)
- if (nouveau_pushbuf_kick(screen->pushbuf, screen->pushbuf->channel))
+ if (nouveau_pushbuf_kick(push, push->channel))
return false;
- if (fence == screen->fence.current)
- nouveau_fence_next(screen);
+ if (fence == fence->list->current)
+ nouveau_fence_next(fence->list, push);
- nouveau_fence_update(screen, false);
+ nouveau_fence_update(fence->list, false);
return true;
}
bool
-nouveau_fence_wait(struct nouveau_fence *fence, struct pipe_debug_callback *debug)
+nouveau_fence_wait(struct nouveau_fence *fence, struct nouveau_pushbuf *push, struct pipe_debug_callback *debug)
{
- struct nouveau_screen *screen = fence->screen;
uint32_t spins = 0;
int64_t start = 0;
if (debug && debug->debug_message)
start = os_time_get_nano();
- if (!nouveau_fence_kick(fence))
+ if (!nouveau_fence_kick(fence, push))
return false;
do {
return true;
}
if (!spins)
- NOUVEAU_DRV_STAT(screen, any_non_kernel_fence_sync_count, 1);
+ NOUVEAU_DRV_STAT(fence->list->screen, any_non_kernel_fence_sync_count, 1);
spins++;
#ifdef PIPE_OS_UNIX
if (!(spins % 8)) /* donate a few cycles */
sched_yield();
#endif
- nouveau_fence_update(screen, false);
+ nouveau_fence_update(fence->list, false);
} while (spins < NOUVEAU_FENCE_MAX_SPINS);
debug_printf("Wait on fence %u (ack = %u, next = %u) timed out !\n",
fence->sequence,
- screen->fence.sequence_ack, screen->fence.sequence);
+ fence->list->sequence_ack, fence->list->sequence);
return false;
}
void
-nouveau_fence_next(struct nouveau_screen *screen)
+nouveau_fence_next(struct nouveau_fence_list *list, struct nouveau_pushbuf *push)
{
- if (screen->fence.current->state < NOUVEAU_FENCE_STATE_EMITTING) {
- if (screen->fence.current->ref > 1)
- nouveau_fence_emit(screen->fence.current);
+ if (list->current->state < NOUVEAU_FENCE_STATE_EMITTING) {
+ if (list->current->ref > 1)
+ nouveau_fence_emit(list->current, push);
else
return;
}
- nouveau_fence_ref(NULL, &screen->fence.current);
+ nouveau_fence_ref(NULL, &list->current);
- nouveau_fence_new(screen, &screen->fence.current);
+ nouveau_fence_new(list, &list->current);
}
void
bool
nouveau_fence_work(struct nouveau_fence *fence,
+ struct nouveau_pushbuf *push,
void (*func)(void *), void *data)
{
struct nouveau_fence_work *work;
list_add(&work->list, &fence->work);
p_atomic_inc(&fence->work_count);
if (fence->work_count > 64)
- nouveau_fence_kick(fence);
+ nouveau_fence_kick(fence, push);
return true;
}
struct pipe_debug_callback;
+struct nouveau_fence_list;
+struct nouveau_pushbuf;
+struct nouveau_screen;
+
struct nouveau_fence_work {
struct list_head list;
void (*func)(void *);
struct nouveau_fence {
struct nouveau_fence *next;
- struct nouveau_screen *screen;
+ struct nouveau_fence_list *list;
int state;
int ref;
uint32_t sequence;
struct list_head work;
};
-void nouveau_fence_emit(struct nouveau_fence *);
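+/* Fences are grouped into a list (one per screen, or per context) that owns
+ * the head/tail/current pointers and the emit/update hooks; the hooks take
+ * the pushbuf explicitly and ->data carries driver-private state.
+ */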
+struct nouveau_fence_list {
+ struct nouveau_fence *head;
+ struct nouveau_fence *tail;
+ struct nouveau_fence *current;
+
+ struct nouveau_screen *screen;
+ struct nouveau_pushbuf *push;
+ void *data;
+
+ uint32_t sequence;
+ uint32_t sequence_ack;
+ void (*emit)(struct nouveau_fence_list *, struct nouveau_pushbuf *, uint32_t *sequence);
+ uint32_t (*update)(struct nouveau_fence_list *);
+};
+
+void nouveau_fence_emit(struct nouveau_fence *, struct nouveau_pushbuf *);
void nouveau_fence_del(struct nouveau_fence *);
-bool nouveau_fence_new(struct nouveau_screen *, struct nouveau_fence **);
-bool nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
-void nouveau_fence_update(struct nouveau_screen *, bool flushed);
-void nouveau_fence_next(struct nouveau_screen *);
-bool nouveau_fence_wait(struct nouveau_fence *, struct pipe_debug_callback *);
+bool nouveau_fence_new(struct nouveau_fence_list *list, struct nouveau_fence **);
+bool nouveau_fence_work(struct nouveau_fence *, struct nouveau_pushbuf *, void (*)(void *), void *);
+void nouveau_fence_update(struct nouveau_fence_list *, bool flushed);
+void nouveau_fence_next(struct nouveau_fence_list *, struct nouveau_pushbuf *);
+bool nouveau_fence_wait(struct nouveau_fence *, struct nouveau_pushbuf *push, struct pipe_debug_callback *);
bool nouveau_fence_signalled(struct nouveau_fence *);
void nouveau_fence_unref_bo(void *data); /* generic unref bo callback */
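+/* Bind a fence list to its screen and default pushbuf; the driver fills in
+ * the emit/update hooks and ->data afterwards.
+ */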
+static inline void
+nouveau_fence_list_init(struct nouveau_fence_list *list,
+ struct nouveau_screen *screen,
+ struct nouveau_pushbuf *push)
+{
+ list->screen = screen;
+ list->push = push;
+}
static inline void
nouveau_fence_ref(struct nouveau_fence *fence, struct nouveau_fence **ref)
struct pipe_fence_handle *pfence,
uint64_t timeout)
{
+ struct nouveau_fence *fence = nouveau_fence(pfence);
+
if (!timeout)
- return nouveau_fence_signalled(nouveau_fence(pfence));
+ return nouveau_fence_signalled(fence);
- return nouveau_fence_wait(nouveau_fence(pfence), NULL);
+ return nouveau_fence_wait(fence, fence->list->push, NULL);
}
if (ret)
return ret;
+ nouveau_fence_list_init(&screen->fence, screen, screen->pushbuf);
+
/* getting CPU time first appears to be more accurate */
screen->cpu_gpu_time_delta = os_time_get();
disk_cache_destroy(screen->disk_shader_cache);
}
-
-static void
-nouveau_set_debug_callback(struct pipe_context *pipe,
- const struct pipe_debug_callback *cb)
-{
- struct nouveau_context *context = nouveau_context(pipe);
-
- if (cb)
- context->debug = *cb;
- else
- memset(&context->debug, 0, sizeof(context->debug));
-}
-
-void
-nouveau_context_init(struct nouveau_context *context)
-{
- context->pipe.set_debug_callback = nouveau_set_debug_callback;
-}
# define NOUVEAU_ENABLE_DRIVER_STATISTICS
#endif
+#include "nouveau_fence.h"
+
typedef uint32_t u32;
typedef uint16_t u16;
uint16_t class_3d;
- struct {
- struct nouveau_fence *head;
- struct nouveau_fence *tail;
- struct nouveau_fence *current;
- u32 sequence;
- u32 sequence_ack;
- void (*emit)(struct pipe_screen *, u32 *sequence);
- u32 (*update)(struct pipe_screen *);
- } fence;
+ struct nouveau_fence_list fence;
struct nouveau_mman *mm_VRAM;
struct nouveau_mman *mm_GART;
nv30 = container_of(push->user_priv, nv30, bufctx);
screen = &nv30->screen->base;
- nouveau_fence_next(screen);
- nouveau_fence_update(screen, true);
+ nouveau_fence_next(&screen->fence, push);
+ nouveau_fence_update(&screen->fence, true);
if (push->bufctx) {
struct nouveau_bufref *bref;
if (nv30->blit_fp)
pipe_resource_reference(&nv30->blit_fp, NULL);
- if (nv30->screen->base.pushbuf->user_priv == &nv30->bufctx)
- nv30->screen->base.pushbuf->user_priv = NULL;
+ if (nv30->base.pushbuf->user_priv == &nv30->bufctx)
+ nv30->base.pushbuf->user_priv = NULL;
nouveau_bufctx_del(&nv30->bufctx);
{
struct nv30_render *r = nv30_render(render);
struct nv30_context *nv30 = r->nv30;
- struct nouveau_pushbuf *push = nv30->screen->base.pushbuf;
+ struct nouveau_pushbuf *push = nv30->base.pushbuf;
unsigned i;
BEGIN_NV04(push, NV30_3D(VTXBUF(0)), r->vertex_info.num_attribs);
struct nv30_render *r = nv30_render(nv30->draw->render);
struct nv30_rasterizer_stateobj *rast = nv30->rast;
struct pipe_screen *pscreen = &nv30->screen->base.base;
- struct nouveau_pushbuf *push = nv30->screen->base.pushbuf;
+ struct nouveau_pushbuf *push = nv30->base.pushbuf;
struct nouveau_object *eng3d = nv30->screen->eng3d;
struct nv30_vertprog *vp = nv30->vertprog.program;
struct vertex_info *vinfo = &r->vertex_info;
}
/* Allow the copies above to finish executing before freeing the source */
- nouveau_fence_work(nv30->screen->base.fence.current,
+ nouveau_fence_work(nv30->screen->base.fence.current, nv30->base.pushbuf,
nouveau_fence_unref_bo, tx->tmp.bo);
} else {
nouveau_bo_ref(NULL, &tx->tmp.bo);
}
static void
-nv30_screen_fence_emit(struct pipe_screen *pscreen, uint32_t *sequence)
+nv30_screen_fence_emit(struct nouveau_fence_list *fence, struct nouveau_pushbuf *push, uint32_t *sequence)
{
- struct nv30_screen *screen = nv30_screen(pscreen);
- struct nouveau_pushbuf *push = screen->base.pushbuf;
-
- *sequence = ++screen->base.fence.sequence;
+ *sequence = ++fence->sequence;
assert(PUSH_AVAIL(push) + push->rsvd_kick >= 3);
PUSH_DATA (push, NV30_3D_FENCE_OFFSET |
}
static uint32_t
-nv30_screen_fence_update(struct pipe_screen *pscreen)
+nv30_screen_fence_update(struct nouveau_fence_list *list)
{
- struct nv30_screen *screen = nv30_screen(pscreen);
+ struct nv30_screen *screen = list->data;
struct nv04_notify *fence = screen->fence->data;
return *(uint32_t *)((char *)screen->notify->map + fence->offset);
}
* _current_ one, and remove both.
*/
nouveau_fence_ref(screen->base.fence.current, ¤t);
- nouveau_fence_wait(current, NULL);
+ nouveau_fence_wait(current, screen->base.pushbuf, NULL);
nouveau_fence_ref(NULL, ¤t);
nouveau_fence_ref(NULL, &screen->base.fence.current);
}
if (ret)
FAIL_SCREEN_INIT("nv30_screen_init failed: %d\n", ret);
+ screen->base.fence.data = screen;
+
screen->base.vidmem_bindings |= PIPE_BIND_VERTEX_BUFFER;
screen->base.sysmem_bindings |= PIPE_BIND_VERTEX_BUFFER;
if (oclass == NV40_3D_CLASS) {
nouveau_pushbuf_kick(push, push->channel);
- nouveau_fence_new(&screen->base, &screen->base.fence.current);
+ nouveau_fence_new(&screen->base.fence, &screen->base.fence.current);
return &screen->base;
}
int i = ffs(vbo_user) - 1;
vbo_user &= ~(1 << i);
- nouveau_buffer_release_gpu_storage(nv04_resource(nv30->vtxbuf[i].buffer.resource));
+ nouveau_buffer_release_gpu_storage(nv30->base.pushbuf, nv04_resource(nv30->vtxbuf[i].buffer.resource));
}
nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXTMP);
nv50_compute_upload_input(struct nv50_context *nv50, const uint32_t *input)
{
struct nv50_screen *screen = nv50->screen;
- struct nouveau_pushbuf *push = screen->base.pushbuf;
+ struct nouveau_pushbuf *push = nv50->base.pushbuf;
unsigned size = align(nv50->compprog->parm_size, 0x4);
BEGIN_NV04(push, NV50_CP(USER_PARAM_COUNT), 1);
mm = nouveau_mm_allocate(screen->base.mm_GART, size, &bo, &offset);
assert(mm);
- nouveau_bo_map(bo, 0, screen->base.client);
+ nouveau_bo_map(bo, 0, nv50->base.client);
memcpy(bo->map + offset, input, size);
nouveau_bufctx_refn(nv50->bufctx, 0, bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
BEGIN_NV04(push, NV50_CP(USER_PARAM(0)), size / 4);
nouveau_pushbuf_data(push, bo, offset, size);
- nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work, mm);
+ nouveau_fence_work(screen->base.fence.current, push, nouveau_mm_free_work, mm);
nouveau_bo_ref(NULL, &bo);
nouveau_bufctx_reset(nv50->bufctx, 0);
}
struct nv50_screen *screen = push->user_priv;
if (screen) {
- nouveau_fence_next(&screen->base);
- nouveau_fence_update(&screen->base, true);
+ nouveau_fence_next(&screen->base.fence, push);
+ nouveau_fence_update(&screen->base.fence, true);
if (screen->cur_ctx)
screen->cur_ctx->state.flushed = true;
}
nv50->base.pushbuf = screen->base.pushbuf;
nv50->base.client = screen->base.client;
- ret = nouveau_bufctx_new(screen->base.client, 2, &nv50->bufctx);
+ ret = nouveau_bufctx_new(nv50->base.client, 2, &nv50->bufctx);
if (!ret)
- ret = nouveau_bufctx_new(screen->base.client, NV50_BIND_3D_COUNT,
+ ret = nouveau_bufctx_new(nv50->base.client, NV50_BIND_3D_COUNT,
&nv50->bufctx_3d);
if (!ret)
- ret = nouveau_bufctx_new(screen->base.client, NV50_BIND_CP_COUNT,
+ ret = nouveau_bufctx_new(nv50->base.client, NV50_BIND_CP_COUNT,
&nv50->bufctx_cp);
if (ret)
goto out_err;
*/
nv50->state = screen->save_state;
screen->cur_ctx = nv50;
- nouveau_pushbuf_bufctx(screen->base.pushbuf, nv50->bufctx);
+ nouveau_pushbuf_bufctx(nv50->base.pushbuf, nv50->bufctx);
}
nv50->base.pushbuf->kick_notify = nv50_default_kick_notify;
struct nv50_miptree *mt = nv50_miptree(pt);
if (mt->base.fence && mt->base.fence->state < NOUVEAU_FENCE_STATE_FLUSHED)
- nouveau_fence_work(mt->base.fence, nouveau_fence_unref_bo, mt->base.bo);
+ nouveau_fence_work(mt->base.fence, nouveau_screen(pscreen)->pushbuf, nouveau_fence_unref_bo, mt->base.bo);
else
nouveau_bo_ref(NULL, &mt->base.bo);
if (hq->state == NV50_HW_QUERY_STATE_READY)
nouveau_mm_free(hq->mm);
else
- nouveau_fence_work(screen->base.fence.current,
+ nouveau_fence_work(screen->base.fence.current, nv50->base.pushbuf,
nouveau_mm_free_work, hq->mm);
}
}
return false;
hq->offset = hq->base_offset;
- ret = nouveau_bo_map(hq->bo, 0, screen->base.client);
+ ret = nouveau_bo_map(hq->bo, 0, nv50->base.client);
if (ret) {
nv50_hw_query_allocate(nv50, q, 0);
return false;
}
return false;
}
- if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nv50->screen->base.client))
+ if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nv50->base.client))
return false;
}
hq->state = NV50_HW_QUERY_STATE_READY;
* _current_ one, and remove both.
*/
nouveau_fence_ref(screen->base.fence.current, ¤t);
- nouveau_fence_wait(current, NULL);
+ nouveau_fence_wait(current, screen->base.pushbuf, NULL);
nouveau_fence_ref(NULL, ¤t);
nouveau_fence_ref(NULL, &screen->base.fence.current);
}
}
static void
-nv50_screen_fence_emit(struct pipe_screen *pscreen, u32 *sequence)
+nv50_screen_fence_emit(struct nouveau_fence_list *fence, struct nouveau_pushbuf *push, u32 *sequence)
{
- struct nv50_screen *screen = nv50_screen(pscreen);
- struct nouveau_pushbuf *push = screen->base.pushbuf;
+ struct nouveau_bo *bo = fence->data;
/* we need to do it after possible flush in MARK_RING */
- *sequence = ++screen->base.fence.sequence;
+ *sequence = ++fence->sequence;
assert(PUSH_AVAIL(push) + push->rsvd_kick >= 5);
PUSH_DATA (push, NV50_FIFO_PKHDR(NV50_3D(QUERY_ADDRESS_HIGH), 4));
- PUSH_DATAh(push, screen->fence.bo->offset);
- PUSH_DATA (push, screen->fence.bo->offset);
+ PUSH_DATAh(push, bo->offset);
+ PUSH_DATA (push, bo->offset);
PUSH_DATA (push, *sequence);
PUSH_DATA (push, NV50_3D_QUERY_GET_MODE_WRITE_UNK0 |
NV50_3D_QUERY_GET_UNK4 |
}
static u32
-nv50_screen_fence_update(struct pipe_screen *pscreen)
+nv50_screen_fence_update(struct nouveau_fence_list *fence)
{
- return nv50_screen(pscreen)->fence.map[0];
+ struct nouveau_bo *bo = fence->data;
+ uint32_t *map = bo->map;
+ return map[0];
}
static void
NOUVEAU_ERR("Failed to allocate fence bo: %d\n", ret);
goto fail;
}
-
nouveau_bo_map(screen->fence.bo, 0, NULL);
- screen->fence.map = screen->fence.bo->map;
+ screen->base.fence.data = screen->fence.bo;
screen->base.fence.emit = nv50_screen_fence_emit;
screen->base.fence.update = nv50_screen_fence_update;
goto fail;
}
- nouveau_fence_new(&screen->base, &screen->base.fence.current);
+ nouveau_fence_new(&screen->base.fence, &screen->base.fence.current);
return &screen->base;
} tsc;
struct {
- uint32_t *map;
struct nouveau_bo *bo;
} fence;
if (usage & PIPE_TRANSFER_WRITE)
flags |= NOUVEAU_BO_WR;
- ret = nouveau_bo_map(tx->rect[1].bo, flags, screen->base.client);
+ ret = nouveau_bo_map(tx->rect[1].bo, flags, nv50->base.client);
if (ret) {
nouveau_bo_ref(NULL, &tx->rect[1].bo);
FREE(tx);
}
/* Allow the copies above to finish executing before freeing the source */
- nouveau_fence_work(nv50->screen->base.fence.current,
+ nouveau_fence_work(nv50->screen->base.fence.current, nv50->base.pushbuf,
nouveau_fence_unref_bo, tx->rect[1].bo);
} else {
nouveau_bo_ref(NULL, &tx->rect[1].bo);
* pushbuf submit, but it's probably not a big performance difference.
*/
if (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr))
- nouveau_fence_wait(buf->fence_wr, &nv50->base.debug);
+ nouveau_fence_wait(buf->fence_wr, push, &nv50->base.debug);
while (instance_count--) {
BEGIN_NV04(push, NV50_3D(VERTEX_BEGIN_GL), 1);
{
struct nv50_screen *screen = chan->user_priv;
- nouveau_fence_update(&screen->base, true);
+ nouveau_fence_update(&screen->base.fence, true);
nv50_bufctx_fence(screen->cur_ctx->bufctx_3d, true);
}
unsigned flags)
{
struct nvc0_context *nvc0 = nvc0_context(pipe);
- struct nouveau_screen *screen = &nvc0->screen->base;
if (fence)
- nouveau_fence_ref(screen->fence.current, (struct nouveau_fence **)fence);
+ nouveau_fence_ref(nvc0->base.fence.current, (struct nouveau_fence **)fence);
+// nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx);
+// nouveau_pushbuf_validate(nvc0->base.pushbuf);
PUSH_KICK(nvc0->base.pushbuf); /* fencing handled in kick_notify */
nouveau_context_update_frame_stats(&nvc0->base);
free(pos);
}
+ if (nvc0->base.fence.current) {
+ struct nouveau_fence *current = NULL;
+
+ /* nouveau_fence_wait will create a new current fence, so wait on the
+ * _current_ one, and remove both.
+ */
+ nouveau_fence_ref(nvc0->base.fence.current, ¤t);
+ nouveau_fence_wait(current, nvc0->base.pushbuf, NULL);
+ nouveau_fence_ref(NULL, ¤t);
+ nouveau_fence_ref(NULL, &nvc0->base.fence.current);
+ }
+ if (nvc0->base.pushbuf)
+ nvc0->base.pushbuf->user_priv = NULL;
+
+ nouveau_bo_ref(NULL, &nvc0->fence.bo);
+
nouveau_context_destroy(&nvc0->base);
}
void
nvc0_default_kick_notify(struct nouveau_pushbuf *push)
{
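+ /* push->user_priv now points at the nvc0_context (set in nvc0_create)
+ * rather than the screen.
+ */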
- struct nvc0_screen *screen = push->user_priv;
-
- if (screen) {
- nouveau_fence_next(&screen->base);
- nouveau_fence_update(&screen->base, true);
- if (screen->cur_ctx)
- screen->cur_ctx->state.flushed = true;
- NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1);
+ struct nvc0_context *nvc0 = push->user_priv;
+
+ if (nvc0) {
+ nouveau_fence_next(&nvc0->base.fence, push);
+ nouveau_fence_update(&nvc0->base.fence, true);
+ nvc0->state.flushed = true;
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, pushbuf_count, 1);
}
}
if (!nvc0_blitctx_create(nvc0))
goto out_err;
- nvc0->base.pushbuf = screen->base.pushbuf;
- nvc0->base.client = screen->base.client;
+ nvc0->screen = screen;
+ nvc0->base.screen = &screen->base;
+ nouveau_context_init(&nvc0->base);
- ret = nouveau_bufctx_new(screen->base.client, 2, &nvc0->bufctx);
+ ret = nouveau_bufctx_new(nvc0->base.client, 2, &nvc0->bufctx);
if (!ret)
- ret = nouveau_bufctx_new(screen->base.client, NVC0_BIND_3D_COUNT,
+ ret = nouveau_bufctx_new(nvc0->base.client, NVC0_BIND_3D_COUNT,
&nvc0->bufctx_3d);
if (!ret)
- ret = nouveau_bufctx_new(screen->base.client, NVC0_BIND_CP_COUNT,
+ ret = nouveau_bufctx_new(nvc0->base.client, NVC0_BIND_CP_COUNT,
&nvc0->bufctx_cp);
if (ret)
goto out_err;
- nvc0->screen = screen;
- nvc0->base.screen = &screen->base;
-
pipe->screen = pscreen;
pipe->priv = priv;
pipe->stream_uploader = u_upload_create_default(pipe);
pipe->get_sample_position = nvc0_context_get_sample_position;
pipe->emit_string_marker = nvc0_emit_string_marker;
- nouveau_context_init(&nvc0->base);
+ flags = NOUVEAU_BO_GART | NOUVEAU_BO_MAP;
+ if (screen->base.drm->version >= 0x01000202)
+ flags |= NOUVEAU_BO_COHERENT;
+
+ ret = nouveau_bo_new(screen->base.device, flags, 0, 4096, NULL, &nvc0->fence.bo);
+ if (ret)
+ goto out_err;
+ nouveau_bo_map(nvc0->fence.bo, 0, NULL);
+ nvc0->base.fence.data = nvc0->fence.bo;
+ nvc0->base.fence.emit = nvc0_screen_fence_emit;
+ nvc0->base.fence.update = nvc0_screen_fence_update;
+
+ nouveau_fence_new(&nvc0->base.fence, &nvc0->base.fence.current);
+
+ /* initialize the pushbuffer */
+ nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx);
+ nouveau_pushbuf_validate(nvc0->base.pushbuf);
+
nvc0_init_query_functions(nvc0);
nvc0_init_surface_functions(nvc0);
nvc0_init_state_functions(nvc0);
if (!screen->cur_ctx) {
nvc0->state = screen->save_state;
screen->cur_ctx = nvc0;
- nouveau_pushbuf_bufctx(screen->base.pushbuf, nvc0->bufctx);
}
- screen->base.pushbuf->kick_notify = nvc0_default_kick_notify;
+ nvc0->base.pushbuf->kick_notify = nvc0_default_kick_notify;
+ nvc0->base.pushbuf->user_priv = nvc0;
+ nvc0->base.pushbuf->rsvd_kick = 5;
/* add permanently resident buffers to bufctxts */
flags = NOUVEAU_BO_GART | NOUVEAU_BO_WR;
- BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->fence.bo);
- BCTX_REFN_bo(nvc0->bufctx, FENCE, flags, screen->fence.bo);
+ BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, nvc0->fence.bo);
+ BCTX_REFN_bo(nvc0->bufctx, FENCE, flags, nvc0->fence.bo);
if (screen->compute)
- BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->fence.bo);
+ BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, nvc0->fence.bo);
nvc0->base.scratch.bo_size = 2 << 20;
struct nvc0_screen *screen;
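+ /* Per-context fence BO; the fence sequence is written here by QUERY_GET. */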
+ struct {
+ struct nouveau_bo *bo;
+ } fence;
+
void (*m2mf_copy_rect)(struct nvc0_context *,
const struct nv50_m2mf_rect *dst,
const struct nv50_m2mf_rect *src,
IMMED_NVC0(nvc0->base.pushbuf, NVC0_3D(SERIALIZE), 0);
if ((screen->text->size << 1) <= (1 << 23)) {
- ret = nvc0_screen_resize_text_area(screen, screen->text->size << 1);
+ ret = nvc0_screen_resize_text_area(screen, nvc0->base.pushbuf, screen->text->size << 1);
if (ret) {
NOUVEAU_ERR("Error allocating TEXT area: %d\n", ret);
return false;
if (hq->state == NVC0_HW_QUERY_STATE_READY)
nouveau_mm_free(hq->mm);
else
- nouveau_fence_work(screen->base.fence.current,
+ nouveau_fence_work(nvc0->base.fence.current, nvc0->base.pushbuf,
nouveau_mm_free_work, hq->mm);
}
}
return false;
hq->offset = hq->base_offset;
- ret = nouveau_bo_map(hq->bo, 0, screen->base.client);
+ ret = nouveau_bo_map(hq->bo, 0, nvc0->base.client);
if (ret) {
nvc0_hw_query_allocate(nvc0, q, 0);
return false;
break;
}
if (hq->is64bit)
- nouveau_fence_ref(nvc0->screen->base.fence.current, &hq->fence);
+ nouveau_fence_ref(nvc0->base.fence.current, &hq->fence);
}
static bool
return hq->funcs->get_query_result(nvc0, hq, wait, result);
if (hq->state != NVC0_HW_QUERY_STATE_READY)
- nvc0_hw_query_update(nvc0->screen->base.client, q);
+ nvc0_hw_query_update(nvc0->base.client, q);
if (hq->state != NVC0_HW_QUERY_STATE_READY) {
if (!wait) {
}
return false;
}
- if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nvc0->screen->base.client))
+ if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nvc0->base.client))
return false;
NOUVEAU_DRV_STAT(&nvc0->screen->base, query_sync_count, 1);
}
if (index == -1) {
/* TODO: Use a macro to write the availability of the query */
if (hq->state != NVC0_HW_QUERY_STATE_READY)
- nvc0_hw_query_update(nvc0->screen->base.client, q);
+ nvc0_hw_query_update(nvc0->base.client, q);
uint32_t ready[2] = {hq->state == NVC0_HW_QUERY_STATE_READY};
nvc0->base.push_cb(&nvc0->base, buf, offset,
result_type >= PIPE_QUERY_TYPE_I64 ? 2 : 1,
* of the following logic more complicated.
*/
if (hq->is64bit && hq->fence->state < NOUVEAU_FENCE_STATE_EMITTED)
- nouveau_fence_emit(hq->fence);
+ nouveau_fence_emit(hq->fence, push);
/* We either need to compute a 32- or 64-bit difference between 2 values,
* and then store the result as either a 32- or 64-bit value. As such let's
* outputs the difference (no need to worry about 64-bit clamping).
*/
if (hq->state != NVC0_HW_QUERY_STATE_READY)
- nvc0_hw_query_update(nvc0->screen->base.client, q);
+ nvc0_hw_query_update(nvc0->base.client, q);
if (wait && hq->state != NVC0_HW_QUERY_STATE_READY)
nvc0_hw_query_fifo_wait(nvc0, q);
/* ensure the query's fence has been emitted */
if (hq->is64bit && hq->fence->state < NOUVEAU_FENCE_STATE_EMITTED)
- nouveau_fence_emit(hq->fence);
+ nouveau_fence_emit(hq->fence, push);
PUSH_SPACE(push, 5);
PUSH_REFN (push, hq->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
* _current_ one, and remove both.
*/
nouveau_fence_ref(screen->base.fence.current, ¤t);
- nouveau_fence_wait(current, NULL);
+ nouveau_fence_wait(current, screen->base.pushbuf, NULL);
nouveau_fence_ref(NULL, ¤t);
nouveau_fence_ref(NULL, &screen->base.fence.current);
}
* are supposed to do */
}
-static void
-nvc0_screen_fence_emit(struct pipe_screen *pscreen, u32 *sequence)
+void
+nvc0_screen_fence_emit(struct nouveau_fence_list *list, struct nouveau_pushbuf *push, u32 *sequence)
{
- struct nvc0_screen *screen = nvc0_screen(pscreen);
- struct nouveau_pushbuf *push = screen->base.pushbuf;
+ struct nouveau_bo *bo = list->data;
/* we need to do it after possible flush in MARK_RING */
- *sequence = ++screen->base.fence.sequence;
+ *sequence = ++list->sequence;
assert(PUSH_AVAIL(push) + push->rsvd_kick >= 5);
PUSH_DATA (push, NVC0_FIFO_PKHDR_SQ(NVC0_3D(QUERY_ADDRESS_HIGH), 4));
- PUSH_DATAh(push, screen->fence.bo->offset);
- PUSH_DATA (push, screen->fence.bo->offset);
+ PUSH_DATAh(push, bo->offset);
+ PUSH_DATA (push, bo->offset);
PUSH_DATA (push, *sequence);
PUSH_DATA (push, NVC0_3D_QUERY_GET_FENCE | NVC0_3D_QUERY_GET_SHORT |
(0xf << NVC0_3D_QUERY_GET_UNIT__SHIFT));
}
-static u32
-nvc0_screen_fence_update(struct pipe_screen *pscreen)
+uint32_t
+nvc0_screen_fence_update(struct nouveau_fence_list *list)
{
- struct nvc0_screen *screen = nvc0_screen(pscreen);
- return screen->fence.map[0];
+ struct nouveau_bo *bo = list->data;
+ uint32_t *map = bo->map;
+ return map[0];
}
static int
}
int
-nvc0_screen_resize_text_area(struct nvc0_screen *screen, uint64_t size)
+nvc0_screen_resize_text_area(struct nvc0_screen *screen, struct nouveau_pushbuf *push, uint64_t size)
{
- struct nouveau_pushbuf *push = screen->base.pushbuf;
struct nouveau_bo *bo;
int ret;
}
void
-nvc0_screen_bind_cb_3d(struct nvc0_screen *screen, bool *can_serialize,
- int stage, int index, int size, uint64_t addr)
+nvc0_screen_bind_cb_3d(struct nvc0_screen *screen, struct nouveau_pushbuf *push,
+ bool *can_serialize, int stage, int index, int size,
+ uint64_t addr)
{
assert(stage != 5);
- struct nouveau_pushbuf *push = screen->base.pushbuf;
-
if (screen->base.class_3d >= GM107_3D_CLASS) {
struct nvc0_cb_binding *binding = &screen->cb_bindings[stage][index];
if (ret)
FAIL_SCREEN_INIT("Error allocating fence BO: %d\n", ret);
nouveau_bo_map(screen->fence.bo, 0, NULL);
- screen->fence.map = screen->fence.bo->map;
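+ /* Stash the fence BO in the generic fence list so the fence emit/update
+  * callbacks can reach it without going through the nvc0 screen. */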
+ screen->base.fence.data = screen->fence.bo;
screen->base.fence.emit = nvc0_screen_fence_emit;
screen->base.fence.update = nvc0_screen_fence_update;
-
ret = nouveau_object_new(chan, (dev->chipset < 0xe0) ? 0x1f906e : 0x906e,
NVIF_CLASS_SW_GF100, NULL, 0, &screen->nvsw);
if (ret)
nvc0_magic_3d_init(push, screen->eng3d->oclass);
- ret = nvc0_screen_resize_text_area(screen, 1 << 19);
+ ret = nvc0_screen_resize_text_area(screen, push, 1 << 19);
if (ret)
FAIL_SCREEN_INIT("Error allocating TEXT area: %d\n", ret);
/* TIC and TSC entries for each unit (nve4+ only) */
/* auxiliary constants (6 user clip planes, base instance id) */
- nvc0_screen_bind_cb_3d(screen, NULL, i, 15, NVC0_CB_AUX_SIZE,
+ nvc0_screen_bind_cb_3d(screen, push, NULL, i, 15, NVC0_CB_AUX_SIZE,
screen->uniform_bo->offset + NVC0_CB_AUX_INFO(i));
if (screen->eng3d->oclass >= NVE4_3D_CLASS) {
unsigned j;
if (!nvc0_blitter_create(screen))
goto fail;
- nouveau_fence_new(&screen->base, &screen->base.fence.current);
+ nouveau_fence_new(&screen->base.fence, &screen->base.fence.current);
return &screen->base;
struct {
struct nouveau_bo *bo;
- uint32_t *map;
} fence;
struct {
int nve4_screen_compute_setup(struct nvc0_screen *, struct nouveau_pushbuf *);
int nvc0_screen_compute_setup(struct nvc0_screen *, struct nouveau_pushbuf *);
-int nvc0_screen_resize_text_area(struct nvc0_screen *, uint64_t);
+int nvc0_screen_resize_text_area(struct nvc0_screen *, struct nouveau_pushbuf *, uint64_t);
// 3D Only
-void nvc0_screen_bind_cb_3d(struct nvc0_screen *, bool *, int, int, int, uint64_t);
+void nvc0_screen_bind_cb_3d(struct nvc0_screen *, struct nouveau_pushbuf *push,
+ bool *, int, int, int, uint64_t);
static inline void
nvc0_resource_fence(struct nv04_resource *res, uint32_t flags)
}
}
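+/* Fence callbacks installed on the screen's nouveau_fence_list at init. */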
+void
+nvc0_screen_fence_emit(struct nouveau_fence_list *, struct nouveau_pushbuf *, u32 *sequence);
+
+uint32_t
+nvc0_screen_fence_update(struct nouveau_fence_list *);
+
#endif
if (!nvc0->state.uniform_buffer_bound[s]) {
nvc0->state.uniform_buffer_bound[s] = true;
- nvc0_screen_bind_cb_3d(nvc0->screen, &can_serialize, s, i,
+ nvc0_screen_bind_cb_3d(nvc0->screen, nvc0->base.pushbuf, &can_serialize, s, i,
NVC0_MAX_CONSTBUF_SIZE, bo->offset + base);
}
nvc0_cb_bo_push(&nvc0->base, bo, NV_VRAM_DOMAIN(&nvc0->screen->base),
struct nv04_resource *res =
nv04_resource(nvc0->constbuf[s][i].u.buf);
if (res) {
- nvc0_screen_bind_cb_3d(nvc0->screen, &can_serialize, s, i,
+ nvc0_screen_bind_cb_3d(nvc0->screen, nvc0->base.pushbuf, &can_serialize, s, i,
nvc0->constbuf[s][i].size,
res->address + nvc0->constbuf[s][i].offset);
if (i == 0)
nvc0->state.uniform_buffer_bound[s] = false;
} else if (i != 0) {
- nvc0_screen_bind_cb_3d(nvc0->screen, &can_serialize, s, i, -1, 0);
+ nvc0_screen_bind_cb_3d(nvc0->screen, nvc0->base.pushbuf, &can_serialize, s, i, -1, 0);
}
}
}
int i;
for (i = 0; i < 5; ++i)
- nvc0_screen_bind_cb_3d(screen, NULL, i, 15, NVC0_CB_AUX_SIZE,
+ nvc0_screen_bind_cb_3d(screen, nvc0->base.pushbuf, NULL, i, 15, NVC0_CB_AUX_SIZE,
screen->uniform_bo->offset + NVC0_CB_AUX_INFO(i));
nvc0->dirty_cp |= NVC0_NEW_CP_DRIVERCONST;
return !nouveau_bo_wait(mt->base.bo, access, nvc0->base.client);
}
if (usage & PIPE_TRANSFER_WRITE)
- return !mt->base.fence || nouveau_fence_wait(mt->base.fence, &nvc0->base.debug);
- return !mt->base.fence_wr || nouveau_fence_wait(mt->base.fence_wr, &nvc0->base.debug);
+ return !mt->base.fence || nouveau_fence_wait(mt->base.fence, nvc0->base.pushbuf, &nvc0->base.debug);
+ return !mt->base.fence_wr || nouveau_fence_wait(mt->base.fence_wr, nvc0->base.pushbuf, &nvc0->base.debug);
}
void *
if (usage & PIPE_TRANSFER_WRITE)
flags |= NOUVEAU_BO_WR;
- ret = nouveau_bo_map(tx->rect[1].bo, flags, nvc0->screen->base.client);
+ ret = nouveau_bo_map(tx->rect[1].bo, flags, nvc0->base.client);
if (ret) {
pipe_resource_reference(&tx->base.resource, NULL);
nouveau_bo_ref(NULL, &tx->rect[1].bo);
NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_wr, 1);
/* Allow the copies above to finish executing before freeing the source */
- nouveau_fence_work(nvc0->screen->base.fence.current,
+ nouveau_fence_work(nvc0->base.fence.current, nvc0->base.pushbuf,
nouveau_fence_unref_bo, tx->rect[1].bo);
} else {
nouveau_bo_ref(NULL, &tx->rect[1].bo);
static void
nvc0_draw_vbo_kick_notify(struct nouveau_pushbuf *push)
{
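+ /* user_priv now carries the context rather than the screen. */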
- struct nvc0_screen *screen = push->user_priv;
+ struct nvc0_context *nvc0 = push->user_priv;
- nouveau_fence_update(&screen->base, true);
+ nouveau_fence_update(&nvc0->base.fence, true);
- NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1);
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, pushbuf_count, 1);
}
static void
include $(CLEAR_VARS)
LOCAL_MODULE := gallium_dri
-
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
LOCAL_MODULE_RELATIVE_PATH := $(MESA_DRI_MODULE_REL_PATH)
LOCAL_SRC_FILES := target.c
-
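+# Make headers generated for libmesa_util visible when building target.c.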
+LOCAL_C_INCLUDES := $(call generated-sources-dir-for,STATIC_LIBRARIES,libmesa_util,,)
LOCAL_CFLAGS :=
# We need --undefined-version as some functions in dri.sym may be missing
global:
__driDriverExtensions;
__driDriverGetExtensions*;
+ load_pipe_screen;
nouveau_drm_screen_create;
radeon_drm_winsys_create;
amdgpu_winsys_create;
screen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
if (screen) {
screen->refcount++;
+ fprintf(stdout, "reusing nouveau screen %p\n", screen);
mtx_unlock(&nouveau_screen_mutex);
return &screen->base;
}
*/
util_hash_table_set(fd_tab, intptr_to_pointer(dupfd), screen);
screen->refcount = 1;
+ fprintf(stdout, "creating new nouveau screen %p\n", screen);
mtx_unlock(&nouveau_screen_mutex);
return &screen->base;
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
struct virgl_hw_res *res)
{
+ if (!res) return false;
unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
int i;
struct virgl_drm_cmd_buf *cbuf,
struct virgl_hw_res *res)
{
+ if (!res) return;
unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
if (cbuf->cres >= cbuf->nres) {
intermediates := $(call local-generated-sources-dir)
-LOCAL_C_INCLUDES := $(MESA_TOP)/include/drm-uapi
+LOCAL_C_INCLUDES := \
+ $(MESA_TOP)/include/drm-uapi \
+ $(MESA_TOP)/src/gallium/include
LOCAL_SRC_FILES := $(GEN_PERF_FILES)
ahw_usage |= AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
if (vk_create & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
- ahw_usage |= AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP;
+ ahw_usage |= 1UL << 25; /* AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP */
if (vk_create & VK_IMAGE_CREATE_PROTECTED_BIT)
ahw_usage |= AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
case GL_TEXTURE_CUBE_MAP:
+ case GL_TEXTURE_EXTERNAL_OES:
ok = i830_update_tex_unit(intel, i, TEXCOORDS_ARE_NORMAL);
break;
case GL_TEXTURE_RECTANGLE:
ctx->TextureFormatSupported[MESA_FORMAT_B4G4R4A4_UNORM] = true;
ctx->TextureFormatSupported[MESA_FORMAT_B5G5R5A1_UNORM] = true;
ctx->TextureFormatSupported[MESA_FORMAT_B5G6R5_UNORM] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_R8G8B8X8_UNORM] = true;
ctx->TextureFormatSupported[MESA_FORMAT_L_UNORM8] = true;
if (intel->gen == 3)
ctx->TextureFormatSupported[MESA_FORMAT_A_UNORM8] = true;
case TEXTURE_1D_INDEX:
return D0_SAMPLE_TYPE_2D;
case TEXTURE_2D_INDEX:
+ case TEXTURE_EXTERNAL_INDEX:
return D0_SAMPLE_TYPE_2D;
case TEXTURE_RECT_INDEX:
return D0_SAMPLE_TYPE_2D;
switch (cap) {
case GL_TEXTURE_2D:
+ case GL_TEXTURE_EXTERNAL_OES:
break;
case GL_LIGHTING:
break;
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
+ case GL_TEXTURE_EXTERNAL_OES:
case GL_TEXTURE_RECTANGLE_ARB:
i915_miptree_layout_2d(mt);
break;
break;
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
+ case GL_TEXTURE_EXTERNAL_OES:
case GL_TEXTURE_RECTANGLE_ARB:
i945_miptree_layout_2d(mt);
break;
return MAPSURF_32BIT | MT_32BIT_ARGB8888;
case MESA_FORMAT_B8G8R8X8_UNORM:
return MAPSURF_32BIT | MT_32BIT_XRGB8888;
+ case MESA_FORMAT_R8G8B8X8_UNORM:
+ return MAPSURF_32BIT | MT_32BIT_XBGR8888;
case MESA_FORMAT_R8G8B8A8_UNORM:
return MAPSURF_32BIT | MT_32BIT_ABGR8888;
case MESA_FORMAT_YCBCR_REV:
case GL_TEXTURE_2D:
case GL_TEXTURE_CUBE_MAP:
case GL_TEXTURE_3D:
+ case GL_TEXTURE_EXTERNAL_OES:
ok = i915_update_tex_unit(intel, i, SS3_NORMALIZED_COORDS);
break;
case GL_TEXTURE_RECTANGLE:
{
[MESA_FORMAT_B8G8R8A8_UNORM] = DV_PF_8888,
[MESA_FORMAT_B8G8R8X8_UNORM] = DV_PF_8888,
+ [MESA_FORMAT_R8G8B8X8_UNORM] = DV_PF_8888,
[MESA_FORMAT_B5G6R5_UNORM] = DV_PF_565 | DITHER_FULL_ALWAYS,
[MESA_FORMAT_B5G5R5A1_UNORM] = DV_PF_1555 | DITHER_FULL_ALWAYS,
[MESA_FORMAT_B4G4R4A4_UNORM] = DV_PF_4444 | DITHER_FULL_ALWAYS,
ctx->Extensions.TDFX_texture_compression_FXT1 = true;
ctx->Extensions.OES_EGL_image = true;
ctx->Extensions.OES_draw_texture = true;
+ ctx->Extensions.OES_EGL_image_external = true;
ctx->Const.GLSLVersion = 120;
ctx->Const.GLSLVersionCompat = 120;
{
struct intel_miptree_map *map;
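+ /* Nothing to map without a miptree: hand back a NULL pointer and zero stride. */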
+ if (!mt) {
+ *out_ptr = NULL;
+ *out_stride = 0;
+ return;
+ }
+
map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
if (!map) {
*out_ptr = NULL;
unsigned int level,
unsigned int slice)
{
+ if (!mt) return;
struct intel_miptree_map *map = mt->level[level].slice[slice].map;
if (!map)
libmesa_isl \
libmesa_blorp \
libmesa_intel_compiler \
- libmesa_intel_perf
+ libmesa_intel_perf \
+ libmesa_genxml
ifeq ($(ARCH_X86_HAVE_SSE4_1),true)
LOCAL_CFLAGS += \
$(MESA_GEN_NIR_H)
LOCAL_MODULE_CLASS := SHARED_LIBRARIES
-
+LOCAL_C_INCLUDES += $(call generated-sources-dir-for,STATIC_LIBRARIES,libmesa_util,,)
intermediates := $(call local-generated-sources-dir)
LOCAL_GENERATED_SOURCES += $(addprefix $(intermediates)/, \
* during resolves because the resolve operations only know about the
* miptree and not the renderbuffer.
*/
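+ /* Skip renderbuffers that have no miptree backing them. */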
+ if (!irb->mt) return;
if (irb->Base.Base.Format != irb->mt->format)
can_fast_clear = false;
return false;
for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
- const struct intel_renderbuffer *irb =
+ struct intel_renderbuffer *irb =
intel_renderbuffer(fb->_ColorDrawBuffers[i]);
- if (irb && irb->mt->bo == tex_mt->bo &&
+ if (irb && irb->mt && irb->mt->bo == tex_mt->bo &&
irb->mt_level >= min_level &&
irb->mt_level < min_level + num_levels) {
found = draw_aux_buffer_disabled[i] = true;
/* Resolve color for each active shader image. */
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
- const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
+ struct gl_program *prog = ctx->_Shader->CurrentProgram[i];
if (unlikely(prog && prog->info.num_images)) {
for (unsigned j = 0; j < prog->info.num_images; j++) {
assert(brw->screen->devinfo.gen < 9);
for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
- const struct intel_renderbuffer *irb =
+ struct intel_renderbuffer *irb =
intel_renderbuffer(fb->_ColorDrawBuffers[i]);
if (irb) {
front_irb->need_downsample = true;
if (back_irb)
back_irb->need_downsample = true;
- if (depth_irb) {
+ if (depth_irb && depth_irb->mt) {
bool depth_written = brw_depth_writes_enabled(brw);
if (depth_att->Layered) {
intel_miptree_finish_depth(brw, depth_irb->mt,
brw_depth_cache_add_bo(brw, depth_irb->mt->bo);
}
- if (stencil_irb && brw->stencil_write_enabled) {
- struct intel_mipmap_tree *stencil_mt =
- stencil_irb->mt->stencil_mt != NULL ?
- stencil_irb->mt->stencil_mt : stencil_irb->mt;
- brw_depth_cache_add_bo(brw, stencil_mt->bo);
- intel_miptree_finish_write(brw, stencil_mt, stencil_irb->mt_level,
- stencil_irb->mt_layer,
- stencil_irb->layer_count, ISL_AUX_USAGE_NONE);
+ if (stencil_irb && stencil_irb->mt) {
+ if (brw->stencil_write_enabled) {
+ struct intel_mipmap_tree *stencil_mt =
+ stencil_irb->mt->stencil_mt != NULL ?
+ stencil_irb->mt->stencil_mt : stencil_irb->mt;
+ brw_depth_cache_add_bo(brw, stencil_mt->bo);
+ intel_miptree_finish_write(brw, stencil_mt, stencil_irb->mt_level,
+ stencil_irb->mt_layer,
+ stencil_irb->layer_count, ISL_AUX_USAGE_NONE);
+ }
}
for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
struct intel_renderbuffer *irb =
intel_renderbuffer(fb->_ColorDrawBuffers[i]);
- if (!irb)
+ if (!irb || !irb->mt)
continue;
mesa_format mesa_format =
static struct intel_mipmap_tree *
get_stencil_miptree(struct intel_renderbuffer *irb)
{
- if (!irb)
+ if (!irb || !irb->mt)
return NULL;
if (irb->mt->stencil_mt)
return irb->mt->stencil_mt;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
struct intel_mipmap_tree *mt = irb->mt;
+ if (!mt) return 0;
assert(brw_render_target_supported(brw, rb));
mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
uint32_t tile_x, tile_y;
enum isl_format format;
uint32_t offset;
+
+ if (!mt) return 0;
/* _NEW_BUFFERS */
mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
/* BRW_NEW_FS_PROG_DATA */
bool
intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
{
- return intel_miptree_level_has_hiz(irb->mt, irb->mt_level);
+ if (!irb->mt)
+ return false;
+ else
+ return intel_miptree_level_has_hiz(irb->mt, irb->mt_level);
}
void
const struct gen_device_info *devinfo = &brw->screen->devinfo;
struct intel_miptree_map *map;
+ if (!mt) {
+ *out_ptr = NULL;
+ *out_stride = 0;
+ return;
+ }
assert(mt->surf.samples == 1);
map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
unsigned int level,
unsigned int slice)
{
+ if (!mt) return;
struct intel_miptree_map *map = mt->level[level].slice[slice].map;
assert(mt->surf.samples == 1);
EXT(APPLE_object_purgeable , APPLE_object_purgeable , GLL, GLC, x , x , 2006)
EXT(APPLE_packed_pixels , dummy_true , GLL, x , x , x , 2002)
+EXT(APPLE_texture_2D_limited_npot , ARB_texture_non_power_of_two , x , x , ES1, x , 2011)
EXT(APPLE_texture_max_level , dummy_true , x , x , ES1, ES2, 2009)
EXT(ARB_ES2_compatibility , ARB_ES2_compatibility , GLL, GLC, x , x , 2009)
EXT(ARB_texture_mirror_clamp_to_edge , ARB_texture_mirror_clamp_to_edge , GLL, GLC, x , x , 2013)
EXT(ARB_texture_mirrored_repeat , dummy_true , GLL, x , x , x , 2001)
EXT(ARB_texture_multisample , ARB_texture_multisample , GLL, GLC, x , x , 2009)
-EXT(ARB_texture_non_power_of_two , ARB_texture_non_power_of_two , GLL, GLC, x , x , 2003)
+EXT(ARB_texture_non_power_of_two , ARB_texture_non_power_of_two , GLL, GLC, ES1, x , 2003)
EXT(ARB_texture_query_levels , ARB_texture_query_levels , GLL, GLC, x , x , 2012)
EXT(ARB_texture_query_lod , ARB_texture_query_lod , GLL, GLC, x , x , 2009)
EXT(ARB_texture_rectangle , NV_texture_rectangle , GLL, GLC, x , x , 2004)