return ret; \
}
+// Variant of DEFINE_AND_VALIDATE_HOST_CONNECTION that takes the caller's
+// EGLThreadInfo explicitly (|tls|) instead of looking it up, so it can run
+// on the TLS-destructor path (see s_eglReleaseThreadImpl).
+// Declares |hostCon| and |rcEnc| in the enclosing scope; on failure it
+// logs and returns |ret| from the enclosing function.
+#define DEFINE_AND_VALIDATE_HOST_CONNECTION_FOR_TLS(ret, tls) \
+ HostConnection *hostCon = HostConnection::getWithThreadInfo(tls); \
+ if (!hostCon) { \
+ ALOGE("egl: Failed to get host connection\n"); \
+ return ret; \
+ } \
+ ExtendedRCEncoderContext *rcEnc = hostCon->rcEncoder(); \
+ if (!rcEnc) { \
+ ALOGE("egl: Failed to get renderControl encoder context\n"); \
+ return ret; \
+ }
+
#define VALIDATE_CONTEXT_RETURN(context,ret) \
if (!(context)) { \
RETURN_ERROR(ret,EGL_BAD_CONTEXT); \
return eglWaitGL();
}
-EGLBoolean eglReleaseThread()
-{
- EGLThreadInfo *tInfo = getEGLThreadInfo();
- if (tInfo) {
- tInfo->eglError = EGL_SUCCESS;
- EGLContext_t* context = tInfo->currentContext;
- if (context) {
- // The following code is doing pretty much the same thing as
- // eglMakeCurrent(&s_display, EGL_NO_CONTEXT, EGL_NO_SURFACE, EGL_NO_SURFACE)
- // with the only issue that we do not require a valid display here.
- DEFINE_AND_VALIDATE_HOST_CONNECTION(EGL_FALSE);
- rcEnc->rcMakeCurrent(rcEnc, 0, 0, 0);
- if (context->majorVersion > 1) {
- hostCon->gl2Encoder()->setClientState(NULL);
- hostCon->gl2Encoder()->setSharedGroup(GLSharedGroupPtr());
- }
- else {
- hostCon->glEncoder()->setClientState(NULL);
- hostCon->glEncoder()->setSharedGroup(GLSharedGroupPtr());
- }
- context->flags &= ~EGLContext_t::IS_CURRENT;
+// We may need to trigger this directly from the TLS destructor.
+// Releases this thread's EGL state: clears the current-context binding and
+// finishes a deferred context destroy. Returns EGL_TRUE on success
+// (including the no-op cases of a null |tInfo| or no current context);
+// EGL_FALSE only if the host connection / encoder is unavailable.
+// NOTE(review): unlike the old eglReleaseThread body removed below, this no
+// longer calls rcMakeCurrent or resets the GL encoders' client state via
+// setClientState(NULL)/setSharedGroup -- confirm this is intended.
+static EGLBoolean s_eglReleaseThreadImpl(EGLThreadInfo* tInfo) {
+ if (!tInfo) return EGL_TRUE;
- if (context->deletePending) {
- if (context->rcContext) {
- rcEnc->rcDestroyContext(rcEnc, context->rcContext);
- context->rcContext = 0;
- }
- delete context;
- }
- tInfo->currentContext = 0;
+ tInfo->eglError = EGL_SUCCESS;
+ EGLContext_t* context = tInfo->currentContext;
+
+ if (!context) return EGL_TRUE;
+
+ // The following code is doing pretty much the same thing as
+ // eglMakeCurrent(&s_display, EGL_NO_CONTEXT, EGL_NO_SURFACE, EGL_NO_SURFACE)
+ // with the only issue that we do not require a valid display here.
+ DEFINE_AND_VALIDATE_HOST_CONNECTION_FOR_TLS(EGL_FALSE, tInfo);
+ // We are going to call makeCurrent on the null context and surface
+ // anyway once we are on the host, so skip rcMakeCurrent here.
+ // rcEnc->rcMakeCurrent(rcEnc, 0, 0, 0);
+ context->flags &= ~EGLContext_t::IS_CURRENT;
+ // Finish a destroy that was deferred while the context was current.
+ if (context->deletePending) {
+ if (context->rcContext) {
+ rcEnc->rcDestroyContext(rcEnc, context->rcContext);
+ context->rcContext = 0;
+ }
+ delete context;
+ }
+ tInfo->currentContext = 0;
+
 return EGL_TRUE;
}
+// Public entry point; the TLS destructor calls the impl directly instead.
+EGLBoolean eglReleaseThread()
+{
+ return s_eglReleaseThreadImpl(getEGLThreadInfo());
+}
+
EGLSurface eglCreatePbufferFromClientBuffer(EGLDisplay dpy, EGLenum buftype, EGLClientBuffer buffer, EGLConfig config, const EGLint *attrib_list)
{
//TODO
VALIDATE_SURFACE_RETURN(draw, EGL_FALSE);
VALIDATE_SURFACE_RETURN(read, EGL_FALSE);
+ // eglMakeCurrent is the only place where we install the TLS destructor,
+ // since any thread may suddenly become an EGL thread on its first
+ // eglMakeCurrent call.
+ setTlsDestructor((tlsDtorCallback)s_eglReleaseThreadImpl);
+
if ((read == EGL_NO_SURFACE && draw == EGL_NO_SURFACE) && (ctx != EGL_NO_CONTEXT))
setErrorReturn(EGL_BAD_MATCH, EGL_FALSE);
if ((read != EGL_NO_SURFACE || draw != EGL_NO_SURFACE) && (ctx == EGL_NO_CONTEXT))
}
if (context && (context->flags & EGLContext_t::IS_CURRENT) && (context != tInfo->currentContext)) {
- //context is current to another thread
+ // context is current to another thread
+ ALOGE("%s: error: EGL_BAD_ACCESS: context %p current to another thread!\n", __FUNCTION__, context);
setErrorReturn(EGL_BAD_ACCESS, EGL_FALSE);
}
//Now make the local bind
if (context) {
- ALOGD("%s: %p: ver %d %d", __FUNCTION__, context, context->majorVersion, context->minorVersion);
+ ALOGD("%s: %p: ver %d %d (tinfo %p)", __FUNCTION__, context, context->majorVersion, context->minorVersion, tInfo);
// This is a nontrivial context.
// The thread cannot be gralloc-only anymore.
hostCon->setGrallocOnly(false);
#include <cutils/log.h>
#include <cutils/properties.h>
+#include <set>
+#include <string>
+#include <sstream>
+
/* Set to 1 or 2 to enable debug traces */
#define DEBUG 0
static void fallback_init(void); // forward
-
typedef struct _alloc_list_node { // node of the device's doubly-linked list of allocated buffers
 buffer_handle_t handle;
 _alloc_list_node *next;
 _alloc_list_node *prev;
} AllocListNode;
+// One tracked ashmem mapping, keyed by its base address.
+struct MemRegionInfo {
+ void* ashmemBase;
+ mutable uint32_t refCount; // mutable: std::set elements are const, but
+ // refCount is not part of the ordering key,
+ // so in-place mutation cannot break the set.
+};
+
+// Orders regions by base address so lookups can key on ashmemBase alone.
+struct MemRegionInfoCmp {
+ bool operator()(const MemRegionInfo& a, const MemRegionInfo& b) const {
+ return a.ashmemBase < b.ashmemBase;
+ }
+};
+
+typedef std::set<MemRegionInfo, MemRegionInfoCmp> MemRegionSet;
+typedef MemRegionSet::iterator mem_region_handle_t;
+
//
// Our gralloc device structure (alloc interface)
//
alloc_device_t device;
AllocListNode *allocListHead; // double linked list of allocated buffers
+ MemRegionSet ashmemRegions; // to track allocations of each ashmem region
pthread_mutex_t lock;
};
+// Process-wide table of refcounted ashmem regions (see get/put_mem_region).
+struct gralloc_memregions_t {
+ MemRegionSet ashmemRegions;
+};
+
+#define INITIAL_DMA_REGION_SIZE 4096
+// Single process-wide goldfish DMA region shared by all buffers;
+// refcounted, and |lock| guards all fields.
+struct gralloc_dmaregion_t {
+ goldfish_dma_context goldfish_dma;
+ uint32_t sz;
+ uint32_t refcount;
+ pthread_mutex_t lock;
+};
+
+// global device instance
+// Lazily created on first use (init_gralloc_*) and intentionally never freed.
+static gralloc_memregions_t* s_grdev = NULL;
+static gralloc_dmaregion_t* s_grdma = NULL;
+
+// Lazily creates the process-wide ashmem-region table.
+// NOTE(review): the check-then-create is unsynchronized -- confirm all
+// callers are serialized (e.g. by the gralloc module lock).
+void init_gralloc_memregions() {
+ if (s_grdev) return;
+ s_grdev = new gralloc_memregions_t;
+}
+
+// Lazily creates the process-wide goldfish DMA region at its initial size
+// with refcount 0; callers take references via get_gralloc_dmaregion().
+// NOTE(review): the !s_grdma check itself is unsynchronized -- confirm
+// callers are serialized.
+void init_gralloc_dmaregion() {
+ D("%s: call\n", __FUNCTION__);
+ if (s_grdma) return;
+
+ s_grdma = new gralloc_dmaregion_t;
+ s_grdma->sz = INITIAL_DMA_REGION_SIZE;
+ s_grdma->refcount = 0;
+
+ pthread_mutex_init(&s_grdma->lock, NULL);
+ pthread_mutex_lock(&s_grdma->lock);
+ goldfish_dma_create_region(s_grdma->sz, &s_grdma->goldfish_dma);
+ pthread_mutex_unlock(&s_grdma->lock);
+}
+
+// Takes a reference on the shared DMA region. Silently a no-op when the
+// region has not been created yet, so a get/put pair must both run after
+// init_gralloc_dmaregion() (or both before) to stay balanced.
+void get_gralloc_dmaregion() {
+ if (!s_grdma) return;
+ pthread_mutex_lock(&s_grdma->lock);
+ s_grdma->refcount++;
+ D("%s: call. refcount: %u\n", __FUNCTION__, s_grdma->refcount);
+ pthread_mutex_unlock(&s_grdma->lock);
+}
+
+// Recreates the DMA region at |new_sz|. Caller must hold s_grdma->lock
+// (hence the _locked suffix). The old region is unmapped (if mapped) and
+// its fd closed before the new one is created; the new region is not
+// remapped here -- gralloc_dmaregion_register_ashmem maps on demand.
+static void resize_gralloc_dmaregion_locked(uint32_t new_sz) {
+ if (!s_grdma) return;
+ if (s_grdma->goldfish_dma.mapped) {
+ goldfish_dma_unmap(&s_grdma->goldfish_dma);
+ }
+ close(s_grdma->goldfish_dma.fd);
+ goldfish_dma_create_region(new_sz, &s_grdma->goldfish_dma);
+ s_grdma->sz = new_sz;
+}
+
+// Drops a reference on the shared DMA region. When the count reaches zero
+// the region is shrunk back to its initial size (it is never destroyed).
+// Returns true when the last reference was dropped.
+// NOTE(review): the refcount is decremented unconditionally -- an
+// unbalanced put underflows the unsigned counter; confirm every caller
+// pairs this with get_gralloc_dmaregion().
+bool put_gralloc_dmaregion() {
+ if (!s_grdma) return false;
+ pthread_mutex_lock(&s_grdma->lock);
+ D("%s: call. refcount before: %u\n", __FUNCTION__, s_grdma->refcount);
+ s_grdma->refcount--;
+ bool shouldDelete = !s_grdma->refcount;
+ if (shouldDelete) {
+ D("%s: should delete!\n", __FUNCTION__);
+ resize_gralloc_dmaregion_locked(INITIAL_DMA_REGION_SIZE);
+ D("%s: done\n", __FUNCTION__);
+ }
+ pthread_mutex_unlock(&s_grdma->lock);
+ D("%s: exit\n", __FUNCTION__);
+ return shouldDelete;
+}
+
+// Ensures the shared DMA region is at least |sz| bytes (growing but never
+// shrinking it) and mapped. No-op when the region does not exist yet.
+void gralloc_dmaregion_register_ashmem(uint32_t sz) {
+ if (!s_grdma) return;
+ pthread_mutex_lock(&s_grdma->lock);
+ D("%s: for sz %u, refcount %u", __FUNCTION__, sz, s_grdma->refcount);
+ uint32_t new_sz = std::max(s_grdma->sz, sz);
+ if (new_sz != s_grdma->sz) {
+ D("%s: change sz from %u to %u", __FUNCTION__, s_grdma->sz, sz);
+ resize_gralloc_dmaregion_locked(new_sz);
+ }
+ if (!s_grdma->goldfish_dma.mapped) {
+ goldfish_dma_map(&s_grdma->goldfish_dma);
+ }
+ pthread_mutex_unlock(&s_grdma->lock);
+}
+
+// Increments the refcount of the tracked region at |ashmemBase|, creating
+// the entry with refCount 1 on first sight.
+// NOTE(review): reads/writes s_grdev->ashmemRegions without any lock --
+// confirm callers hold the gralloc module lock.
+void get_mem_region(void* ashmemBase) {
+ init_gralloc_memregions();
+ D("%s: call for %p", __FUNCTION__, ashmemBase);
+ MemRegionInfo lookup;
+ lookup.ashmemBase = ashmemBase;
+ mem_region_handle_t handle = s_grdev->ashmemRegions.find(lookup);
+ if (handle == s_grdev->ashmemRegions.end()) {
+ MemRegionInfo newRegion;
+ newRegion.ashmemBase = ashmemBase;
+ newRegion.refCount = 1;
+ s_grdev->ashmemRegions.insert(newRegion);
+ } else {
+ handle->refCount++;
+ }
+}
+
+// Decrements the refcount of the region at |ashmemBase|, removing the
+// entry when it reaches zero. Returns true when the caller should unmap
+// the region (last local reference gone).
+// NOTE(review): also returns true for an unknown region, which makes the
+// caller unmap a region that was never registered -- confirm intended.
+bool put_mem_region(void* ashmemBase) {
+ init_gralloc_memregions();
+ D("%s: call for %p", __FUNCTION__, ashmemBase);
+ MemRegionInfo lookup;
+ lookup.ashmemBase = ashmemBase;
+ mem_region_handle_t handle = s_grdev->ashmemRegions.find(lookup);
+ if (handle == s_grdev->ashmemRegions.end()) {
+ ALOGE("%s: error: tried to put nonexistent mem region!", __FUNCTION__);
+ return true;
+ } else {
+ handle->refCount--;
+ bool shouldRemove = !handle->refCount;
+ if (shouldRemove) {
+ s_grdev->ashmemRegions.erase(lookup); // second find(); erase(handle) would avoid it
+ }
+ return shouldRemove;
+ }
+}
+
+// Debug helper: logs every tracked ashmem region and its refcount.
+void dump_regions() {
+ init_gralloc_memregions();
+ mem_region_handle_t curr = s_grdev->ashmemRegions.begin();
+ std::stringstream res;
+ for (; curr != s_grdev->ashmemRegions.end(); curr++) {
+ res << "\tashmem base " << curr->ashmemBase << " refcount " << curr->refCount << "\n";
+ }
+ ALOGD("ashmem region dump [\n%s]", res.str().c_str());
+}
+
+// Refcounting wrappers around get/put_mem_region keyed on a color buffer's
+// ashmem base. PUT_ASHMEM_REGION declares SHOULD_UNMAP in the enclosing
+// scope, telling the caller whether it owns the final unmap. The DEBUG
+// variants bracket each operation with a full region dump.
+#if DEBUG
+
+#define GET_ASHMEM_REGION(cb) \
+ dump_regions(); \
+ get_mem_region((void*)cb->ashmemBase); \
+ dump_regions(); \
+
+#define PUT_ASHMEM_REGION(cb) \
+ dump_regions(); \
+ bool SHOULD_UNMAP = put_mem_region((void*)cb->ashmemBase); \
+ dump_regions(); \
+
+#else
+
+#define GET_ASHMEM_REGION(cb) \
+ get_mem_region((void*)cb->ashmemBase); \
+
+#define PUT_ASHMEM_REGION(cb) \
+ bool SHOULD_UNMAP = put_mem_region((void*)cb->ashmemBase); \
+
+#endif
+
//
// Our framebuffer device structure
//
cb->ashmemBase = intptr_t(addr);
cb->ashmemBasePid = getpid();
+ D("%s: %p mapped ashmem base %p size %d\n", __FUNCTION__,
+ cb, cb->ashmemBase, cb->ashmemSize);
*vaddr = addr;
return 0;
char* convertedBuf = NULL;
if ((doLocked && is_rgb_format) ||
- (cb->goldfish_dma.fd < 0 &&
+ (!s_grdma &&
(doLocked || !is_rgb_format))) {
convertedBuf = new char[rgbSz];
to_send = convertedBuf;
width, height, top, left, bpp);
}
- if (cb->goldfish_dma.fd > 0) {
+ if (s_grdma) {
if (cb->frameworkFormat == HAL_PIXEL_FORMAT_YV12) {
get_yv12_offsets(width, height, NULL, NULL,
&send_buffer_size);
&send_buffer_size);
}
- rcEnc->bindDmaContext(&cb->goldfish_dma);
+ rcEnc->bindDmaContext(&s_grdma->goldfish_dma);
D("%s: call. dma update with sz=%u", __FUNCTION__, send_buffer_size);
+ pthread_mutex_lock(&s_grdma->lock);
rcEnc->rcUpdateColorBufferDMA(rcEnc, cb->hostHandle,
left, top, width, height,
cb->glFormat, cb->glType,
to_send, send_buffer_size);
+ pthread_mutex_unlock(&s_grdma->lock);
} else {
if (cb->frameworkFormat == HAL_PIXEL_FORMAT_YV12) {
yv12_to_rgb888(to_send, pixels,
selectedEmuFrameworkFormat = FRAMEWORK_FORMAT_YV12;
break;
case HAL_PIXEL_FORMAT_YCbCr_420_888:
- ALOGD("%s: 420_888 format experimental path. "
- "Initialize rgb565 gl format\n", __FUNCTION__);
align = 1;
bpp = 1; // per-channel bpp
yuv_format = true;
if (rcEnc->getDmaVersion() > 0) {
D("%s: creating goldfish dma region of size %lu (cb fd %d)\n", __FUNCTION__, ashmem_size, cb->fd);
- err = goldfish_dma_create_region(ashmem_size, &cb->goldfish_dma);
- if (err) {
- ALOGE("%s: Failed to create goldfish DMA region", __FUNCTION__);
- } else {
- goldfish_dma_map(&cb->goldfish_dma);
- cb->setDmaFd(cb->goldfish_dma.fd);
- D("%s: done, cbfd %d dmafd1 %d dmafd2 %d", __FUNCTION__, cb->fd, cb->goldfish_dma.fd, cb->dmafd);
- }
+ init_gralloc_dmaregion();
+ get_gralloc_dmaregion();
} else {
cb->goldfish_dma.fd = -1;
}
GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_SW_READ_MASK) ) {
#endif // PLATFORM_SDK_VERSION
if (hostCon && rcEnc) {
- if (cb->goldfish_dma.fd > 0) {
+ if (s_grdma) {
cb->hostHandle = rcEnc->rcCreateColorBufferDMA(rcEnc, w, h, glFormat, cb->emuFrameworkFormat);
} else {
cb->hostHandle = rcEnc->rcCreateColorBuffer(rcEnc, w, h, glFormat);
pthread_mutex_unlock(&grdev->lock);
*pHandle = cb;
+ D("%s: alloc succeded, new ashmem base and size: %p %d handle: %p",
+ __FUNCTION__, cb->ashmemBase, cb->ashmemSize, cb);
switch (frameworkFormat) {
case HAL_PIXEL_FORMAT_YCbCr_420_888:
*pStride = 0;
static int gralloc_free(alloc_device_t* dev,
buffer_handle_t handle)
{
- D("%s: start", __FUNCTION__);
cb_handle_t *cb = (cb_handle_t *)handle;
if (!cb_handle_t::validate((cb_handle_t*)cb)) {
ERR("gralloc_free: invalid handle");
return -EINVAL;
}
- if (cb->hostHandle != 0) {
+ D("%s: for buf %p ptr %p size %d\n",
+ __FUNCTION__, handle, cb->ashmemBase, cb->ashmemSize);
+
+ if (cb->hostHandle) {
DEFINE_AND_VALIDATE_HOST_CONNECTION;
D("Closing host ColorBuffer 0x%x\n", cb->hostHandle);
rcEnc->rcCloseColorBuffer(rcEnc, cb->hostHandle);
//
if (cb->fd > 0) {
if (cb->ashmemSize > 0 && cb->ashmemBase) {
+ D("%s: unmapped %p", __FUNCTION__, cb->ashmemBase);
munmap((void *)cb->ashmemBase, cb->ashmemSize);
+ put_gralloc_dmaregion();
}
close(cb->fd);
}
- if (cb->dmafd > 0) {
- D("%s: unmap and free dma fd %d\n", __FUNCTION__, cb->dmafd);
- cb->goldfish_dma.fd = cb->dmafd;
- if (cb->ashmemSize > 0 && cb->ashmemBase) {
- goldfish_dma_unmap(&cb->goldfish_dma);
- }
- goldfish_dma_free(&cb->goldfish_dma);
- D("%s: closed dma fd %d\n", __FUNCTION__, cb->dmafd);
- }
D("%s: done", __FUNCTION__);
// remove it from the allocated list
static int gralloc_register_buffer(gralloc_module_t const* module,
buffer_handle_t handle)
{
+
D("%s: start", __FUNCTION__);
pthread_once(&sFallbackOnce, fallback_init);
if (sFallback != NULL) {
private_module_t *gr = (private_module_t *)module;
cb_handle_t *cb = (cb_handle_t *)handle;
+
if (!gr || !cb_handle_t::validate(cb)) {
ERR("gralloc_register_buffer(%p): invalid buffer", cb);
return -EINVAL;
return -err;
}
cb->mappedPid = getpid();
- D("%s: checking to map goldfish dma", __FUNCTION__);
- if (cb->dmafd > 0) {
- D("%s: attempting to goldfish dma mmap. cbfd %d dmafd1 %d dmafd2 %d", __FUNCTION__, cb->fd, cb->goldfish_dma.fd, cb->dmafd);
- D("cxt=%p curr pid %d mapped pid %d",
- &cb->goldfish_dma,
- (int)getpid(),
- cb->mappedPid);
- if (cb->goldfish_dma.fd != cb->dmafd) {
- cb->goldfish_dma.fd = cb->dmafd;
- }
- goldfish_dma_map(&cb->goldfish_dma);
+
+ DEFINE_AND_VALIDATE_HOST_CONNECTION;
+ if (rcEnc->getDmaVersion() > 0) {
+ init_gralloc_dmaregion();
+ gralloc_dmaregion_register_ashmem(cb->ashmemSize);
}
+
+ }
+
+ if (cb->ashmemSize > 0) {
+ GET_ASHMEM_REGION(cb);
+ get_gralloc_dmaregion();
}
return 0;
static int gralloc_unregister_buffer(gralloc_module_t const* module,
 buffer_handle_t handle)
{
- D("%s: call", __FUNCTION__);
 if (sFallback != NULL) {
 return sFallback->unregisterBuffer(sFallback, handle);
 }
 private_module_t *gr = (private_module_t *)module;
 cb_handle_t *cb = (cb_handle_t *)handle;
+
 if (!gr || !cb_handle_t::validate(cb)) {
 ERR("gralloc_unregister_buffer(%p): invalid buffer", cb);
 return -EINVAL;
 }
- if (cb->hostHandle != 0) {
- DEFINE_AND_VALIDATE_HOST_CONNECTION;
+
+ if (cb->hostHandle) {
 D("Closing host ColorBuffer 0x%x\n", cb->hostHandle);
+ DEFINE_AND_VALIDATE_HOST_CONNECTION;
 rcEnc->rcCloseColorBuffer(rcEnc, cb->hostHandle);
 }
 // (through register_buffer)
 //
 if (cb->ashmemSize > 0 && cb->mappedPid == getpid()) {
+
+ PUT_ASHMEM_REGION(cb); // declares SHOULD_UNMAP
+ put_gralloc_dmaregion();
+
+ if (!SHOULD_UNMAP) goto done; // another local user still maps this ashmem region
+
 DEFINE_AND_VALIDATE_HOST_CONNECTION;
+
 void *vaddr;
 int err = munmap((void *)cb->ashmemBase, cb->ashmemSize);
 if (err) {
 cb->ashmemBase = 0;
 cb->mappedPid = 0;
 D("%s: Unregister buffer previous mapped to pid %d", __FUNCTION__, getpid());
- if (cb->dmafd > 0) {
- cb->goldfish_dma.fd = cb->dmafd;
- D("%s: Unmap dma fd %d (%d)", __FUNCTION__, cb->dmafd, cb->goldfish_dma.fd);
- goldfish_dma_unmap(&cb->goldfish_dma);
- }
 }
-
+done:
 D("gralloc_unregister_buffer(%p) done\n", cb);
 return 0;
}
private_module_t *gr = (private_module_t *)module;
cb_handle_t *cb = (cb_handle_t *)handle;
+
if (!gr || !cb_handle_t::validate(cb)) {
ALOGE("gralloc_lock bad handle\n");
return -EINVAL;
char* tmpBuf = 0;
if (cb->frameworkFormat == HAL_PIXEL_FORMAT_YV12 ||
cb->frameworkFormat == HAL_PIXEL_FORMAT_YCbCr_420_888) {
- // We are using RGB88
+ // We are using RGB888
tmpBuf = new char[cb->width * cb->height * 3];
rgb_addr = tmpBuf;
}
- D("gralloc_lock read back color buffer %d %d\n", cb->width, cb->height);
+ D("gralloc_lock read back color buffer %d %d ashmem base %p sz %d\n",
+ cb->width, cb->height, cb->ashmemBase, cb->ashmemSize);
rcEnc->rcReadColorBuffer(rcEnc, cb->hostHandle,
0, 0, cb->width, cb->height, cb->glFormat, cb->glType, rgb_addr);
if (tmpBuf) {
private_module_t *gr = (private_module_t *)module;
cb_handle_t *cb = (cb_handle_t *)handle;
+
if (!gr || !cb_handle_t::validate(cb)) {
ALOGD("%s: invalid gr or cb handle. -EINVAL", __FUNCTION__);
return -EINVAL;
if (NULL == dev) {
return -ENOMEM;
}
+ memset(dev, 0, sizeof(gralloc_device_t));
// Initialize our device structure
//