run_command("IP6TABLE RAW", 10, SU_PATH, "root", "ip6tables", "-t", "raw", "-L", "-nvx", NULL);
run_command("WIFI NETWORKS", 20,
- SU_PATH, "root", "wpa_cli", "list_networks", NULL);
+ SU_PATH, "root", "wpa_cli", "IFNAME=wlan0", "list_networks", NULL);
#ifdef FWDUMP_bcmdhd
run_command("DUMP WIFI INTERNAL COUNTERS", 20,
// Gets the next graphics buffer from the producer and locks it for CPU use,
// filling out the passed-in locked buffer structure with the native pointer
// and metadata. Returns BAD_VALUE if no new buffer is available, and
- // INVALID_OPERATION if the maximum number of buffers is already locked.
+ // NOT_ENOUGH_DATA if the maximum number of buffers is already locked.
//
// Only a fixed number of buffers can be locked at a time, determined by the
- // construction-time maxLockedBuffers parameter. If INVALID_OPERATION is
+ // construction-time maxLockedBuffers parameter. If NOT_ENOUGH_DATA is
// to set by queueBuffer each time this slot is queued.
int64_t mTimestamp;
+ // mIsAutoTimestamp indicates whether mTimestamp was generated
+ // automatically when the buffer was queued.
+ bool mIsAutoTimestamp;
+
// mFrameNumber is the number of the queued frame for this slot.
uint64_t mFrameNumber;
struct QueueBufferInput : public Flattenable<QueueBufferInput> {
friend class Flattenable<QueueBufferInput>;
inline QueueBufferInput(const Parcel& parcel);
- inline QueueBufferInput(int64_t timestamp,
+ inline QueueBufferInput(int64_t timestamp, bool isAutoTimestamp,
const Rect& crop, int scalingMode, uint32_t transform, bool async,
const sp<Fence>& fence)
- : timestamp(timestamp), crop(crop), scalingMode(scalingMode),
- transform(transform), async(async), fence(fence) { }
- inline void deflate(int64_t* outTimestamp, Rect* outCrop,
- int* outScalingMode, uint32_t* outTransform, bool* outAsync,
- sp<Fence>* outFence) const {
+ : timestamp(timestamp), isAutoTimestamp(isAutoTimestamp), crop(crop),
+ scalingMode(scalingMode), transform(transform), async(async),
+ fence(fence) { }
+ inline void deflate(int64_t* outTimestamp, bool* outIsAutoTimestamp,
+ Rect* outCrop, int* outScalingMode, uint32_t* outTransform,
+ bool* outAsync, sp<Fence>* outFence) const {
*outTimestamp = timestamp;
+ *outIsAutoTimestamp = bool(isAutoTimestamp);
*outCrop = crop;
*outScalingMode = scalingMode;
*outTransform = transform;
private:
int64_t timestamp;
+ int isAutoTimestamp;
Rect crop;
int scalingMode;
uint32_t transform;
uint32_t transform;
int scalingMode;
int64_t timestamp;
+ bool isAutoTimestamp;
bool async;
sp<Fence> fence;
- input.deflate(&timestamp, &crop, &scalingMode, &transform, &async, &fence);
+ input.deflate(&timestamp, &isAutoTimestamp, &crop, &scalingMode, &transform,
+ &async, &fence);
if (fence == NULL) {
ST_LOGE("queueBuffer: fence is NULL");
item.mTransform = transform;
item.mScalingMode = scalingMode;
item.mTimestamp = timestamp;
+ item.mIsAutoTimestamp = isAutoTimestamp;
item.mFrameNumber = mFrameCounter;
item.mBuf = buf;
item.mFence = fence;
//
// NOTE: code assumes monotonic time values from the system clock are
// positive.
- while (false && mQueue.size() > 1) {
+
+ // Start by checking to see if we can drop frames. We skip this check
+ // if the timestamps are being auto-generated by Surface -- if the
+ // app isn't generating timestamps explicitly, they probably don't
+ // want frames to be discarded based on them.
+ while (mQueue.size() > 1 && !mQueue[0].mIsAutoTimestamp) {
// If entry[1] is timely, drop entry[0] (and repeat). We apply
// an additional criteria here: we only drop the earlier buffer if
// our desiredPresent falls within +/- 1 second of the expected
if (!nativeBuffer) return BAD_VALUE;
if (mCurrentLockedBuffers == mMaxLockedBuffers) {
- return INVALID_OPERATION;
+ CC_LOGW("Max buffers have been locked (%d), cannot lock anymore.",
+ mMaxLockedBuffers);
+ return NOT_ENOUGH_DATA;
}
BufferQueue::BufferItem b;
mTransform(0),
mScalingMode(NATIVE_WINDOW_SCALING_MODE_FREEZE),
mTimestamp(0),
+ mIsAutoTimestamp(false),
mFrameNumber(0),
mBuf(INVALID_BUFFER_SLOT),
mIsDroppable(false),
sizeof(mTransform) +
sizeof(mScalingMode) +
sizeof(mTimestamp) +
+ sizeof(mIsAutoTimestamp) +
sizeof(mFrameNumber) +
sizeof(mBuf) +
sizeof(mIsDroppable) +
FlattenableUtils::write(buffer, size, mTransform);
FlattenableUtils::write(buffer, size, mScalingMode);
FlattenableUtils::write(buffer, size, mTimestamp);
+ FlattenableUtils::write(buffer, size, mIsAutoTimestamp);
FlattenableUtils::write(buffer, size, mFrameNumber);
FlattenableUtils::write(buffer, size, mBuf);
FlattenableUtils::write(buffer, size, mIsDroppable);
FlattenableUtils::read(buffer, size, mTransform);
FlattenableUtils::read(buffer, size, mScalingMode);
FlattenableUtils::read(buffer, size, mTimestamp);
+ FlattenableUtils::read(buffer, size, mIsAutoTimestamp);
FlattenableUtils::read(buffer, size, mFrameNumber);
FlattenableUtils::read(buffer, size, mBuf);
FlattenableUtils::read(buffer, size, mIsDroppable);
size_t IGraphicBufferProducer::QueueBufferInput::getFlattenedSize() const {
return sizeof(timestamp)
+ + sizeof(isAutoTimestamp)
+ sizeof(crop)
+ sizeof(scalingMode)
+ sizeof(transform)
return NO_MEMORY;
}
FlattenableUtils::write(buffer, size, timestamp);
+ FlattenableUtils::write(buffer, size, isAutoTimestamp);
FlattenableUtils::write(buffer, size, crop);
FlattenableUtils::write(buffer, size, scalingMode);
FlattenableUtils::write(buffer, size, transform);
{
size_t minNeeded =
sizeof(timestamp)
+ + sizeof(isAutoTimestamp)
+ sizeof(crop)
+ sizeof(scalingMode)
+ sizeof(transform)
}
FlattenableUtils::read(buffer, size, timestamp);
+ FlattenableUtils::read(buffer, size, isAutoTimestamp);
FlattenableUtils::read(buffer, size, crop);
FlattenableUtils::read(buffer, size, scalingMode);
FlattenableUtils::read(buffer, size, transform);
mTransformHint = 0;
mConsumerRunningBehind = false;
mConnectedToCpu = false;
- mProducerControlledByApp = true;
+ mProducerControlledByApp = controlledByApp;
mSwapIntervalZero = false;
}
ALOGV("Surface::queueBuffer");
Mutex::Autolock lock(mMutex);
int64_t timestamp;
+ bool isAutoTimestamp = false;
if (mTimestamp == NATIVE_WINDOW_TIMESTAMP_AUTO) {
timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
+ isAutoTimestamp = true;
ALOGV("Surface::queueBuffer making up timestamp: %.2f ms",
timestamp / 1000000.f);
} else {
sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
IGraphicBufferProducer::QueueBufferOutput output;
- IGraphicBufferProducer::QueueBufferInput input(timestamp, crop, mScalingMode,
- mTransform, mSwapIntervalZero, fence);
+ IGraphicBufferProducer::QueueBufferInput input(timestamp, isAutoTimestamp,
+ crop, mScalingMode, mTransform, mSwapIntervalZero, fence);
status_t err = mGraphicBufferProducer->queueBuffer(i, input, &output);
if (err != OK) {
ALOGE("queueBuffer: error queuing buffer to SurfaceTexture, %d", err);
int slot;
sp<Fence> fence;
sp<GraphicBuffer> buf;
- IGraphicBufferProducer::QueueBufferInput qbi(0, Rect(0, 0, 1, 1),
+ IGraphicBufferProducer::QueueBufferInput qbi(0, false, Rect(0, 0, 1, 1),
NATIVE_WINDOW_SCALING_MODE_FREEZE, 0, false, Fence::NO_FENCE);
BufferQueue::BufferItem item;
"EGL_KHR_image_base " // mandatory
"EGL_KHR_image_pixmap "
"EGL_KHR_lock_surface "
+ "EGL_KHR_gl_colorspace "
"EGL_KHR_gl_texture_2D_image "
"EGL_KHR_gl_texture_cubemap_image "
"EGL_KHR_gl_renderbuffer_image "
// surfaces
// ----------------------------------------------------------------------------
+// The EGL_KHR_gl_colorspace spec hasn't been published yet, so these haven't
+// been added to the Khronos egl.h.
+#define EGL_GL_COLORSPACE_KHR EGL_VG_COLORSPACE
+#define EGL_GL_COLORSPACE_SRGB_KHR EGL_VG_COLORSPACE_sRGB
+#define EGL_GL_COLORSPACE_LINEAR_KHR EGL_VG_COLORSPACE_LINEAR
+
+// Turn linear formats into corresponding sRGB formats when colorspace is
+// EGL_GL_COLORSPACE_SRGB_KHR, or turn sRGB formats into corresponding linear
+// formats when colorspace is EGL_GL_COLORSPACE_LINEAR_KHR. In any cases where
+// the modification isn't possible, the original format is returned.
+static int modifyFormatColorspace(int fmt, EGLint colorspace) {
+ if (colorspace == EGL_GL_COLORSPACE_LINEAR_KHR) {
+ switch (fmt) {
+ case HAL_PIXEL_FORMAT_sRGB_A_8888: return HAL_PIXEL_FORMAT_RGBA_8888;
+ case HAL_PIXEL_FORMAT_sRGB_X_8888: return HAL_PIXEL_FORMAT_RGBX_8888;
+ }
+ } else if (colorspace == EGL_GL_COLORSPACE_SRGB_KHR) {
+ switch (fmt) {
+ case HAL_PIXEL_FORMAT_RGBA_8888: return HAL_PIXEL_FORMAT_sRGB_A_8888;
+ case HAL_PIXEL_FORMAT_RGBX_8888: return HAL_PIXEL_FORMAT_sRGB_X_8888;
+ // TODO: this should go away once drivers stop using BGRA EGLConfigs
+ case HAL_PIXEL_FORMAT_BGRA_8888: return HAL_PIXEL_FORMAT_sRGB_A_8888;
+ }
+ }
+ return fmt;
+}
+
EGLSurface eglCreateWindowSurface( EGLDisplay dpy, EGLConfig config,
NativeWindowType window,
const EGLint *attrib_list)
egl_display_ptr dp = validate_display_connection(dpy, cnx);
if (dp) {
EGLDisplay iDpy = dp->disp.dpy;
- EGLint format;
if (native_window_api_connect(window, NATIVE_WINDOW_API_EGL) != OK) {
ALOGE("EGLNativeWindowType %p already connected to another API",
return setError(EGL_BAD_ALLOC, EGL_NO_SURFACE);
}
- // set the native window's buffers format to match this config
- if (cnx->egl.eglGetConfigAttrib(iDpy,
- config, EGL_NATIVE_VISUAL_ID, &format)) {
- if (format != 0) {
- int err = native_window_set_buffers_format(window, format);
- if (err != 0) {
- ALOGE("error setting native window pixel format: %s (%d)",
- strerror(-err), err);
- native_window_api_disconnect(window, NATIVE_WINDOW_API_EGL);
- return setError(EGL_BAD_NATIVE_WINDOW, EGL_NO_SURFACE);
+ // Set the native window's buffers format to match this config.
+ // Whether to use sRGB gamma is not part of the EGLconfig, but is part
+ // of our native format. So if sRGB gamma is requested, we have to
+ // modify the EGLconfig's format before setting the native window's
+ // format.
+ EGLint format;
+ if (!cnx->egl.eglGetConfigAttrib(iDpy, config, EGL_NATIVE_VISUAL_ID,
+ &format)) {
+ ALOGE("eglGetConfigAttrib(EGL_NATIVE_VISUAL_ID) failed: %#x",
+ eglGetError());
+ format = 0;
+ }
+ if (attrib_list) {
+ for (const EGLint* attr = attrib_list; *attr != EGL_NONE;
+ attr += 2) {
+ if (*attr == EGL_GL_COLORSPACE_KHR &&
+ dp->haveExtension("EGL_KHR_gl_colorspace")) {
+ format = modifyFormatColorspace(format, *(attr+1));
}
}
}
+ if (format != 0) {
+ int err = native_window_set_buffers_format(window, format);
+ if (err != 0) {
+ ALOGE("error setting native window pixel format: %s (%d)",
+ strerror(-err), err);
+ native_window_api_disconnect(window, NATIVE_WINDOW_API_EGL);
+ return setError(EGL_BAD_NATIVE_WINDOW, EGL_NO_SURFACE);
+ }
+ }
// the EGL spec requires that a new EGLSurface default to swap interval
// 1, so explicitly set that on the window here.
// ----------------------------------------------------------------------------
+static bool findExtension(const char* exts, const char* name, size_t nameLen) {
+ if (exts) {
+ const char* match = strstr(exts, name);
+ if (match && (match[nameLen] == '\0' || match[nameLen] == ' ')) {
+ return true;
+ }
+ }
+ return false;
+}
+
egl_display_t egl_display_t::sDisplay[NUM_DISPLAYS];
egl_display_t::egl_display_t() :
if (len) {
// NOTE: we could avoid the copy if we had strnstr.
const String8 ext(start, len);
- // now look for this extension
- if (disp.queryString.extensions) {
- // if we find it, add this extension string to our list
- // (and don't forget the space)
- const char* match = strstr(disp.queryString.extensions, ext.string());
- if (match && (match[len] == ' ' || match[len] == 0)) {
- mExtensionString.append(start, len+1);
- }
+ if (findExtension(disp.queryString.extensions, ext.string(),
+ len)) {
+ mExtensionString.append(start, len+1);
}
}
// process the next extension string, and skip the space.
return result;
}
+bool egl_display_t::haveExtension(const char* name, size_t nameLen) const {
+ if (!nameLen) {
+ nameLen = strlen(name);
+ }
+ return findExtension(mExtensionString.string(), name, nameLen);
+}
+
// ----------------------------------------------------------------------------
bool egl_display_t::HibernationMachine::incWakeCount(WakeRefStrength strength) {
char const * getClientApiString() const { return mClientApiString.string(); }
char const * getExtensionString() const { return mExtensionString.string(); }
+ bool haveExtension(const char* name, size_t nameLen = 0) const;
+
inline uint32_t getRefsCount() const { return refs; }
struct strings_t {
RenderEngine/ProgramCache.cpp \
RenderEngine/GLExtensions.cpp \
RenderEngine/RenderEngine.cpp \
+ RenderEngine/Texture.cpp \
RenderEngine/GLES10RenderEngine.cpp \
RenderEngine/GLES11RenderEngine.cpp \
RenderEngine/GLES20RenderEngine.cpp
# build surfaceflinger's executable
include $(CLEAR_VARS)
+LOCAL_CFLAGS:= -DLOG_TAG=\"SurfaceFlinger\"
+
LOCAL_SRC_FILES:= \
main_surfaceflinger.cpp
LOCAL_SHARED_LIBRARIES := \
libsurfaceflinger \
+ liblog \
libbinder \
libutils
#include <sys/types.h>
#include <binder/PermissionCache.h>
+#include <binder/IPCThreadState.h>
#include <private/android_filesystem_config.h>
mPageFlipCount++;
}
+status_t DisplayDevice::beginFrame() const {
+ return mDisplaySurface->beginFrame();
+}
+
status_t DisplayDevice::prepareFrame(const HWComposer& hwc) const {
DisplaySurface::CompositionType compositionType;
bool haveGles = hwc.hasGlesComposition(mHwcDisplayId);
}
void DisplayDevice::swapBuffers(HWComposer& hwc) const {
- // We need to call eglSwapBuffers() unless:
- // (a) there was no GLES composition this frame, or
- // (b) we're using a legacy HWC with no framebuffer target support (in
- // which case HWComposer::commit() handles things).
+ // We need to call eglSwapBuffers() if:
+ // (1) we don't have a hardware composer, or
+ // (2) we did GLES composition this frame, and either
+ // (a) we have framebuffer target support (not present on legacy
+ // devices, where HWComposer::commit() handles things); or
+ // (b) this is a virtual display
if (hwc.initCheck() != NO_ERROR ||
(hwc.hasGlesComposition(mHwcDisplayId) &&
- hwc.supportsFramebufferTarget())) {
+ (hwc.supportsFramebufferTarget() || mType >= DISPLAY_VIRTUAL))) {
EGLBoolean success = eglSwapBuffers(mDisplay, mSurface);
if (!success) {
EGLint error = eglGetError();
#include <EGL/eglext.h>
#include <utils/Mutex.h>
+#include <utils/String8.h>
#include <utils/Timers.h>
#include <hardware/hwcomposer_defs.h>
class DisplayInfo;
class DisplaySurface;
+class IGraphicBufferProducer;
class Layer;
class SurfaceFlinger;
class HWComposer;
DISPLAY_ID_INVALID = -1,
DISPLAY_PRIMARY = HWC_DISPLAY_PRIMARY,
DISPLAY_EXTERNAL = HWC_DISPLAY_EXTERNAL,
- NUM_DISPLAY_TYPES = HWC_NUM_DISPLAY_TYPES,
- DISPLAY_VIRTUAL = HWC_NUM_DISPLAY_TYPES
+ DISPLAY_VIRTUAL = HWC_DISPLAY_VIRTUAL,
+ NUM_BUILTIN_DISPLAY_TYPES = HWC_NUM_PHYSICAL_DISPLAY_TYPES,
};
enum {
int32_t getHwcDisplayId() const { return mHwcDisplayId; }
const wp<IBinder>& getDisplayToken() const { return mDisplayToken; }
+ status_t beginFrame() const;
status_t prepareFrame(const HWComposer& hwc) const;
void swapBuffers(HWComposer& hwc) const;
class DisplaySurface : public virtual RefBase {
public:
+ // beginFrame is called at the beginning of the composition loop, before
+ // the configuration is known. The DisplaySurface should do anything it
+ // needs to do to enable HWComposer to decide how to compose the frame.
+ virtual status_t beginFrame() = 0;
+
// prepareFrame is called after the composition configuration is known but
// before composition takes place. The DisplaySurface can use the
// composition type to decide how to manage the flow of buffers between
mConsumer->setDefaultMaxBufferCount(NUM_FRAMEBUFFER_SURFACE_BUFFERS);
}
+status_t FramebufferSurface::beginFrame() {
+ return NO_ERROR;
+}
+
status_t FramebufferSurface::prepareFrame(CompositionType compositionType) {
return NO_ERROR;
}
public:
FramebufferSurface(HWComposer& hwc, int disp, const sp<IGraphicBufferConsumer>& consumer);
+ virtual status_t beginFrame();
virtual status_t prepareFrame(CompositionType compositionType);
virtual status_t compositionComplete();
virtual status_t advanceFrame();
namespace android {
-#ifndef HWC_DEVICE_API_VERSION_1_3
-#define HWC_DEVICE_API_VERSION_1_3 HARDWARE_DEVICE_API_VERSION_2(1, 3, HWC_HEADER_VERSION)
-#endif
-
#define MIN_HWC_HEADER_VERSION HWC_HEADER_VERSION
-#define NUM_PHYSICAL_DISPLAYS HWC_NUM_DISPLAY_TYPES
-#define VIRTUAL_DISPLAY_ID_BASE HWC_NUM_DISPLAY_TYPES
-
static uint32_t hwcApiVersion(const hwc_composer_device_1_t* hwc) {
uint32_t hwcVersion = hwc->common.version;
return hwcVersion & HARDWARE_API_VERSION_2_MAJ_MIN_MASK;
mEventHandler(handler),
mVSyncCount(0), mDebugForceFakeVSync(false)
{
- for (size_t i =0 ; i<MAX_DISPLAYS ; i++) {
+ for (size_t i =0 ; i<MAX_HWC_DISPLAYS ; i++) {
mLists[i] = 0;
}
}
// these display IDs are always reserved
- for (size_t i=0 ; i<NUM_PHYSICAL_DISPLAYS ; i++) {
+ for (size_t i=0 ; i<NUM_BUILTIN_DISPLAYS ; i++) {
mAllocatedDisplayIDs.markBit(i);
}
// hw composer version
if (hwcHasApiVersion(mHwc, HWC_DEVICE_API_VERSION_1_3)) {
// 1.3 adds support for virtual displays
- mNumDisplays = MAX_DISPLAYS;
+ mNumDisplays = MAX_HWC_DISPLAYS;
} else if (hwcHasApiVersion(mHwc, HWC_DEVICE_API_VERSION_1_1)) {
// 1.1 adds support for multiple displays
- mNumDisplays = NUM_PHYSICAL_DISPLAYS;
+ mNumDisplays = NUM_BUILTIN_DISPLAYS;
} else {
mNumDisplays = 1;
}
}
} else if (mHwc) {
// here we're guaranteed to have at least HWC 1.1
- for (size_t i =0 ; i<NUM_PHYSICAL_DISPLAYS ; i++) {
+ for (size_t i =0 ; i<NUM_BUILTIN_DISPLAYS ; i++) {
queryDisplayProperties(i);
}
}
}
status_t HWComposer::freeDisplayId(int32_t id) {
- if (id < NUM_PHYSICAL_DISPLAYS) {
+ if (id < NUM_BUILTIN_DISPLAYS) {
// cannot free the reserved IDs
return BAD_VALUE;
}
disp.list->numHwLayers, disp.list->flags);
result.append(
- " type | handle | hints | flags | tr | blend | format | source crop | frame name \n"
- "------------+----------+----------+----------+----+-------+----------+---------------------------+--------------------------------\n");
- // " __________ | ________ | ________ | ________ | __ | _____ | ________ | [_____,_____,_____,_____] | [_____,_____,_____,_____]
+ " type | handle | hints | flags | tr | blend | format | source crop | frame name \n"
+ "------------+----------+----------+----------+----+-------+----------+---------------------------------+--------------------------------\n");
+ // " __________ | ________ | ________ | ________ | __ | _____ | ________ | [_____._,_____._,_____._,_____._] | [_____,_____,_____,_____]
for (size_t i=0 ; i<disp.list->numHwLayers ; i++) {
const hwc_layer_1_t&l = disp.list->hwLayers[i];
int32_t format = -1;
if (type >= NELEM(compositionTypeName))
type = NELEM(compositionTypeName) - 1;
- result.appendFormat(
- " %10s | %08x | %08x | %08x | %02x | %05x | %08x | [%5d,%5d,%5d,%5d] | [%5d,%5d,%5d,%5d] %s\n",
- compositionTypeName[type],
- intptr_t(l.handle), l.hints, l.flags, l.transform, l.blending, format,
- l.sourceCrop.left, l.sourceCrop.top, l.sourceCrop.right, l.sourceCrop.bottom,
- l.displayFrame.left, l.displayFrame.top, l.displayFrame.right, l.displayFrame.bottom,
- name.string());
+ if (hwcHasApiVersion(mHwc, HWC_DEVICE_API_VERSION_1_3)) {
+ result.appendFormat(
+ " %10s | %08x | %08x | %08x | %02x | %05x | %08x | [%7.1f,%7.1f,%7.1f,%7.1f] | [%5d,%5d,%5d,%5d] %s\n",
+ compositionTypeName[type],
+ intptr_t(l.handle), l.hints, l.flags, l.transform, l.blending, format,
+ l.sourceCropf.left, l.sourceCropf.top, l.sourceCropf.right, l.sourceCropf.bottom,
+ l.displayFrame.left, l.displayFrame.top, l.displayFrame.right, l.displayFrame.bottom,
+ name.string());
+ } else {
+ result.appendFormat(
+ " %10s | %08x | %08x | %08x | %02x | %05x | %08x | [%7d,%7d,%7d,%7d] | [%5d,%5d,%5d,%5d] %s\n",
+ compositionTypeName[type],
+ intptr_t(l.handle), l.hints, l.flags, l.transform, l.blending, format,
+ l.sourceCrop.left, l.sourceCrop.top, l.sourceCrop.right, l.sourceCrop.bottom,
+ l.displayFrame.left, l.displayFrame.top, l.displayFrame.right, l.displayFrame.bottom,
+ name.string());
+ }
}
}
}
};
enum {
- MAX_DISPLAYS = HWC_NUM_DISPLAY_TYPES + 1
+ NUM_BUILTIN_DISPLAYS = HWC_NUM_PHYSICAL_DISPLAY_TYPES,
+ MAX_HWC_DISPLAYS = HWC_NUM_DISPLAY_TYPES,
+ VIRTUAL_DISPLAY_ID_BASE = HWC_DISPLAY_VIRTUAL,
};
HWComposer(
status_t initCheck() const;
- // returns a display ID starting at MAX_DISPLAYS, this ID
- // is to be used with createWorkList (and all other
- // methods requiring an ID below).
- // IDs below MAX_DISPLAY are pre-defined and therefore are always valid.
- // returns a negative error code if an ID cannot be allocated
+ // Returns a display ID starting at VIRTUAL_DISPLAY_ID_BASE, this ID is to
+ // be used with createWorkList (and all other methods requiring an ID
+ // below).
+ // IDs below NUM_BUILTIN_DISPLAYS are pre-defined and therefore are
+ // always valid.
+ // Returns -1 if an ID cannot be allocated
int32_t allocateDisplayId();
- // recycles the given ID and frees the associated worklist.
- // IDs below MAX_DISPLAYS are not recycled
+ // Recycles the given virtual display ID and frees the associated worklist.
+ // IDs below NUM_BUILTIN_DISPLAYS are not recycled.
status_t freeDisplayId(int32_t id);
struct hwc_composer_device_1* mHwc;
// invariant: mLists[0] != NULL iff mHwc != NULL
// mLists[i>0] can be NULL. that display is to be ignored
- struct hwc_display_contents_1* mLists[MAX_DISPLAYS];
- DisplayData mDisplayData[MAX_DISPLAYS];
+ struct hwc_display_contents_1* mLists[MAX_HWC_DISPLAYS];
+ DisplayData mDisplayData[MAX_HWC_DISPLAYS];
size_t mNumDisplays;
cb_context* mCBContext;
VirtualDisplaySurface::~VirtualDisplaySurface() {
}
-status_t VirtualDisplaySurface::prepareFrame(CompositionType compositionType) {
+status_t VirtualDisplaySurface::beginFrame() {
if (mDisplayId < 0)
return NO_ERROR;
VDS_LOGW_IF(mDbgState != DBG_STATE_IDLE,
+ "Unexpected beginFrame() in %s state", dbgStateStr());
+ mDbgState = DBG_STATE_BEGUN;
+
+ uint32_t transformHint, numPendingBuffers;
+ mQueueBufferOutput.deflate(&mSinkBufferWidth, &mSinkBufferHeight,
+ &transformHint, &numPendingBuffers);
+
+ return refreshOutputBuffer();
+}
+
+status_t VirtualDisplaySurface::prepareFrame(CompositionType compositionType) {
+ if (mDisplayId < 0)
+ return NO_ERROR;
+
+ VDS_LOGW_IF(mDbgState != DBG_STATE_BEGUN,
"Unexpected prepareFrame() in %s state", dbgStateStr());
mDbgState = DBG_STATE_PREPARED;
}
mDbgState = DBG_STATE_HWC;
- status_t result;
- sp<Fence> outFence;
- if (mCompositionType != COMPOSITION_GLES) {
- // Dequeue an output buffer from the sink
- uint32_t transformHint, numPendingBuffers;
- mQueueBufferOutput.deflate(&mSinkBufferWidth, &mSinkBufferHeight,
- &transformHint, &numPendingBuffers);
- int sslot;
- result = dequeueBuffer(SOURCE_SINK, 0, &sslot, &outFence, false);
- if (result < 0)
- return result;
- mOutputProducerSlot = mapSource2ProducerSlot(SOURCE_SINK, sslot);
- }
-
if (mCompositionType == COMPOSITION_HWC) {
- // We just dequeued the output buffer, use it for FB as well
+ // Use the output buffer for the FB as well, though conceptually the
+ // FB is unused on this frame.
mFbProducerSlot = mOutputProducerSlot;
- mFbFence = outFence;
- } else if (mCompositionType == COMPOSITION_GLES) {
- mOutputProducerSlot = mFbProducerSlot;
- outFence = mFbFence;
- } else {
- // mFbFence and mFbProducerSlot were set in queueBuffer,
- // and mOutputProducerSlot and outFence were set above when dequeueing
- // the sink buffer.
+ mFbFence = mOutputFence;
}
if (mFbProducerSlot < 0 || mOutputProducerSlot < 0) {
mFbProducerSlot, fbBuffer.get(),
mOutputProducerSlot, outBuffer.get());
- result = mHwc.fbPost(mDisplayId, mFbFence, fbBuffer);
- if (result == NO_ERROR) {
- result = mHwc.setOutputBuffer(mDisplayId, outFence, outBuffer);
- }
-
- return result;
+ return mHwc.fbPost(mDisplayId, mFbFence, fbBuffer);
}
void VirtualDisplaySurface::onFrameCommitted() {
sp<Fence> outFence = mHwc.getLastRetireFence(mDisplayId);
VDS_LOGV("onFrameCommitted: queue sink sslot=%d", sslot);
status_t result = mSource[SOURCE_SINK]->queueBuffer(sslot,
- QueueBufferInput(systemTime(),
+ QueueBufferInput(systemTime(), false,
Rect(mSinkBufferWidth, mSinkBufferHeight),
NATIVE_WINDOW_SCALING_MODE_FREEZE, 0, false, outFence),
&qbo);
VDS_LOGV("dequeueBuffer %dx%d fmt=%d usage=%#x", w, h, format, usage);
+ status_t result = NO_ERROR;
mProducerUsage = usage | GRALLOC_USAGE_HW_COMPOSER;
Source source = fbSourceForCompositionType(mCompositionType);
+
if (source == SOURCE_SINK) {
- mSinkBufferWidth = w;
- mSinkBufferHeight = h;
+ // We already dequeued the output buffer. If the GLES driver wants
+ // something incompatible, we have to cancel and get a new one. This
+ // will mean that HWC will see a different output buffer between
+ // prepare and set, but since we're in GLES-only mode already it
+ // shouldn't matter.
+
+ const sp<GraphicBuffer>& buf = mProducerBuffers[mOutputProducerSlot];
+ if ((mProducerUsage & ~buf->getUsage()) != 0 ||
+ (format != 0 && format != (uint32_t)buf->getPixelFormat()) ||
+ (w != 0 && w != mSinkBufferWidth) ||
+ (h != 0 && h != mSinkBufferHeight)) {
+ VDS_LOGV("dequeueBuffer: output buffer doesn't satisfy GLES "
+ "request, getting a new buffer");
+ result = refreshOutputBuffer();
+ if (result < 0)
+ return result;
+ }
}
- int sslot;
- status_t result = dequeueBuffer(source, format, &sslot, fence, async);
- if (result >= 0) {
- *pslot = mapSource2ProducerSlot(source, sslot);
+ if (source == SOURCE_SINK) {
+ *pslot = mOutputProducerSlot;
+ *fence = mOutputFence;
+ } else {
+ int sslot;
+ result = dequeueBuffer(source, format, &sslot, fence, async);
+ if (result >= 0) {
+ *pslot = mapSource2ProducerSlot(source, sslot);
+ }
}
return result;
}
// Extract the GLES release fence for HWC to acquire
int64_t timestamp;
+ bool isAutoTimestamp;
Rect crop;
int scalingMode;
uint32_t transform;
bool async;
- input.deflate(&timestamp, &crop, &scalingMode, &transform,
- &async, &mFbFence);
+ input.deflate(&timestamp, &isAutoTimestamp, &crop, &scalingMode,
+ &transform, &async, &mFbFence);
mFbProducerSlot = pslot;
+ mOutputFence = mFbFence;
}
*output = mQueueBufferOutput;
mSinkBufferWidth = 0;
mSinkBufferHeight = 0;
mFbFence = Fence::NO_FENCE;
+ mOutputFence = Fence::NO_FENCE;
mFbProducerSlot = -1;
mOutputProducerSlot = -1;
}
+status_t VirtualDisplaySurface::refreshOutputBuffer() {
+ if (mOutputProducerSlot >= 0) {
+ mSource[SOURCE_SINK]->cancelBuffer(
+ mapProducer2SourceSlot(SOURCE_SINK, mOutputProducerSlot),
+ mOutputFence);
+ }
+
+ int sslot;
+ status_t result = dequeueBuffer(SOURCE_SINK, 0, &sslot, &mOutputFence, false);
+ if (result < 0)
+ return result;
+ mOutputProducerSlot = mapSource2ProducerSlot(SOURCE_SINK, sslot);
+
+ result = mHwc.setOutputBuffer(mDisplayId, mOutputFence,
+ mProducerBuffers[mOutputProducerSlot]);
+
+ return result;
+}
+
// This slot mapping function is its own inverse, so two copies are unnecessary.
// Both are kept to make the intent clear where the function is called, and for
// the (unlikely) chance that we switch to a different mapping function.
//
// DisplaySurface interface
//
+ virtual status_t beginFrame();
virtual status_t prepareFrame(CompositionType compositionType);
virtual status_t compositionComplete();
virtual status_t advanceFrame();
int* sslot, sp<Fence>* fence, bool async);
void updateQueueBufferOutput(const QueueBufferOutput& qbo);
void resetPerFrameState();
+ status_t refreshOutputBuffer();
// Both the sink and scratch buffer pools have their own set of slots
// ("source slots", or "sslot"). We have to merge these into the single
// target buffer.
sp<Fence> mFbFence;
+ // mOutputFence is the fence HWC should wait for before writing to the
+ // output buffer.
+ sp<Fence> mOutputFence;
+
// Producer slot numbers for the buffers to use for HWC framebuffer target
// and output.
int mFbProducerSlot;
// +-----------+-------------------+-------------+
// | State | Event || Next State |
// +-----------+-------------------+-------------+
- // | IDLE | prepareFrame || PREPARED |
+ // | IDLE | beginFrame || BEGUN |
+ // | BEGUN | prepareFrame || PREPARED |
// | PREPARED | dequeueBuffer [1] || GLES |
// | PREPARED | advanceFrame [2] || HWC |
// | GLES | queueBuffer || GLES_DONE |
enum DbgState {
// no buffer dequeued, don't know anything about the next frame
DBG_STATE_IDLE,
- // no buffer dequeued, but we know the buffer source for the frame
+ // output buffer dequeued, framebuffer source not yet known
+ DBG_STATE_BEGUN,
+ // output buffer dequeued, framebuffer source known but not provided
+ // to GLES yet.
DBG_STATE_PREPARED,
// GLES driver has a buffer dequeued
DBG_STATE_GLES,
mUseSoftwareVSync(false),
mDebugVsyncEnabled(false) {
- for (int32_t i=0 ; i<HWC_NUM_DISPLAY_TYPES ; i++) {
+ for (int32_t i=0 ; i<DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES ; i++) {
mVSyncEvent[i].header.type = DisplayEventReceiver::DISPLAY_EVENT_VSYNC;
mVSyncEvent[i].header.id = 0;
mVSyncEvent[i].header.timestamp = 0;
void EventThread::onVSyncReceived(int type, nsecs_t timestamp) {
- ALOGE_IF(type >= HWC_NUM_DISPLAY_TYPES,
+ ALOGE_IF(type >= DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES,
"received vsync event for an invalid display (id=%d)", type);
Mutex::Autolock _l(mLock);
- if (type < HWC_NUM_DISPLAY_TYPES) {
+ if (type < DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES) {
mVSyncEvent[type].header.type = DisplayEventReceiver::DISPLAY_EVENT_VSYNC;
mVSyncEvent[type].header.id = type;
mVSyncEvent[type].header.timestamp = timestamp;
}
void EventThread::onHotplugReceived(int type, bool connected) {
- ALOGE_IF(type >= HWC_NUM_DISPLAY_TYPES,
+ ALOGE_IF(type >= DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES,
"received hotplug event for an invalid display (id=%d)", type);
Mutex::Autolock _l(mLock);
- if (type < HWC_NUM_DISPLAY_TYPES) {
+ if (type < DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES) {
DisplayEventReceiver::Event event;
event.header.type = DisplayEventReceiver::DISPLAY_EVENT_HOTPLUG;
event.header.id = type;
size_t vsyncCount = 0;
nsecs_t timestamp = 0;
- for (int32_t i=0 ; i<HWC_NUM_DISPLAY_TYPES ; i++) {
+ for (int32_t i=0 ; i<DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES ; i++) {
timestamp = mVSyncEvent[i].header.timestamp;
if (timestamp) {
// we have a vsync event to dispatch
// FIXME: how do we decide which display id the fake
// vsync came from ?
mVSyncEvent[0].header.type = DisplayEventReceiver::DISPLAY_EVENT_VSYNC;
- mVSyncEvent[0].header.id = HWC_DISPLAY_PRIMARY;
+ mVSyncEvent[0].header.id = DisplayDevice::DISPLAY_PRIMARY;
mVSyncEvent[0].header.timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
mVSyncEvent[0].vsync.count++;
}
void EventThread::enableVSyncLocked() {
if (!mUseSoftwareVSync) {
// never enable h/w VSYNC when screen is off
- mFlinger->eventControl(HWC_DISPLAY_PRIMARY, SurfaceFlinger::EVENT_VSYNC, true);
+ mFlinger->eventControl(DisplayDevice::DISPLAY_PRIMARY,
+ SurfaceFlinger::EVENT_VSYNC, true);
mPowerHAL.vsyncHint(true);
}
mDebugVsyncEnabled = true;
}
void EventThread::disableVSyncLocked() {
- mFlinger->eventControl(HWC_DISPLAY_PRIMARY, SurfaceFlinger::EVENT_VSYNC, false);
+ mFlinger->eventControl(DisplayDevice::DISPLAY_PRIMARY,
+ SurfaceFlinger::EVENT_VSYNC, false);
mPowerHAL.vsyncHint(false);
mDebugVsyncEnabled = false;
}
mUseSoftwareVSync?"enabled":"disabled");
result.appendFormat(" numListeners=%u,\n events-delivered: %u\n",
mDisplayEventConnections.size(),
- mVSyncEvent[HWC_DISPLAY_PRIMARY].vsync.count);
+ mVSyncEvent[DisplayDevice::DISPLAY_PRIMARY].vsync.count);
for (size_t i=0 ; i<mDisplayEventConnections.size() ; i++) {
sp<Connection> connection =
mDisplayEventConnections.itemAt(i).promote();
#include <gui/DisplayEventReceiver.h>
#include <gui/IDisplayEventConnection.h>
-#include <hardware/hwcomposer_defs.h>
-
#include <utils/Errors.h>
#include <utils/threads.h>
#include <utils/SortedVector.h>
+#include "DisplayDevice.h"
#include "DisplayHardware/PowerHAL.h"
// ---------------------------------------------------------------------------
// protected by mLock
SortedVector< wp<Connection> > mDisplayEventConnections;
Vector< DisplayEventReceiver::Event > mPendingEvents;
- DisplayEventReceiver::Event mVSyncEvent[HWC_NUM_DISPLAY_TYPES];
+ DisplayEventReceiver::Event mVSyncEvent[DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES];
bool mUseSoftwareVSync;
// for debugging
{
mCurrentCrop.makeInvalid();
mFlinger->getRenderEngine().genTextures(1, &mTextureName);
+ mTexture.init(Texture::TEXTURE_EXTERNAL, mTextureName);
uint32_t layerFlags = 0;
if (flags & ISurfaceComposerClient::eHidden)
// pixels in the buffer.
// FIXME: the crop computation below can produce slightly incorrect clipping when we have
// a viewport clipping and a window transform. we should use floating point to fix this.
- Rect activeCrop(s.transform.transform(s.active.crop));
+
+ Rect activeCrop(s.active.w, s.active.h);
+ if (!s.active.crop.isEmpty()) {
+ activeCrop = s.active.crop;
+ }
+
+ activeCrop = s.transform.transform(activeCrop);
activeCrop.intersect(hw->getViewport(), &activeCrop);
activeCrop = s.transform.inverse().transform(activeCrop);
mSurfaceFlingerConsumer->getTransformMatrix(textureMatrix);
// Set things up for texturing.
- engine.setupLayerTexturing(mTextureName, useFiltering, textureMatrix);
+ mTexture.setDimensions(mActiveBuffer->getWidth(), mActiveBuffer->getHeight());
+ mTexture.setFiltering(useFiltering);
+ mTexture.setMatrix(textureMatrix);
+
+ engine.setupLayerTexturing(mTexture);
} else {
engine.setupLayerBlackedOut();
}
switch (format) {
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_BGRA_8888:
+ case HAL_PIXEL_FORMAT_sRGB_A_8888:
return false;
}
// in all other case, we have no blending (also for unknown formats)
if (front.active.w != bufWidth ||
front.active.h != bufHeight) {
// reject this buffer
+ //ALOGD("rejecting buffer: bufWidth=%d, bufHeight=%d, front.active.{w=%d, h=%d}",
+ // bufWidth, bufHeight, front.active.w, front.active.h);
return true;
}
}
#include "DisplayHardware/HWComposer.h"
#include "DisplayHardware/FloatRect.h"
#include "RenderEngine/Mesh.h"
+#include "RenderEngine/Texture.h"
namespace android {
bool mNeedsFiltering;
// The mesh used to draw the layer in GLES composition mode
mutable Mesh mMesh;
+ // The texture used to draw the layer's contents in GLES composition mode
+ mutable Texture mTexture;
// page-flip thread (currently main thread)
bool mSecure; // no screenshots
mPlaneAlpha = 1.0f;
mPremultipliedAlpha = true;
mOpaque = true;
- mTextureTarget = GL_TEXTURE_EXTERNAL_OES;
+ mTextureEnabled = false;
const GLfloat m[16] = {1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1 };
memset(mColor, 0, sizeof(mColor));
memcpy(mProjectionMatrix, m, sizeof(mProjectionMatrix));
- memcpy(mTextureMatrix, m, sizeof(mTextureMatrix));
}
Description::~Description() {
}
}
-void Description::setTextureName(GLenum target, GLuint tname) {
- if (target != mTextureTarget) {
- mTextureTarget = target;
- }
- if (tname != mTextureName) {
- mTextureName = tname;
- mUniformsDirty = true;
- }
+void Description::setTexture(const Texture& texture) {
+ mTexture = texture;
+ mTextureEnabled = true;
+ mUniformsDirty = true;
}
void Description::disableTexture() {
- if (mTextureTarget != 0) {
- mTextureTarget = 0;
- }
- mTextureName = 0;
+ mTextureEnabled = false;
}
void Description::setColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha) {
mUniformsDirty = true;
}
-void Description::setTextureMatrix(GLfloat const* mtx) {
- memcpy(mTextureMatrix, mtx, sizeof(mTextureMatrix));
- mUniformsDirty = true;
-}
-
} /* namespace android */
*/
#include <GLES2/gl2.h>
+#include "Texture.h"
#ifndef SF_RENDER_ENGINE_DESCRIPTION_H_
#define SF_RENDER_ENGINE_DESCRIPTION_H_
bool mPremultipliedAlpha;
// whether this layer is marked as opaque
bool mOpaque;
- // texture target, TEXTURE_2D or TEXTURE_EXTERNAL
- GLenum mTextureTarget;
- // name of the texture
- GLuint mTextureName;
+ // Texture this layer uses
+ Texture mTexture;
+ bool mTextureEnabled;
+
// color used when texturing is disabled
GLclampf mColor[4];
// projection matrix
GLfloat mProjectionMatrix[16];
- // texture matrix
- GLfloat mTextureMatrix[16];
public:
Description();
void setPlaneAlpha(GLclampf planeAlpha);
void setPremultipliedAlpha(bool premultipliedAlpha);
void setOpaque(bool opaque);
- void setTextureName(GLenum target, GLuint tname);
+ void setTexture(const Texture& texture);
void disableTexture();
void setColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha);
void setProjectionMatrix(GLfloat const* mtx);
- void setTextureMatrix(GLfloat const* mtx);
private:
bool mUniformsDirty;
*/
#include <GLES/gl.h>
+#include <GLES/glext.h>
#include <utils/String8.h>
#include <cutils/compiler.h>
#include "GLES11RenderEngine.h"
-#include "GLExtensions.h"
#include "Mesh.h"
+#include "Texture.h"
// ---------------------------------------------------------------------------
namespace android {
glColor4f(0, 0, 0, alpha/255.0f);
}
-void GLES11RenderEngine::setupLayerTexturing(size_t textureName,
- bool useFiltering, const float* textureMatrix) {
- glBindTexture(GL_TEXTURE_EXTERNAL_OES, textureName);
+void GLES11RenderEngine::setupLayerTexturing(const Texture& texture) {
+ GLuint target = texture.getTextureTarget();
+ glBindTexture(target, texture.getTextureName());
GLenum filter = GL_NEAREST;
- if (useFiltering) {
+ if (texture.getFiltering()) {
filter = GL_LINEAR;
}
- glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, filter);
- glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, filter);
+ glTexParameterx(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterx(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameterx(target, GL_TEXTURE_MAG_FILTER, filter);
+ glTexParameterx(target, GL_TEXTURE_MIN_FILTER, filter);
glMatrixMode(GL_TEXTURE);
- glLoadMatrixf(textureMatrix);
+ glLoadMatrixf(texture.getMatrix());
glMatrixMode(GL_MODELVIEW);
glDisable(GL_TEXTURE_2D);
glEnable(GL_TEXTURE_EXTERNAL_OES);
glDisable(GL_BLEND);
}
+void GLES11RenderEngine::bindImageAsFramebuffer(EGLImageKHR image,
+ uint32_t* texName, uint32_t* fbName, uint32_t* status) {
+ GLuint tname, name;
+ // turn our EGLImage into a texture
+ glGenTextures(1, &tname);
+ glBindTexture(GL_TEXTURE_2D, tname);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, (GLeglImageOES)image);
+
+ // create a Framebuffer Object to render into
+ glGenFramebuffersOES(1, &name);
+ glBindFramebufferOES(GL_FRAMEBUFFER_OES, name);
+ glFramebufferTexture2DOES(GL_FRAMEBUFFER_OES,
+ GL_COLOR_ATTACHMENT0_OES, GL_TEXTURE_2D, tname, 0);
+
+ *status = glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES);
+ *texName = tname;
+ *fbName = name;
+}
+
+void GLES11RenderEngine::unbindFramebuffer(uint32_t texName, uint32_t fbName) {
+ glBindFramebufferOES(GL_FRAMEBUFFER_OES, 0);
+ glDeleteFramebuffersOES(1, &fbName);
+ glDeleteTextures(1, &texName);
+}
+
void GLES11RenderEngine::fillWithColor(const Mesh& mesh, float r, float g, float b, float a) {
glColor4f(r, g, b, a);
glDisable(GL_TEXTURE_EXTERNAL_OES);
}
void GLES11RenderEngine::dump(String8& result) {
- const GLExtensions& extensions(GLExtensions::getInstance());
- result.appendFormat("GLES: %s, %s, %s\n",
- extensions.getVendor(),
- extensions.getRenderer(),
- extensions.getVersion());
- result.appendFormat("%s\n", extensions.getExtension());
+ RenderEngine::dump(result);
}
// ---------------------------------------------------------------------------
}; // namespace android
// ---------------------------------------------------------------------------
+
+#if defined(__gl2_h_)
+#error "don't include gl2/gl2.h in this file"
+#endif
class String8;
class Mesh;
+class Texture;
class GLES11RenderEngine : public RenderEngine {
GLuint mProtectedTexName;
GLint mMaxViewportDims[2];
GLint mMaxTextureSize;
+ virtual void bindImageAsFramebuffer(EGLImageKHR image,
+ uint32_t* texName, uint32_t* fbName, uint32_t* status);
+ virtual void unbindFramebuffer(uint32_t texName, uint32_t fbName);
+
public:
GLES11RenderEngine();
virtual void setViewportAndProjection(size_t vpw, size_t vph, size_t w, size_t h, bool yswap);
virtual void setupLayerBlending(bool premultipliedAlpha, bool opaque, int alpha);
virtual void setupDimLayerBlending(int alpha);
- virtual void setupLayerTexturing(size_t textureName, bool useFiltering, const float* textureMatrix);
+ virtual void setupLayerTexturing(const Texture& texture);
virtual void setupLayerBlackedOut();
virtual void disableTexturing();
virtual void disableBlending();
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
#include <utils/String8.h>
#include <utils/Trace.h>
#include <cutils/compiler.h>
#include "GLES20RenderEngine.h"
-#include "GLExtensions.h"
#include "Program.h"
#include "ProgramCache.h"
#include "Description.h"
#include "Mesh.h"
+#include "Texture.h"
// ---------------------------------------------------------------------------
namespace android {
disableTexturing();
}
-void GLES20RenderEngine::setupLayerTexturing(size_t textureName,
- bool useFiltering, const float* textureMatrix) {
- glBindTexture(GL_TEXTURE_EXTERNAL_OES, textureName);
+void GLES20RenderEngine::setupLayerTexturing(const Texture& texture) {
+ GLuint target = texture.getTextureTarget();
+ glBindTexture(target, texture.getTextureName());
GLenum filter = GL_NEAREST;
- if (useFiltering) {
+ if (texture.getFiltering()) {
filter = GL_LINEAR;
}
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, filter);
- glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, filter);
+ glTexParameteri(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(target, GL_TEXTURE_MAG_FILTER, filter);
+ glTexParameteri(target, GL_TEXTURE_MIN_FILTER, filter);
- mState.setTextureName(GL_TEXTURE_EXTERNAL_OES, textureName);
- mState.setTextureMatrix(textureMatrix);
+ mState.setTexture(texture);
}
void GLES20RenderEngine::setupLayerBlackedOut() {
- const GLfloat m[16] = {1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1 };
glBindTexture(GL_TEXTURE_2D, mProtectedTexName);
- mState.setTextureName(GL_TEXTURE_2D, mProtectedTexName);
- mState.setTextureMatrix(m);
+ Texture texture(Texture::TEXTURE_2D, mProtectedTexName);
+ texture.setDimensions(1, 1); // FIXME: we should get that from somewhere
+ mState.setTexture(texture);
}
void GLES20RenderEngine::disableTexturing() {
glDisable(GL_BLEND);
}
+
+void GLES20RenderEngine::bindImageAsFramebuffer(EGLImageKHR image,
+ uint32_t* texName, uint32_t* fbName, uint32_t* status) {
+ GLuint tname, name;
+ // turn our EGLImage into a texture
+ glGenTextures(1, &tname);
+ glBindTexture(GL_TEXTURE_2D, tname);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, (GLeglImageOES)image);
+
+ // create a Framebuffer Object to render into
+ glGenFramebuffers(1, &name);
+ glBindFramebuffer(GL_FRAMEBUFFER, name);
+ glFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tname, 0);
+
+ *status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
+ *texName = tname;
+ *fbName = name;
+}
+
+void GLES20RenderEngine::unbindFramebuffer(uint32_t texName, uint32_t fbName) {
+ glBindFramebuffer(GL_FRAMEBUFFER, 0);
+ glDeleteFramebuffers(1, &fbName);
+ glDeleteTextures(1, &texName);
+}
+
void GLES20RenderEngine::fillWithColor(const Mesh& mesh, float r, float g, float b, float a) {
mState.setColor(r, g, b, a);
disableTexturing();
}
void GLES20RenderEngine::dump(String8& result) {
- const GLExtensions& extensions(GLExtensions::getInstance());
- result.appendFormat("GLES: %s, %s, %s\n",
- extensions.getVendor(),
- extensions.getRenderer(),
- extensions.getVersion());
- result.appendFormat("%s\n", extensions.getExtension());
+ RenderEngine::dump(result);
}
// ---------------------------------------------------------------------------
}; // namespace android
// ---------------------------------------------------------------------------
+
+#if defined(__gl_h_)
+#error "don't include gl/gl.h in this file"
+#endif
class String8;
class Mesh;
+class Texture;
class GLES20RenderEngine : public RenderEngine {
GLuint mProtectedTexName;
Description mState;
+ virtual void bindImageAsFramebuffer(EGLImageKHR image,
+ uint32_t* texName, uint32_t* fbName, uint32_t* status);
+ virtual void unbindFramebuffer(uint32_t texName, uint32_t fbName);
+
public:
GLES20RenderEngine();
virtual void setViewportAndProjection(size_t vpw, size_t vph, size_t w, size_t h, bool yswap);
virtual void setupLayerBlending(bool premultipliedAlpha, bool opaque, int alpha);
virtual void setupDimLayerBlending(int alpha);
- virtual void setupLayerTexturing(size_t textureName, bool useFiltering, const float* textureMatrix);
+ virtual void setupLayerTexturing(const Texture& texture);
virtual void setupLayerBlackedOut();
virtual void disableTexturing();
virtual void disableBlending();
if (mSamplerLoc >= 0) {
glUniform1i(mSamplerLoc, 0);
- glUniformMatrix4fv(mTextureMatrixLoc, 1, GL_FALSE, desc.mTextureMatrix);
+ glUniformMatrix4fv(mTextureMatrixLoc, 1, GL_FALSE, desc.mTexture.getMatrix());
}
if (mAlphaPlaneLoc >= 0) {
glUniform1f(mAlphaPlaneLoc, desc.mPlaneAlpha);
ProgramCache::Key ProgramCache::computeKey(const Description& description) {
Key needs;
needs.set(Key::TEXTURE_MASK,
- (description.mTextureTarget == GL_TEXTURE_EXTERNAL_OES) ? Key::TEXTURE_EXT :
- (description.mTextureTarget == GL_TEXTURE_2D) ? Key::TEXTURE_2D :
+ !description.mTextureEnabled ? Key::TEXTURE_OFF :
+ description.mTexture.getTextureTarget() == GL_TEXTURE_EXTERNAL_OES ? Key::TEXTURE_EXT :
+ description.mTexture.getTextureTarget() == GL_TEXTURE_2D ? Key::TEXTURE_2D :
Key::TEXTURE_OFF)
.set(Key::PLANE_ALPHA_MASK,
(description.mPlaneAlpha < 1) ? Key::PLANE_ALPHA_LT_ONE : Key::PLANE_ALPHA_EQ_ONE)
if (needs.getTextureTarget() == Key::TEXTURE_EXT) {
fs << "#extension GL_OES_EGL_image_external : require";
}
+
+ // GLSL ES defines no default float precision in fragment shaders, so one must be declared
+ fs << "precision mediump float;";
+
if (needs.getTextureTarget() == Key::TEXTURE_EXT) {
fs << "uniform samplerExternalOES sampler;"
<< "varying vec2 outTexCoords;";
} else {
fs << "gl_FragColor = color;";
}
+ if (needs.isOpaque()) {
+ fs << "gl_FragColor.a = 1.0;";
+ }
if (needs.hasPlaneAlpha()) {
// modulate the alpha value with planeAlpha
if (needs.isPremultiplied()) {
// ... and the color too if we're premultiplied
- if (needs.isOpaque()) {
- // ... we're opaque, only premultiply the color component
- fs << "gl_FragColor.rgb *= alphaPlane;"
- << "gl_FragColor.a = alphaPlane;";
- } else {
- fs << "gl_FragColor *= alphaPlane;";
- }
+ fs << "gl_FragColor *= alphaPlane;";
} else {
- // not premultiplied
- if (needs.isOpaque()) {
- fs << "gl_FragColor.a = alphaPlane;";
- } else {
- fs << "gl_FragColor.a *= alphaPlane;";
- }
- }
- } else {
- if (needs.isOpaque()) {
- fs << "gl_FragColor.a = 1.0;";
+ fs << "gl_FragColor.a *= alphaPlane;";
}
}
fs << dedent << "}";
EGL_NONE, EGL_NONE
};
- EGLContext ctxt = EGL_NO_CONTEXT; // eglCreateContext(display, config, NULL, contextAttributes);
+ EGLContext ctxt = eglCreateContext(display, config, NULL, contextAttributes);
if (ctxt == EGL_NO_CONTEXT) {
// maybe ES 2.x is not supported
ALOGW("can't create an ES 2.x context, trying 1.x");
glDeleteTextures(count, names);
}
+void RenderEngine::dump(String8& result) {
+ const GLExtensions& extensions(GLExtensions::getInstance());
+ result.appendFormat("GLES: %s, %s, %s\n",
+ extensions.getVendor(),
+ extensions.getRenderer(),
+ extensions.getVersion());
+ result.appendFormat("%s\n", extensions.getExtension());
+}
+
// ---------------------------------------------------------------------------
RenderEngine::BindImageAsFramebuffer::BindImageAsFramebuffer(
RenderEngine& engine, EGLImageKHR image) : mEngine(engine)
{
- GLuint tname, name;
- // turn our EGLImage into a texture
- glGenTextures(1, &tname);
- glBindTexture(GL_TEXTURE_2D, tname);
- glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, (GLeglImageOES)image);
- // create a Framebuffer Object to render into
- glGenFramebuffersOES(1, &name);
- glBindFramebufferOES(GL_FRAMEBUFFER_OES, name);
- glFramebufferTexture2DOES(GL_FRAMEBUFFER_OES,
- GL_COLOR_ATTACHMENT0_OES, GL_TEXTURE_2D, tname, 0);
- mStatus = glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES);
+ mEngine.bindImageAsFramebuffer(image, &mTexName, &mFbName, &mStatus);
+
ALOGE_IF(mStatus != GL_FRAMEBUFFER_COMPLETE_OES,
"glCheckFramebufferStatusOES error %d", mStatus);
- mTexName = tname;
- mFbName = name;
}
RenderEngine::BindImageAsFramebuffer::~BindImageAsFramebuffer() {
// back to main framebuffer
- glBindFramebufferOES(GL_FRAMEBUFFER_OES, 0);
- glDeleteFramebuffersOES(1, &mFbName);
- glDeleteTextures(1, &mTexName);
-
+ mEngine.unbindFramebuffer(mTexName, mFbName);
}
status_t RenderEngine::BindImageAsFramebuffer::getStatus() const {
class Rect;
class Region;
class Mesh;
+class Texture;
class RenderEngine {
enum GlesVersion {
EGLContext mEGLContext;
void setEGLContext(EGLContext ctxt);
+ virtual void bindImageAsFramebuffer(EGLImageKHR image, uint32_t* texName, uint32_t* fbName, uint32_t* status) = 0;
+ virtual void unbindFramebuffer(uint32_t texName, uint32_t fbName) = 0;
+
protected:
RenderEngine();
virtual ~RenderEngine() = 0;
public:
static RenderEngine* create(EGLDisplay display, EGLConfig config);
+ // dump the extension strings. always call the base class.
+ virtual void dump(String8& result);
+
// helpers
void clearWithColor(float red, float green, float blue, float alpha);
void fillRegionWithColor(const Region& region, uint32_t height,
class BindImageAsFramebuffer {
RenderEngine& mEngine;
- unsigned int mTexName, mFbName;
- unsigned int mStatus;
+ uint32_t mTexName, mFbName;
+ uint32_t mStatus;
public:
BindImageAsFramebuffer(RenderEngine& engine, EGLImageKHR image);
~BindImageAsFramebuffer();
// set-up
virtual void checkErrors() const;
- virtual void dump(String8& result) = 0;
virtual void setViewportAndProjection(size_t vpw, size_t vph, size_t w, size_t h, bool yswap) = 0;
virtual void setupLayerBlending(bool premultipliedAlpha, bool opaque, int alpha) = 0;
virtual void setupDimLayerBlending(int alpha) = 0;
- virtual void setupLayerTexturing(size_t textureName, bool useFiltering, const float* textureMatrix) = 0;
+ virtual void setupLayerTexturing(const Texture& texture) = 0;
virtual void setupLayerBlackedOut() = 0;
virtual void disableTexturing() = 0;
--- /dev/null
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+
+#include "Texture.h"
+
+namespace android {
+
+Texture::Texture() :
+ mTextureName(0), mTextureTarget(TEXTURE_2D),
+ mWidth(0), mHeight(0), mFiltering(false) {
+ const float m[16] = {1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1 };
+ memcpy(mTextureMatrix, m, sizeof(mTextureMatrix));
+}
+
+Texture::Texture(Target textureTarget, uint32_t textureName) :
+ mTextureName(textureName), mTextureTarget(textureTarget),
+ mWidth(0), mHeight(0), mFiltering(false) {
+ const float m[16] = {1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1 };
+ memcpy(mTextureMatrix, m, sizeof(mTextureMatrix));
+}
+
+void Texture::init(Target textureTarget, uint32_t textureName) {
+ mTextureName = textureName;
+ mTextureTarget = textureTarget;
+}
+
+Texture::~Texture() {
+}
+
+
+void Texture::setMatrix(float const* matrix) {
+ memcpy(mTextureMatrix, matrix, sizeof(mTextureMatrix));
+}
+
+void Texture::setFiltering(bool enabled) {
+ mFiltering = enabled;
+}
+
+void Texture::setDimensions(size_t width, size_t height) {
+ mWidth = width;
+ mHeight = height;
+}
+
+uint32_t Texture::getTextureName() const {
+ return mTextureName;
+}
+
+uint32_t Texture::getTextureTarget() const {
+ return mTextureTarget;
+}
+
+float const* Texture::getMatrix() const {
+ return mTextureMatrix;
+}
+
+bool Texture::getFiltering() const {
+ return mFiltering;
+}
+
+size_t Texture::getWidth() const {
+ return mWidth;
+}
+
+size_t Texture::getHeight() const {
+ return mHeight;
+}
+
+} /* namespace android */
--- /dev/null
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#ifndef SF_RENDER_ENGINE_TEXTURE_H
+#define SF_RENDER_ENGINE_TEXTURE_H
+
+namespace android {
+
+class Texture {
+ uint32_t mTextureName;
+ uint32_t mTextureTarget;
+ size_t mWidth;
+ size_t mHeight;
+ bool mFiltering;
+ float mTextureMatrix[16];
+
+public:
+ enum Target { TEXTURE_2D = 0x0DE1, TEXTURE_EXTERNAL = 0x8D65 };
+
+ Texture();
+ Texture(Target textureTarget, uint32_t textureName);
+ ~Texture();
+
+ void init(Target textureTarget, uint32_t textureName);
+
+ void setMatrix(float const* matrix);
+ void setFiltering(bool enabled);
+ void setDimensions(size_t width, size_t height);
+
+ uint32_t getTextureName() const;
+ uint32_t getTextureTarget() const;
+
+ float const* getMatrix() const;
+ bool getFiltering() const;
+ size_t getWidth() const;
+ size_t getHeight() const;
+};
+
+} /* namespace android */
+#endif /* SF_RENDER_ENGINE_TEXTURE_H */
#include <math.h>
#include <dlfcn.h>
+#if defined(HAVE_PTHREADS)
+#include <sys/resource.h>
+#endif
+
#include <EGL/egl.h>
#include <cutils/log.h>
#include "RenderEngine/RenderEngine.h"
-
#define DISPLAY_COUNT 1
/*
// ---------------------------------------------------------------------------
SurfaceFlinger::SurfaceFlinger()
- : BnSurfaceComposer(), Thread(false),
+ : BnSurfaceComposer(),
mTransactionFlags(0),
mTransactionPending(false),
mAnimTransactionPending(false),
void SurfaceFlinger::onFirstRef()
{
mEventQueue.init(this);
-
- run("SurfaceFlinger", PRIORITY_URGENT_DISPLAY);
-
- // Wait for the main thread to be done with its initialization
- mReadyToRunBarrier.wait();
}
-
SurfaceFlinger::~SurfaceFlinger()
{
EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
}
sp<IBinder> SurfaceFlinger::getBuiltInDisplay(int32_t id) {
- if (uint32_t(id) >= DisplayDevice::NUM_DISPLAY_TYPES) {
+ if (uint32_t(id) >= DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES) {
ALOGE("getDefaultDisplay: id=%d is not a valid default display id", id);
return NULL;
}
return config;
}
+void SurfaceFlinger::init() {
-status_t SurfaceFlinger::readyToRun()
-{
ALOGI( "SurfaceFlinger's main thread ready to run. "
"Initializing graphics H/W...");
"couldn't create EGLContext");
// initialize our non-virtual displays
- for (size_t i=0 ; i<DisplayDevice::NUM_DISPLAY_TYPES ; i++) {
+ for (size_t i=0 ; i<DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES ; i++) {
DisplayDevice::DisplayType type((DisplayDevice::DisplayType)i);
// set-up the displays that are already connected
if (mHwc->isConnected(i) || type==DisplayDevice::DISPLAY_PRIMARY) {
// initialize our drawing state
mDrawingState = mCurrentState;
- // We're now ready to accept clients...
- mReadyToRunBarrier.open();
-
// set initial conditions (e.g. unblank default device)
initializeDisplays();
// start boot animation
startBootAnim();
-
- return NO_ERROR;
}
int32_t SurfaceFlinger::allocateHwcDisplayId(DisplayDevice::DisplayType type) {
- return (uint32_t(type) < DisplayDevice::NUM_DISPLAY_TYPES) ?
+ return (uint32_t(type) < DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES) ?
type : mHwc->allocateDisplayId();
}
status_t SurfaceFlinger::getDisplayInfo(const sp<IBinder>& display, DisplayInfo* info) {
int32_t type = NAME_NOT_FOUND;
- for (int i=0 ; i<DisplayDevice::NUM_DISPLAY_TYPES ; i++) {
+ for (int i=0 ; i<DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES ; i++) {
if (display == mBuiltinDisplays[i]) {
type = i;
break;
return res;
}
-bool SurfaceFlinger::threadLoop() {
- waitForEvent();
- return true;
+void SurfaceFlinger::run() {
+#if defined(HAVE_PTHREADS)
+ setpriority(PRIO_PROCESS, 0, PRIORITY_URGENT_DISPLAY);
+#endif
+ do {
+ waitForEvent();
+ } while (true);
}
void SurfaceFlinger::onVSyncReceived(int type, nsecs_t timestamp) {
ALOGW("WARNING: EventThread not started, ignoring vsync");
return;
}
- if (uint32_t(type) < DisplayDevice::NUM_DISPLAY_TYPES) {
+ if (uint32_t(type) < DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES) {
// we should only receive DisplayDevice::DisplayType from the vsync callback
mEventThread->onVSyncReceived(type, timestamp);
}
return;
}
- if (uint32_t(type) < DisplayDevice::NUM_DISPLAY_TYPES) {
+ if (uint32_t(type) < DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES) {
Mutex::Autolock _l(mStateLock);
if (connected) {
createBuiltinDisplayLocked((DisplayDevice::DisplayType)type);
}
void SurfaceFlinger::setUpHWComposer() {
+ for (size_t dpy=0 ; dpy<mDisplays.size() ; dpy++) {
+ mDisplays[dpy]->beginFrame();
+ }
+
HWComposer& hwc(getHwComposer());
if (hwc.initCheck() == NO_ERROR) {
// build the h/w work list
sp<DisplayDevice> hw(getDisplayDevice(draw.keyAt(i)));
if (hw != NULL)
hw->disconnect(getHwComposer());
- if (draw[i].type < DisplayDevice::NUM_DISPLAY_TYPES)
+ if (draw[i].type < DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES)
mEventThread->onHotplugReceived(draw[i].type, false);
mDisplays.removeItem(draw.keyAt(i));
} else {
hw->acquireScreen();
int32_t type = hw->getDisplayType();
- if (type < DisplayDevice::NUM_DISPLAY_TYPES) {
+ if (type < DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES) {
// built-in display, tell the HWC
getHwComposer().acquire(type);
hw->releaseScreen();
int32_t type = hw->getDisplayType();
- if (type < DisplayDevice::NUM_DISPLAY_TYPES) {
+ if (type < DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES) {
if (type == DisplayDevice::DISPLAY_PRIMARY) {
// FIXME: eventthread only knows about the main display right now
mEventThread->onScreenReleased();
const sp<DisplayDevice> hw(mFlinger.getDisplayDevice(mDisplay));
if (hw == NULL) {
ALOGE("Attempt to unblank null display %p", mDisplay.get());
- } else if (hw->getDisplayType() >= DisplayDevice::NUM_DISPLAY_TYPES) {
+ } else if (hw->getDisplayType() >= DisplayDevice::DISPLAY_VIRTUAL) {
ALOGW("Attempt to unblank virtual display");
} else {
mFlinger.onScreenAcquired(hw);
const sp<DisplayDevice> hw(mFlinger.getDisplayDevice(mDisplay));
if (hw == NULL) {
ALOGE("Attempt to blank null display %p", mDisplay.get());
- } else if (hw->getDisplayType() >= DisplayDevice::NUM_DISPLAY_TYPES) {
+ } else if (hw->getDisplayType() >= DisplayDevice::DISPLAY_VIRTUAL) {
ALOGW("Attempt to blank virtual display");
} else {
mFlinger.onScreenReleased(hw);
int err = 0;
err = native_window_set_buffers_dimensions(window, reqWidth, reqHeight);
+ err |= native_window_set_scaling_mode(window, NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
err |= native_window_set_buffers_format(window, HAL_PIXEL_FORMAT_RGBA_8888);
err |= native_window_set_usage(window, usage);
#include <utils/SortedVector.h>
#include <utils/threads.h>
-#include <binder/BinderService.h>
#include <binder/IMemory.h>
#include <ui/PixelFormat.h>
eTransactionMask = 0x07
};
-class SurfaceFlinger : public BinderService<SurfaceFlinger>,
- public BnSurfaceComposer,
+class SurfaceFlinger : public BnSurfaceComposer,
private IBinder::DeathRecipient,
- private Thread,
private HWComposer::EventHandler
{
public:
SurfaceFlinger() ANDROID_API;
+ // must be called before clients can connect
+ void init() ANDROID_API;
+
+ // starts SurfaceFlinger main loop in the current thread
+ void run() ANDROID_API;
+
enum {
EVENT_VSYNC = HWC_EVENT_VSYNC
};
virtual void binderDied(const wp<IBinder>& who);
/* ------------------------------------------------------------------------
- * Thread interface
+ * RefBase interface
*/
- virtual bool threadLoop();
- virtual status_t readyToRun();
virtual void onFirstRef();
/* ------------------------------------------------------------------------
EGLConfig mEGLConfig;
EGLDisplay mEGLDisplay;
EGLint mEGLNativeVisualId;
- sp<IBinder> mBuiltinDisplays[DisplayDevice::NUM_DISPLAY_TYPES];
+ sp<IBinder> mBuiltinDisplays[DisplayDevice::NUM_BUILTIN_DISPLAY_TYPES];
// Can only accessed from the main thread, these members
// don't need synchronization
// these are thread safe
mutable MessageQueue mEventQueue;
- mutable Barrier mReadyToRunBarrier;
FrameTracker mAnimFrameTracker;
// protected by mDestroyedLayerLock;
* limitations under the License.
*/
-#include <binder/BinderService.h>
+#include <binder/IServiceManager.h>
+#include <binder/IPCThreadState.h>
+#include <binder/ProcessState.h>
+#include <binder/IServiceManager.h>
#include "SurfaceFlinger.h"
using namespace android;
// When SF is launched in its own process, limit the number of
// binder threads to 4.
ProcessState::self()->setThreadPoolMaxThreadCount(4);
- SurfaceFlinger::publishAndJoinThreadPool(true);
+
+ // instantiate surfaceflinger
+ sp<SurfaceFlinger> flinger = new SurfaceFlinger();
+
+ // initialize before clients can connect
+ flinger->init();
+
+ // start the thread pool
+ sp<ProcessState> ps(ProcessState::self());
+ ps->startThreadPool();
+
+ // publish surface flinger
+ sp<IServiceManager> sm(defaultServiceManager());
+ sm->addService(String16(SurfaceFlinger::getServiceName()), flinger, false);
+
+ // run in this thread
+ flinger->run();
+
return 0;
}