*/
const Transform bufferOrientation(mCurrentTransform);
- const Transform transform(tr * s.transform * bufferOrientation);
+ Transform transform(tr * s.transform * bufferOrientation);
+
+ if (mSurfaceFlingerConsumer->getTransformToDisplayInverse()) {
+ /*
+ * the code below applies the display's inverse transform to the buffer
+ */
+ uint32_t invTransform = hw->getOrientationTransform();
+ // calculate the inverse transform
+ if (invTransform & NATIVE_WINDOW_TRANSFORM_ROT_90) {
+ invTransform ^= NATIVE_WINDOW_TRANSFORM_FLIP_V |
+ NATIVE_WINDOW_TRANSFORM_FLIP_H;
+ }
+ // and apply to the current transform
+ transform = transform * Transform(invTransform);
+ }
// this gives us only the "orientation" component of the transform
const uint32_t orientation = transform.getOrientation();
mSurfaceFlingerConsumer->setFilteringEnabled(useFiltering);
mSurfaceFlingerConsumer->getTransformMatrix(textureMatrix);
+ if (mSurfaceFlingerConsumer->getTransformToDisplayInverse()) {
+
+ /*
+ * the code below applies the display's inverse transform to the texture transform
+ */
+
+ // create a 4x4 transform matrix from the display transform flags
+ const mat4 flipH(-1,0,0,0, 0,1,0,0, 0,0,1,0, 1,0,0,1);
+ const mat4 flipV( 1,0,0,0, 0,-1,0,0, 0,0,1,0, 0,1,0,1);
+ const mat4 rot90( 0,1,0,0, -1,0,0,0, 0,0,1,0, 1,0,0,1);
+
+ mat4 tr;
+ uint32_t transform = hw->getOrientationTransform();
+ if (transform & NATIVE_WINDOW_TRANSFORM_ROT_90)
+ tr = tr * rot90;
+ if (transform & NATIVE_WINDOW_TRANSFORM_FLIP_H)
+ tr = tr * flipH;
+ if (transform & NATIVE_WINDOW_TRANSFORM_FLIP_V)
+ tr = tr * flipV;
+
+ // calculate the inverse
+ tr = inverse(tr);
+
+ // and finally apply it to the original texture matrix
+ const mat4 texTransform(mat4(static_cast<const float*>(textureMatrix)) * tr);
+ memcpy(textureMatrix, texTransform.asArray(), sizeof(textureMatrix));
+ }
+
// Set things up for texturing.
mTexture.setDimensions(mActiveBuffer->getWidth(), mActiveBuffer->getHeight());
mTexture.setFiltering(useFiltering);
*
* The GL code below is more logical (imho), and the difference with
* HWC is due to a limitation of the HWC API to integers -- a question
- * is suspend is wether we should ignore this problem or revert to
+ * in suspense is whether we should ignore this problem or revert to
* GL composition when a buffer scaling is applied (maybe with some
* minimal value)? Or, we could make GL behave like HWC -- but this feel
* like more of a hack.
// TODO: we probably want to generate the texture coords with the mesh
// here we assume that we only have 4 vertices
- Mesh::VertexArray texCoords(mMesh.getTexCoordArray());
- texCoords[0].s = left;
- texCoords[0].t = 1.0f - top;
- texCoords[1].s = left;
- texCoords[1].t = 1.0f - bottom;
- texCoords[2].s = right;
- texCoords[2].t = 1.0f - bottom;
- texCoords[3].s = right;
- texCoords[3].t = 1.0f - top;
+ Mesh::VertexArray<vec2> texCoords(mMesh.getTexCoordArray<vec2>());
+ texCoords[0] = vec2(left, 1.0f - top);
+ texCoords[1] = vec2(left, 1.0f - bottom);
+ texCoords[2] = vec2(right, 1.0f - bottom);
+ texCoords[3] = vec2(right, 1.0f - top);
RenderEngine& engine(mFlinger->getRenderEngine());
engine.setupLayerBlending(mPremultipliedAlpha, isOpaque(), s.alpha);
// subtract the transparent region and snap to the bounds
win = reduce(win, s.activeTransparentRegion);
- Mesh::VertexArray position(mesh.getPositionArray());
- tr.transform(position[0], win.left, win.top);
- tr.transform(position[1], win.left, win.bottom);
- tr.transform(position[2], win.right, win.bottom);
- tr.transform(position[3], win.right, win.top);
+ Mesh::VertexArray<vec2> position(mesh.getPositionArray<vec2>());
+ position[0] = tr.transform(win.left, win.top);
+ position[1] = tr.transform(win.left, win.bottom);
+ position[2] = tr.transform(win.right, win.bottom);
+ position[3] = tr.transform(win.right, win.top);
for (size_t i=0 ; i<4 ; i++) {
position[i].y = hw_h - position[i].y;
}