return true;
}
+// Forwards the raw-pixel upload request to the compositor thread so the
+// copy can proceed in parallel with layer validation in DisplayQueue.
+void Compositor::UpdateLayerPixelData(std::vector<OverlayLayer> &layers) {
+ thread_->UpdateLayerPixelData(layers);
+}
+
+// Blocks until the compositor thread has finished handling any pixel-data
+// uploads previously queued via UpdateLayerPixelData().
+void Compositor::EnsurePixelDataUpdated() {
+ thread_->EnsurePixelDataUpdated();
+}
+
void Compositor::Reset() {
if (thread_)
thread_->ExitThread();
Compositor(const Compositor &) = delete;
bool BeginFrame(bool disable_explicit_sync);
+ // We offload uploading of pixel data to the compositor thread while
+ // DisplayQueue continues to validate layers.
+ void UpdateLayerPixelData(std::vector<OverlayLayer> &layers);
+ // Called when we are ready to commit; ensures any pending
+ // UpdateLayerPixelData requests have been handled.
+ void EnsurePixelDataUpdated();
bool Draw(DisplayPlaneStateList &planes, std::vector<OverlayLayer> &layers,
const std::vector<HwcRect<int>> &display_frame);
bool DrawOffscreen(std::vector<OverlayLayer> &layers,
disable_explicit_sync_ = disable_explicit_sync;
}
+// Queues buffers whose raw pixel data changed this frame for handling on
+// the compositor thread. On success, pixel_data_lock_ is deliberately left
+// held here; HandleRawPixelUpdate() releases it once the upload is done,
+// which is what EnsurePixelDataUpdated() synchronizes against.
+void CompositorThread::UpdateLayerPixelData(std::vector<OverlayLayer> &layers) {
+  if (layers.empty())
+    return;
+
+  pixel_data_lock_.lock();
+  std::vector<OverlayBuffer *>().swap(pixel_data_);
+  for (auto &layer : layers) {
+    if (layer.RawPixelDataChanged()) {
+      pixel_data_.emplace_back(layer.GetBuffer());
+    }
+  }
+
+  if (pixel_data_.empty()) {
+    // Nothing to upload. Release the lock now; otherwise it would be held
+    // forever (Resume() is skipped, so HandleRawPixelUpdate() never runs)
+    // and the next EnsurePixelDataUpdated() call would deadlock.
+    pixel_data_lock_.unlock();
+    return;
+  }
+
+  // tasks_ is shared with the compositor thread; guard it with tasks_lock_
+  // like every other task-flag update in this file.
+  tasks_lock_.lock();
+  tasks_ |= kRefreshRawPixelData;
+  tasks_lock_.unlock();
+
+  Resume();
+}
+
+// Barrier against in-flight pixel-data uploads: UpdateLayerPixelData()
+// leaves pixel_data_lock_ held and HandleRawPixelUpdate() releases it on
+// the compositor thread, so acquiring and immediately releasing it here
+// blocks until any pending upload request has been fully handled.
+void CompositorThread::EnsurePixelDataUpdated() {
+ pixel_data_lock_.lock();
+ pixel_data_lock_.unlock();
+}
+
void CompositorThread::FreeResources() {
tasks_lock_.lock();
tasks_ |= kReleaseResources;
HandleReleaseRequest();
}
+ if (tasks_ & kRefreshRawPixelData) {
+ HandleRawPixelUpdate();
+ }
+
if (signal) {
cevent_.Signal();
}
}
}
+// Runs on the compositor thread. Consumes the buffers queued in
+// pixel_data_ by UpdateLayerPixelData(): buffers that can be refreshed via
+// CPU mapping are updated directly, the rest are batched for a GL texture
+// upload. NOTE: pixel_data_lock_ was acquired by UpdateLayerPixelData()
+// on the caller's thread and is released here after the data is consumed.
+void CompositorThread::HandleRawPixelUpdate() {
+ tasks_lock_.lock();
+ tasks_ &= ~kRefreshRawPixelData;
+ tasks_lock_.unlock();
+
+ std::vector<OverlayBuffer *> texture_uploads;
+ for (auto &buffer : pixel_data_) {
+ if (buffer->NeedsTextureUpload()) {
+ texture_uploads.emplace_back(buffer);
+ } else {
+ // Mappable buffer: refresh the backing store without the renderer.
+ buffer->RefreshPixelData();
+ }
+ }
+
+ if (!texture_uploads.empty()) {
+ // Texture uploads need a current GL context.
+ Ensure3DRenderer();
+ gpu_resource_handler_->HandleTextureUploads(texture_uploads);
+ }
+
+ pixel_data_lock_.unlock();
+}
+
void CompositorThread::Handle3DDrawRequest() {
tasks_lock_.lock();
tasks_ &= ~kRender3D;
const std::vector<OverlayLayer>& layers);
void SetExplicitSyncSupport(bool disable_explicit_sync);
+ void UpdateLayerPixelData(std::vector<OverlayLayer>& layers);
+ void EnsurePixelDataUpdated();
void FreeResources();
void HandleRoutine() override;
kRender3D = 1 << 1, // Render content.
kRenderMedia = 1 << 2,
kReleaseResources = 1 << 3, // Release surfaces from plane manager.
+ kRefreshRawPixelData = 1 << 4
};
void Handle3DDrawRequest();
void HandleMediaDrawRequest();
void HandleReleaseRequest();
+ void HandleRawPixelUpdate();
void Wait();
void Ensure3DRenderer();
void EnsureMediaRenderer();
SpinLock tasks_lock_;
+ SpinLock pixel_data_lock_;
std::unique_ptr<Renderer> gl_renderer_;
std::unique_ptr<Renderer> media_renderer_;
std::unique_ptr<NativeGpuResource> gpu_resource_handler_;
std::vector<OverlayBuffer*> buffers_;
+ std::vector<OverlayBuffer*> pixel_data_;
std::vector<DrawState> states_;
std::vector<DrawState> media_states_;
std::vector<ResourceHandle> purged_resources_;
return true;
}
+// Ensures each buffer has a GPU resource (EGLImage) so its raw pixel data
+// can be uploaded as a texture. A failed import is logged and does not
+// abort processing of the remaining buffers.
+void NativeGLResource::HandleTextureUploads(
+ const std::vector<OverlayBuffer*>& buffers) {
+ EGLDisplay egl_display = eglGetCurrentDisplay();
+ for (auto& buffer : buffers) {
+ // Create EGLImage.
+ const ResourceHandle& import_image =
+ buffer->GetGpuResource(egl_display, true);
+
+ if (import_image.image_ == EGL_NO_IMAGE_KHR) {
+ ETRACE("Failed to make import image when uploading texture data.");
+ }
+ }
+}
+
NativeGLResource::~NativeGLResource() {
}
void ReleaseGPUResources(const std::vector<ResourceHandle>& handles) override;
+ void HandleTextureUploads(
+ const std::vector<OverlayBuffer*>& buffers) override;
+
private:
std::vector<GLuint> layer_textures_;
};
NativeGpuResource& operator=(NativeGpuResource&& rhs) = delete;
virtual bool PrepareResources(const std::vector<OverlayBuffer*>& buffers) = 0;
+ // Handle any texture upload requests for these buffers.
+ virtual void HandleTextureUploads(
+ const std::vector<OverlayBuffer*>& buffers) = 0;
virtual GpuResourceHandle GetResourceHandle(uint32_t layer_index) const = 0;
virtual void ReleaseGPUResources(
const std::vector<ResourceHandle>& handles) = 0;
const std::vector<ResourceHandle>& handles) override {
}
+ void HandleTextureUploads(
+ const std::vector<OverlayBuffer*>& buffers) override {
+ ETRACE("HandleTextureUploads is not implemented.");
+ }
+
private:
void Reset();
std::vector<struct vk_resource> layer_textures_;
void OverlayLayer::SetBuffer(HWCNativeHandle handle, int32_t acquire_fence,
ResourceManager* resource_manager,
- bool register_buffer) {
+ bool register_buffer, HwcLayer* layer) {
std::shared_ptr<OverlayBuffer> buffer(NULL);
if (resource_manager && register_buffer) {
if (buffer == NULL) {
buffer = OverlayBuffer::CreateOverlayBuffer();
- buffer->InitializeFromNativeHandle(handle, resource_manager);
+ bool is_cursor_layer = false;
+ if (layer) {
+ is_cursor_layer = layer->IsCursorLayer();
+ }
+ buffer->InitializeFromNativeHandle(handle, resource_manager,
+ is_cursor_layer);
if (register_buffer) {
resource_manager->RegisterBuffer(GETNATIVEBUFFER(handle), buffer);
}
}
- imported_buffer_.reset(new ImportedBuffer(buffer, acquire_fence));
- if (!register_buffer) {
- ValidateForOverlayUsage();
+
+ if (register_buffer && handle->is_raw_pixel_ && !surface_damage_.empty()) {
+ buffer->UpdateRawPixelBackingStore(handle->pixel_memory_);
+ state_ |= kRawPixelDataChanged;
}
+
+ imported_buffer_.reset(new ImportedBuffer(buffer, acquire_fence));
+ ValidateForOverlayUsage();
}
void OverlayLayer::SetBlending(HWCBlending blending) {
source_crop_height_ = layer->GetSourceCropHeight();
source_crop_ = layer->GetSourceCrop();
blending_ = layer->GetBlending();
+ surface_damage_ = layer->GetLayerDamage();
SetBuffer(layer->GetNativeHandle(), layer->GetAcquireFence(),
resource_manager, true);
- ValidateForOverlayUsage();
- surface_damage_ = layer->GetLayerDamage();
if (!handle_constraints) {
if (previous_layer) {
if (!layer->HasVisibleRegionChanged() && !content_changed &&
surface_damage_.empty() && !layer->HasLayerContentChanged() &&
- !(state_ & kNeedsReValidation)) {
+ !(state_ & kNeedsReValidation) && !(state_ & kRawPixelDataChanged)) {
state_ &= ~kLayerContentChanged;
}
}
OverlayBuffer* GetBuffer() const;
void SetBuffer(HWCNativeHandle handle, int32_t acquire_fence,
- ResourceManager* buffer_manager, bool register_buffer);
+ ResourceManager* buffer_manager, bool register_buffer,
+ HwcLayer* layer = NULL);
void SetSourceCrop(const HwcRect<float>& source_crop);
const HwcRect<float>& GetSourceCrop() const {
return state_ & kNeedsReValidation;
}
+ // Returns true if this layer is backed
+ // by raw pixel data and it has changed
+ // compared to previous frame.
+ bool RawPixelDataChanged() const {
+ return state_ & kRawPixelDataChanged;
+ }
+
void Dump();
private:
kDimensionsChanged = 1 << 1,
kInvisible = 1 << 2,
kSourceRectChanged = 1 << 3,
- kNeedsReValidation = 1 << 4
+ kNeedsReValidation = 1 << 4,
+ kRawPixelDataChanged = 1 << 5
};
struct ImportedBuffer {
uint32_t z_order = 0;
bool has_video_layer = false;
bool re_validate_commit = false;
+ bool handle_raw_pixel_update = false;
for (size_t layer_index = 0; layer_index < size; layer_index++) {
HwcLayer* layer = source_layers.at(layer_index);
continue;
}
+ if (overlay_layer->RawPixelDataChanged()) {
+ handle_raw_pixel_update = true;
+ }
+
if (overlay_layer->IsVideoLayer()) {
has_video_layer = true;
}
}
}
+ if (handle_raw_pixel_update) {
+ compositor_.UpdateLayerPixelData(layers);
+ }
+
// We may have skipped layers which are not visible.
size = layers.size();
if ((add_index == 0) || validate_layers) {
composition_passed = false;
}
}
+ } else if (handle_raw_pixel_update) {
+ compositor_.EnsurePixelDataUpdated();
}
if (!composition_passed) {
HwcBuffer meta_data_;
uint64_t gralloc1_buffer_descriptor_t_ = 0;
bool hwc_buffer_ = false;
+ // When this is true, meta_data_ is expected to be
+ // filled with the correct width, height and
+ // format.
+ bool is_raw_pixel_ = false;
+ void* pixel_memory_ = NULL;
};
typedef struct gralloc_handle* HWCNativeHandle;
std::vector<hwcomposer::HwcLayer*> layers;
hwcomposer::HwcLayer* cursor_layer = NULL;
- for (auto & [ first, second ] : layers_) {
- if (second.GetLayerUsage() == IAHWC_LAYER_USAGE_CURSOR)
- cursor_layer = second.GetLayer();
+ for (std::pair<const iahwc_layer_t, IAHWCLayer>& l : layers_) {
+ IAHWCLayer& temp = l.second;
+ if (temp.GetLayer()->IsCursorLayer())
+ cursor_layer = temp.GetLayer();
else
- layers.emplace_back(second.GetLayer());
+ layers.emplace_back(temp.GetLayer());
}
if (cursor_layer)
int IAHWC::IAHWCLayer::SetLayerUsage(int32_t layer_usage) {
layer_usage_ = layer_usage;
+ // Propagate cursor usage to the hwcomposer layer so buffers imported for
+ // it can be tagged accordingly (see DrmBuffer::InitializeFromNativeHandle).
+ if (layer_usage_ == IAHWC_LAYER_USAGE_CURSOR) {
+ iahwc_layer_.MarkAsCursorLayer();
+ }
return IAHWC_ERROR_NONE;
}
uint32_t total_planes = 0;
HwcBuffer meta_data_;
bool hwc_buffer_ = false;
+ // When this is true, meta_data_ is expected to be
+ // filled with the correct width, height and
+ // format.
+ bool is_raw_pixel_ = false;
+ void* pixel_memory_ = NULL;
uint32_t gbm_flags = 0;
};
switch (layer_parameter.type) {
case LAYER_TYPE_GL:
- renderer = new GLCubeLayerRenderer(buffer_handler, false);
+ renderer = new GLCubeLayerRenderer(buffer_handler, true);
break;
default:
printf("un-recognized layer type!\n");
LOCAL_SRC_FILES := \
physicaldisplay.cpp \
+ pixelbuffer.cpp \
drm/drmdisplay.cpp \
drm/drmbuffer.cpp \
drm/drmplane.cpp \
+ drm/drmpixelbuffer.cpp \
drm/drmdisplaymanager.cpp \
drm/drmscopedtypes.cpp
wsi_SOURCES = \
physicaldisplay.cpp \
+ pixelbuffer.cpp \
drm/drmdisplay.cpp \
drm/drmbuffer.cpp \
+ drm/drmpixelbuffer.cpp \
drm/drmplane.cpp \
drm/drmdisplaymanager.cpp \
drm/drmscopedtypes.cpp \
}
void DrmBuffer::InitializeFromNativeHandle(HWCNativeHandle handle,
- ResourceManager* resource_manager) {
+ ResourceManager* resource_manager,
+ bool is_cursor_buffer) {
+ resource_manager_ = resource_manager;
const NativeBufferHandler* handler =
- resource_manager->GetNativeBufferHandler();
- handler->CopyHandle(handle, &image_.handle_);
- if (!handler->ImportBuffer(image_.handle_)) {
- ETRACE("Failed to Import buffer.");
- return;
+ resource_manager_->GetNativeBufferHandler();
+ if (handle->is_raw_pixel_) {
+ data_ = handle->pixel_memory_;
+ PixelBuffer* buffer = PixelBuffer::CreatePixelBuffer();
+ pixel_buffer_.reset(buffer);
+ pixel_buffer_->Initialize(handler, handle->meta_data_.width_,
+ handle->meta_data_.height_,
+ handle->meta_data_.format_, data_, image_);
+ if (is_cursor_buffer) {
+ image_.handle_->meta_data_.usage_ = hwcomposer::kLayerCursor;
+ }
+ } else {
+ handler->CopyHandle(handle, &image_.handle_);
+ if (!handler->ImportBuffer(image_.handle_)) {
+ ETRACE("Failed to Import buffer.");
+ return;
+ }
}
- resource_manager_ = resource_manager;
media_image_.handle_ = image_.handle_;
Initialize(image_.handle_->meta_data_);
}
+// Records the new CPU-side pixel pointer for a raw-pixel-backed buffer.
+// The actual copy into the GPU buffer happens later in RefreshPixelData().
+// No-op for buffers not backed by raw pixel data.
+void DrmBuffer::UpdateRawPixelBackingStore(void* addr) {
+ if (pixel_buffer_) {
+ data_ = addr;
+ }
+}
+
+// Copies the current raw pixel data (data_) into the pixel buffer's
+// backing store. No-op unless this buffer is raw-pixel backed and a data
+// pointer has been set.
+void DrmBuffer::RefreshPixelData() {
+ if (pixel_buffer_ && data_) {
+ pixel_buffer_->Refresh(data_, image_);
+ }
+}
+
+// True when this buffer is raw-pixel backed and its contents could not be
+// refreshed via CPU mapping, so the data must be uploaded as a GL texture.
+// Buffers without a pixel buffer never need a texture upload.
+bool DrmBuffer::NeedsTextureUpload() const {
+  return pixel_buffer_ && pixel_buffer_->NeedsTextureUpload();
+}
+
+
const ResourceHandle& DrmBuffer::GetGpuResource(GpuDisplay egl_display,
bool external_import) {
if (image_.image_ == 0) {
if (image_.texture_ != 0) {
glBindTexture(target, image_.texture_);
+ if (pixel_buffer_ && pixel_buffer_->NeedsTextureUpload()) {
+ glTexImage2D(GL_TEXTURE_2D, 0, format_, width_, height_, 0, format_,
+ GL_UNSIGNED_BYTE, data_);
+ }
+
glEGLImageTargetTexture2DOES(target, (GLeglImageOES)image_.image_);
glBindTexture(target, 0);
} else {
glBindTexture(target, texture);
glEGLImageTargetTexture2DOES(target, (GLeglImageOES)image_.image_);
if (external_import) {
+ if (pixel_buffer_ && pixel_buffer_->NeedsTextureUpload()) {
+ glTexImage2D(GL_TEXTURE_2D, 0, format_, width_, height_, 0, format_,
+ GL_UNSIGNED_BYTE, data_);
+ }
glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
#include "overlaybuffer.h"
+#include "pixelbuffer.h"
+
namespace hwcomposer {
class NativeBufferHandler;
~DrmBuffer() override;
void InitializeFromNativeHandle(HWCNativeHandle handle,
- ResourceManager* buffer_manager) override;
+ ResourceManager* buffer_manager,
+ bool is_cursor_buffer) override;
+
+ void UpdateRawPixelBackingStore(void* addr) override;
+ void RefreshPixelData() override;
+ bool NeedsTextureUpload() const override;
uint32_t GetWidth() const override {
return width_;
void Dump() override;
private:
- void Initialize(const HwcBuffer& bo);
+ void Initialize(const HwcBuffer& bo);
uint32_t width_ = 0;
uint32_t height_ = 0;
uint32_t format_ = 0;
ResourceManager* resource_manager_ = 0;
ResourceHandle image_;
MediaResourceHandle media_image_;
+ std::unique_ptr<PixelBuffer> pixel_buffer_;
+ void* data_;
};
} // namespace hwcomposer
--- /dev/null
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include "drmpixelbuffer.h"
+
+#include <sys/mman.h>
+
+namespace hwcomposer {
+
+#define DMA_BUF_SYNC_READ (1 << 0)
+#define DMA_BUF_SYNC_WRITE (2 << 0)
+#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
+#define DMA_BUF_SYNC_START (0 << 2)
+#define DMA_BUF_SYNC_END (1 << 2)
+#define DMA_BUF_BASE 'b'
+#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
+// Local mirror of struct dma_buf_sync from the kernel uapi
+// (linux/dma-buf.h), declared here together with the flag macros above to
+// avoid requiring newer kernel headers at build time.
+struct dma_buf_sync {
+ __u64 flags;
+};
+
+DrmPixelBuffer::DrmPixelBuffer() {
+}
+
+DrmPixelBuffer::~DrmPixelBuffer() {
+}
+
+// Maps the dma-buf referred to by prime_fd into CPU address space and
+// opens a read/write CPU access window via DMA_BUF_IOCTL_SYNC
+// (DMA_BUF_SYNC_START). Returns nullptr on mmap or sync failure; on a
+// sync failure the mapping is released before returning.
+void* DrmPixelBuffer::Map(uint32_t prime_fd, size_t size) {
+ void* addr =
+ mmap(nullptr, size, (PROT_READ | PROT_WRITE), MAP_SHARED, prime_fd, 0);
+ if (addr == MAP_FAILED)
+ return nullptr;
+
+ struct dma_buf_sync sync_start = {0};
+ sync_start.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
+ int rv = ioctl(prime_fd, DMA_BUF_IOCTL_SYNC, &sync_start);
+ if (rv) {
+ ETRACE("DMA_BUF_IOCTL_SYNC failed during Map \n");
+ munmap(addr, size);
+ return nullptr;
+ }
+
+ return addr;
+}
+
+// Closes the CPU access window opened in Map() (DMA_BUF_SYNC_END) and
+// unmaps the buffer. Safe to call with a null addr (no-op). The unmap is
+// performed even if the sync ioctl fails; the failure is logged.
+void DrmPixelBuffer::Unmap(uint32_t prime_fd, void* addr, size_t size) {
+  if (addr) {
+    struct dma_buf_sync sync_end = {0};
+    sync_end.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
+    // Check the result instead of silently dropping it.
+    if (ioctl(prime_fd, DMA_BUF_IOCTL_SYNC, &sync_end)) {
+      ETRACE("DMA_BUF_IOCTL_SYNC failed during Unmap \n");
+    }
+
+    munmap(addr, size);
+  }
+}
+
+// Factory for the DRM back-end implementation of PixelBuffer.
+// Caller takes ownership of the returned object.
+PixelBuffer* PixelBuffer::CreatePixelBuffer() {
+ return new DrmPixelBuffer();
+}
+};
--- /dev/null
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef WSI_DRM_PIXELBUFFER_H_
+#define WSI_DRM_PIXELBUFFER_H_
+
+#include "pixelbuffer.h"
+
+namespace hwcomposer {
+
+class ResourceManager;
+
+// DRM implementation of PixelBuffer: maps the dma-buf prime fd with mmap
+// and brackets CPU access with DMA_BUF_IOCTL_SYNC (see drmpixelbuffer.cpp).
+class DrmPixelBuffer : public PixelBuffer {
+ public:
+  DrmPixelBuffer();
+  ~DrmPixelBuffer() override;
+
+  void* Map(uint32_t prime_fd, size_t size) override;
+
+  void Unmap(uint32_t prime_fd, void* addr, size_t size) override;
+};
+
+} // namespace hwcomposer
+#endif
}
virtual void InitializeFromNativeHandle(HWCNativeHandle handle,
- ResourceManager* buffer_manager) = 0;
+ ResourceManager* buffer_manager,
+ bool is_cursor_buffer) = 0;
+
+ // If this buffer is backed by raw pixel data, we refresh the contents
+ // in this case. Expectation is that when InitializeFromNativeHandle
+ // is called we already know if this is backed by pixel data or
+ // not.
+ virtual void RefreshPixelData() = 0;
+
+ // If this buffer is backed by raw pixel data, we update the pixel data
+ // pointer in this case. Expectation is that when InitializeFromNativeHandle
+ // is called we already know if this is backed by pixel data or
+ // not.
+ virtual void UpdateRawPixelBackingStore(void* addr) = 0;
+
+ virtual bool NeedsTextureUpload() const = 0;
virtual uint32_t GetWidth() const = 0;
--- /dev/null
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include "pixelbuffer.h"
+
+#include "resourcemanager.h"
+
+namespace hwcomposer {
+
+// Out-of-line, empty constructor/destructor for the abstract base
+// (Map/Unmap are pure virtual; see pixelbuffer.h).
+PixelBuffer::PixelBuffer() {
+}
+
+PixelBuffer::~PixelBuffer() {
+}
+
+// Creates a GPU buffer of the given width/height/format in |resource| and
+// tries to fill it with the raw pixels at |addr| by mapping the buffer's
+// prime fd. On any failure before the copy completes, the function returns
+// early and needs_texture_upload_ stays true, signalling the caller to
+// upload |addr| as a GL texture instead.
+void PixelBuffer::Initialize(const NativeBufferHandler *buffer_handler,
+ uint32_t width, uint32_t height, uint32_t format,
+ void *addr, ResourceHandle &resource) {
+ if (!buffer_handler->CreateBuffer(width, height, format, &resource.handle_)) {
+ ETRACE("PixelBuffer: CreateBuffer failed");
+ return;
+ }
+
+ HWCNativeHandle &handle = resource.handle_;
+ if (!buffer_handler->ImportBuffer(handle)) {
+ ETRACE("PixelBuffer: ImportBuffer failed");
+ return;
+ }
+
+ if (handle->meta_data_.prime_fd_ <= 0) {
+ ETRACE("PixelBuffer: prime_fd_ is invalid.");
+ return;
+ }
+
+ // NOTE(review): the copy assumes |addr| uses the same pitch as the
+ // created buffer (height_ * pitches_[0]) — confirm for callers whose
+ // source stride differs from the allocated buffer's.
+ size_t size = handle->meta_data_.height_ * handle->meta_data_.pitches_[0];
+ void *ptr = Map(handle->meta_data_.prime_fd_, size);
+ if (!ptr) {
+ return;
+ }
+
+ memcpy(ptr, addr, size);
+ Unmap(handle->meta_data_.prime_fd_, ptr, size);
+ needs_texture_upload_ = false;
+}
+
+// Re-copies pixel data from |addr| into an already-created buffer.
+// needs_texture_upload_ is raised first and only cleared after a
+// successful map+copy, so a failed refresh makes the caller fall back to
+// a GL texture upload of |addr|.
+void PixelBuffer::Refresh(void *addr, const ResourceHandle &resource) {
+ needs_texture_upload_ = true;
+ const HWCNativeHandle &handle = resource.handle_;
+ size_t size = handle->meta_data_.height_ * handle->meta_data_.pitches_[0];
+ void *ptr = Map(handle->meta_data_.prime_fd_, size);
+ if (!ptr) {
+ return;
+ }
+
+ memcpy(ptr, addr, size);
+ Unmap(handle->meta_data_.prime_fd_, ptr, size);
+ needs_texture_upload_ = false;
+}
+};
--- /dev/null
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef WSI_PIXELBUFFER_H_
+#define WSI_PIXELBUFFER_H_
+
+#include <platformdefines.h>
+#include <memory>
+#include "compositordefs.h"
+#include "hwcdefs.h"
+#include <nativebufferhandler.h>
+
+namespace hwcomposer {
+
+class ResourceManager;
+
+// Represents raw pixel data.
+class PixelBuffer {
+ public:
+  static PixelBuffer* CreatePixelBuffer();
+  PixelBuffer();
+  virtual ~PixelBuffer();
+
+  // Maps the buffer represented by prime_fd into CPU memory.
+  virtual void* Map(uint32_t prime_fd, size_t size) = 0;
+
+  // Unmaps a previously mapped buffer represented by prime_fd.
+  virtual void Unmap(uint32_t prime_fd, void* addr, size_t size) = 0;
+
+  // Creates a buffer taking into consideration width, height and format.
+  // It will try to update the buffer with addr in case we are able to map
+  // and unmap the buffer. If NeedsTextureUpload() is true after this
+  // call then the caller is responsible for uploading the data to the
+  // respective texture.
+  void Initialize(const NativeBufferHandler* buffer_handler, uint32_t width,
+                  uint32_t height, uint32_t format, void* addr,
+                  ResourceHandle& handle);
+
+  // Returns true if the buffer contents could not be updated via CPU
+  // mapping and must be uploaded as a texture by the caller instead.
+  bool NeedsTextureUpload() const {
+    return needs_texture_upload_;
+  }
+
+  // Updates the resource with the pixel data at addr.
+  void Refresh(void* addr, const ResourceHandle& resource);
+
+ private:
+  bool needs_texture_upload_ = true;
+};
+
+} // namespace hwcomposer
+#endif