bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
- return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
+ return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK;
}
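The rename reflects that "tiled" is not a single flag bit but a small format field, so it has to be tested with a mask rather than as one bit. A sketch of the layout this assumes (values from memory and only illustrative; the authoritative definitions live in include/uapi/drm/omap_drm.h):

	#define OMAP_BO_TILED_8		0x00000100	/* 8-bit TILER container */
	#define OMAP_BO_TILED_16	0x00000200	/* 16-bit TILER container */
	#define OMAP_BO_TILED_32	0x00000300	/* 32-bit TILER container */
	#define OMAP_BO_TILED_MASK	0x00000f00	/* any tiled mode */

With an encoding like this the 32-bit value is the OR of the 8- and 16-bit ones, so no single-bit test can reliably answer "is this buffer tiled at all"; masking with OMAP_BO_TILED_MASK is the test that works for every tiled format, which is what the rename makes explicit at each call site below.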
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
x = state->src_x >> 16;
y = state->src_y >> 16;
- if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
+ if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
plane = &omap_fb->planes[1];
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
- WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+ WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED_MASK));
omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
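Two details of the hunk above are easy to miss. First, src_x/src_y/src_w/src_h are DRM's Q16.16 fixed-point source coordinates, so ">> 16" just keeps the integer pixel part (e.g. src_x = 0x00400000 is column 64), and the x/2, y/2 passed for the second plane reflect NV12's 2x2 chroma subsampling. Second, per the note, the DRM rotation angle is counter-clockwise while TILER/DSS count clockwise. A hypothetical helper, not part of the patch, to make that direction conversion concrete:

	/*
	 * DRM rotation is counter-clockwise, the hardware counts clockwise,
	 * so 90deg CCW becomes 270deg CW and vice versa; 0 and 180 map to
	 * themselves. Purely illustrative; the driver actually works on
	 * rotation/reflection bitmasks rather than plain angles.
	 */
	static unsigned int drm_ccw_angle_to_hw_cw(unsigned int degrees)
	{
		return (360 - degrees) % 360;
	}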
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
int i;
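gem2fmt() used above maps the tiled flag bits onto a TILER container format; it is essentially a switch on the masked bits, roughly along these lines (a sketch of the surrounding driver code, not part of this hunk):

	static enum tiler_fmt gem2fmt(u32 flags)
	{
		switch (flags & OMAP_BO_TILED_MASK) {
		case OMAP_BO_TILED_8:
			return TILFMT_8BIT;
		case OMAP_BO_TILED_16:
			return TILFMT_16BIT;
		case OMAP_BO_TILED_32:
			return TILFMT_32BIT;
		default:
			return TILFMT_PAGE;	/* non-tiled buffers */
		}
	}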
struct omap_gem_object *omap_obj = to_omap_bo(obj);
size_t size = obj->size;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
/* for tiled buffers, the virtual size has stride rounded up
* to 4kb.. (to hide the fact that row n+1 might start 16kb or
* 32kb later!). But we don't back the entire buffer with
* probably trigger put_pages()?
*/
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = omap_gem_fault_2d(obj, vma, vmf);
else
ret = omap_gem_fault_1d(obj, vma, vmf);
if (ret)
goto fail;
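To make the "stride rounded up to 4kb" comment above concrete, a worked example (numbers are illustrative, not taken from the driver): a 16-bit tiled buffer of 1280x720 pixels has a 2560-byte physical row, but each row in the mmap view starts on its own 4 KiB boundary, so the virtual size is roughly

	round_up(1280 * 2, 4096) * 720 = 4096 * 720 = 2,949,120 bytes (~2.8 MiB)

versus 1,843,200 bytes of actual pixel data. That mismatch between virtual layout and backing pages is one reason faults on tiled buffers go through the dedicated 2d path above rather than the linear 1d one.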
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
block = tiler_reserve_2d(fmt,
omap_obj->width,
omap_obj->height, 0);
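tiler_reserve_2d() carves a width x height rectangle out of the DMM/TILER container for the given format; the object's pages are then pinned into it and the block's DMA address is what DSS scans out. A simplified sketch of that pairing, error handling omitted and helper signatures assumed from omap_dmm_tiler.h:

	/* reserve a 2d region matching the buffer dimensions */
	block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height, 0);

	/* back it with the object's pages; 'roll' shifts the start line */
	ret = tiler_pin(block, omap_obj->pages, obj->size >> PAGE_SHIFT,
			omap_obj->roll, true);

	/* the address the display controller will scan out from */
	omap_obj->dma_addr = tiler_ssptr(block);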
mutex_lock(&omap_obj->lock);
if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block &&
- (omap_obj->flags & OMAP_BO_TILED)) {
+ (omap_obj->flags & OMAP_BO_TILED_MASK)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
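tiler_tsptr() is what makes zero-copy rotation work: the same reserved 2d block is visible through differently oriented address views, so a rotated scanout is just a different DMA address into the same memory. For illustration only (orientation masks assumed from omap_dmm_tiler.h):

	/* 0-degree view of the block at (x, y) */
	dma_addr_t addr_0   = tiler_tsptr(omap_obj->block, 0, x, y);
	/* same pixels seen as a 180-degree view: new address, no copying */
	dma_addr_t addr_180 = tiler_tsptr(omap_obj->block,
					  MASK_X_INVERT | MASK_Y_INVERT, x, y);

Only tiled (OMAP_BO_TILED_MASK) buffers have such a block, which is what the flags check above guards.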
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
return ret;
}
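The helper above (its signature sits outside the quoted hunk) returns the TILER row stride for the requested orientation and -EINVAL for buffers that are not tiled at all. A hypothetical caller, purely for illustration, would fall back to the framebuffer's own pitch on the error case:

	/* 'tiled_stride()' stands in for the helper above; name illustrative */
	int pitch = tiled_stride(fb->obj[0], orient);
	if (pitch < 0)
		pitch = fb->pitches[0];	/* not tiled: use the normal pitch */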
refcount_read(&omap_obj->dma_addr_cnt),
omap_obj->vaddr, omap_obj->roll);
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
if (omap_obj->block) {
struct tcm_area *area = &omap_obj->block->area;
int ret;
/* Validate the flags and compute the memory and cache flags. */
- if (flags & OMAP_BO_TILED) {
+ if (flags & OMAP_BO_TILED_MASK) {
if (!priv->usergart) {
dev_err(dev->dev, "Tiled buffers require DMM\n");
return NULL;
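This validation is what a userspace tiled allocation runs into on hardware without DMM. A sketch of such an allocation through the omapdrm uapi, with the struct layout and ioctl name assumed from include/uapi/drm/omap_drm.h, an already-open DRM fd, and error handling omitted:

	#include <xf86drm.h>		/* drmIoctl() from libdrm */
	#include <drm/omap_drm.h>	/* OMAP_BO_*, struct drm_omap_gem_new
					 * (uapi header; include path may vary) */

	struct drm_omap_gem_new req = {
		/* tiled BOs are sized in pixels, not bytes */
		.size.tiled = { .width = 1920, .height = 1080 },
		.flags = OMAP_BO_TILED_16 | OMAP_BO_WC,
	};

	int ret = drmIoctl(drm_fd, DRM_IOCTL_OMAP_GEM_NEW, &req);
	/* on success req.handle holds the new GEM handle; without DMM the
	 * kernel rejects the request with the "Tiled buffers require DMM"
	 * error shown above */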
omap_obj->flags = flags;
mutex_init(&omap_obj->lock);
- if (flags & OMAP_BO_TILED) {
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* For tiled buffers align dimensions to slot boundaries and
* calculate size based on aligned dimensions.