Add a page-alignment argument to buffer-object creation; this will come in
very handy for tiled buffers on Intel hardware.
Also add some padding to interface structures to allow future binary
backward-compatible changes.
buf->start = rep->buffer_start;
buf->fenceFlags = rep->fence_flags;
buf->replyFlags = rep->rep_flags;
+ buf->pageAlignment = rep->page_alignment;
}
-int drmBOCreate(int fd, void *ttm, unsigned long start, unsigned long size,
- void *user_buffer, drm_bo_type_t type, unsigned mask,
+int drmBOCreate(int fd, unsigned long start, unsigned long size,
+ unsigned pageAlignment, void *user_buffer, drm_bo_type_t type,
+ unsigned mask,
unsigned hint, drmBO *buf)
{
drm_bo_arg_t arg;
req->hint = hint;
req->size = size;
req->type = type;
+ req->page_alignment = pageAlignment;
buf->virtual = NULL;
unsigned type;
unsigned flags;
unsigned signaled;
+ unsigned pad[4]; /* for future expansion */
} drmFence;
typedef struct _drmBO{
unsigned long start;
unsigned replyFlags;
unsigned fenceFlags;
+ unsigned pageAlignment;
void *virtual;
void *mapVirtual;
int mapCount;
+ unsigned pad[8]; /* for future expansion */
} drmBO;
* Buffer object functions.
*/
-extern int drmBOCreate(int fd, void *ttm, unsigned long start, unsigned long size,
- void *user_buffer, drm_bo_type_t type, unsigned mask,
- unsigned hint, drmBO *buf);
+extern int drmBOCreate(int fd, unsigned long start, unsigned long size,
+ unsigned pageAlignment,void *user_buffer,
+ drm_bo_type_t type, unsigned mask,
+ unsigned hint, drmBO *buf);
extern int drmBODestroy(int fd, drmBO *buf);
extern int drmBOReference(int fd, unsigned handle, drmBO *buf);
extern int drmBOUnReference(int fd, drmBO *buf);
unsigned long buffer_start;
drm_bo_type_t type;
unsigned long offset;
-
+ uint32_t page_alignment;
atomic_t mapped;
uint32_t flags;
uint32_t mask;
mutex_lock(&dev->struct_mutex);
do {
- node = drm_mm_search_free(mm, size, 0, 1);
+ node = drm_mm_search_free(mm, size, buf->page_alignment, 1);
if (node)
break;
return -ENOMEM;
}
- node = drm_mm_get_block(node, size, 0);
+ node = drm_mm_get_block(node, size, buf->page_alignment);
mutex_unlock(&dev->struct_mutex);
BUG_ON(!node);
node->private = (void *)buf;
rep->buffer_start = bo->buffer_start;
rep->fence_flags = bo->fence_type;
rep->rep_flags = 0;
+ rep->page_alignment = bo->page_alignment;
if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
drm_bo_type_t type,
uint32_t mask,
uint32_t hint,
+ uint32_t page_alignment,
unsigned long buffer_start,
drm_buffer_object_t ** buf_obj)
{
bo->num_pages = num_pages;
bo->node_card = NULL;
bo->node_ttm = NULL;
+ bo->page_alignment = page_alignment;
if (bo->type == drm_bo_type_fake) {
bo->offset = buffer_start;
bo->buffer_start = 0;
req->type,
req->mask,
req->hint,
+ req->page_alignment,
req->buffer_start, &entry);
if (rep.ret)
break;
drm_mm_node_t *align_splitoff = NULL;
drm_mm_node_t *child;
- unsigned tmp = size % alignment;
+ unsigned tmp = 0;
+
+ if (alignment)
+ tmp = size % alignment;
if (tmp) {
align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
drm_bo_type_t type;
unsigned arg_handle;
drm_u64_t buffer_start;
+ unsigned page_alignment;
unsigned expand_pad[4]; /*Future expansion */
enum {
drm_bo_create,
drm_u64_t buffer_start;
unsigned fence_flags;
unsigned rep_flags;
+ unsigned page_alignment;
unsigned expand_pad[4]; /*Future expansion */
}drm_bo_arg_reply_t;