if (trans->base.usage & PIPE_TRANSFER_WRITE) {
if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
struct virgl_screen *vs = virgl_screen(ctx->screen);
- vbuf->base.clean = FALSE;
vctx->num_transfers++;
vs->vws->transfer_put(vs->vws, vbuf->base.hw_res,
&transfer->box, trans->base.stride, trans->base.layer_stride, trans->offset, transfer->level);
util_range_add(&vbuf->valid_buffer_range, transfer->box.x + box->x,
transfer->box.x + box->x + box->width);
-
- vbuf->base.clean = FALSE;
}
static const struct u_resource_vtbl virgl_buffer_vtbl =
uint32_t size;
uint32_t vbind;
buf = CALLOC_STRUCT(virgl_buffer);
- buf->base.clean = TRUE;
buf->base.u.b = *template;
buf->base.u.b.screen = &vs->base;
buf->base.u.vtbl = &virgl_buffer_vtbl;
if (!surf)
return NULL;
- res->clean = FALSE;
handle = virgl_object_assign_handle();
pipe_reference_init(&surf->base.reference, 1);
pipe_resource_reference(&surf->base.texture, resource);
struct virgl_resource *grres = virgl_resource(res);
struct virgl_buffer *vbuf = virgl_buffer(res);
- grres->clean = FALSE;
-
if (virgl_res_needs_flush_wait(vctx, &vbuf->base, usage)) {
ctx->flush(ctx, NULL, 0);
struct virgl_resource *dres = virgl_resource(dst);
struct virgl_resource *sres = virgl_resource(src);
- dres->clean = FALSE;
virgl_encode_resource_copy_region(vctx, dres,
dst_level, dstx, dsty, dstz,
sres, src_level,
struct virgl_resource *dres = virgl_resource(blit->dst.resource);
struct virgl_resource *sres = virgl_resource(blit->src.resource);
- dres->clean = FALSE;
virgl_encode_blit(vctx, dres, sres,
blit);
}
query->type = query_type;
query->index = index;
query->handle = handle;
- query->buf->clean = FALSE;
virgl_encoder_create_query(vctx, handle, query_type, index, query->buf, 0);
return (struct pipe_query *)query;
struct virgl_context *vctx = virgl_context(ctx);
struct virgl_query *query = virgl_query(q);
- query->buf->clean = FALSE;
virgl_encoder_begin_query(vctx, query->handle);
return true;
}
struct virgl_resource *res,
unsigned usage)
{
+ struct virgl_screen *vs = virgl_screen(vctx->base.screen);
bool readback = true;
- if (res->clean)
+ if (vs->vws->res_is_synced(vs->vws, res->hw_res))
readback = false;
else if (usage & PIPE_TRANSFER_DISCARD_RANGE)
readback = false;
struct virgl_resource {
struct u_resource u;
struct virgl_hw_res *hw_res;
- boolean clean;
};
struct virgl_buffer {
t->base.buffer_offset = buffer_offset;
t->base.buffer_size = buffer_size;
t->handle = handle;
- res->clean = FALSE;
virgl_encoder_create_so_target(vctx, handle, res, buffer_offset, buffer_size);
return &t->base;
}
if (trans->base.usage & PIPE_TRANSFER_WRITE) {
if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
struct virgl_screen *vs = virgl_screen(ctx->screen);
- vtex->base.clean = FALSE;
vctx->num_transfers++;
vs->vws->transfer_put(vs->vws, vtex->base.hw_res,
&transfer->box, trans->base.stride, l_stride, trans->offset, transfer->level);
unsigned vbind;
tex = CALLOC_STRUCT(virgl_texture);
- tex->base.clean = TRUE;
tex->base.u.b = *template;
tex->base.u.b.screen = &vs->base;
pipe_reference_init(&tex->base.u.b.reference, 1);
struct virgl_hw_res *res,
enum virgl_bo_usage usage);
+ bool (*res_is_synced)(struct virgl_winsys *vws,
+ struct virgl_hw_res *res);
+
int (*get_caps)(struct virgl_winsys *vws, struct virgl_drm_caps *caps);
/* fence */
res->stride = stride;
pipe_reference_init(&res->reference, 1);
res->num_cs_references = 0;
+ res->synced = true;
return res;
}
if (res) {
LIST_DEL(&res->head);
+ res->synced = true;
--qdws->num_delayed;
pipe_mutex_unlock(qdws->mutex);
pipe_reference_init(&res->reference, 1);
if (write_buf)
cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;
+ if (usage & VIRGL_USAGE_WRITE)
+ res->synced = false;
+
if (index_in_list == -1)
virgl_drm_add_res(qdws, cbuf, res, usage);
else
cbuf->bo_usage[index_in_list] |= usage;
}
+/* Report whether the guest copy of @res still matches the host copy
+ * (the "synced" flag on virgl_hw_res); used by the transfer code to
+ * decide if a readback from the host is needed before mapping. */
+static bool virgl_drm_res_is_synced(struct virgl_winsys *qws,
+                                    struct virgl_hw_res *res)
+{
+   return res->synced;
+}
+
static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
struct virgl_cmd_buf *_cbuf,
struct virgl_hw_res *res,
qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
qdws->base.emit_res = virgl_drm_emit_res;
qdws->base.res_is_referenced = virgl_drm_res_is_ref;
-
+ qdws->base.res_is_synced = virgl_drm_res_is_synced;
qdws->base.cs_create_fence = virgl_cs_create_fence;
qdws->base.fence_wait = virgl_fence_wait;
qdws->base.fence_reference = virgl_fence_reference;
int64_t start, end;
boolean flinked;
uint32_t flink;
+
+ bool synced; /* guest copy is synced with host copy */
};
struct virgl_drm_winsys
return TRUE;
}
+/* vtest winsys counterpart of res_is_synced: report whether the local
+ * copy of @res still matches the host copy (the "synced" flag on the
+ * vtest virgl_hw_res). */
+static bool virgl_vtest_res_is_synced(struct virgl_winsys *vws,
+                                      struct virgl_hw_res *res)
+{
+   return res->synced;
+}
+
static int virgl_vtest_get_caps(struct virgl_winsys *vws,
struct virgl_drm_caps *caps)
{
vtws->base.emit_res = virgl_vtest_emit_res;
vtws->base.res_is_referenced = virgl_vtest_res_is_ref;
+ vtws->base.res_is_synced = virgl_vtest_res_is_synced;
vtws->base.get_caps = virgl_vtest_get_caps;
vtws->base.cs_create_fence = virgl_cs_create_fence;
uint32_t bind;
boolean cacheable;
int64_t start, end;
-
+ bool synced; /* remote copy is synced with host copy */
};
struct virgl_vtest_cmd_buf {