vchan->otherend_id = recv_request->vchan_id;
hab_open_request_free(recv_request);
+ vchan->session_id = open_id;
+
/* Send Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK, pchan,
0, sub_id, open_id);
vchan->otherend_id = otherend_vchan_id;
+ vchan->session_id = open_id;
+
/* Send Init-Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK,
pchan, vchan->id, sub_id, open_id);
}
vchan = hab_get_vchan_fromvcid(vcid, ctx);
- if (!vchan || vchan->otherend_closed)
- return -ENODEV;
+ if (!vchan || vchan->otherend_closed) {
+ ret = -ENODEV;
+ goto err;
+ }
HAB_HEADER_SET_SIZE(header, sizebytes);
if (flags & HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT)
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
HAB_HEADER_SET_ID(header, vchan->otherend_id);
+ HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
while (1) {
ret = physical_channel_send(vchan->pchan, &header, data);
schedule();
}
- hab_vchan_put(vchan);
+
+err:
+ if (vchan)
+ hab_vchan_put(vchan);
+
return ret;
}
int nonblocking_flag = flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING;
vchan = hab_get_vchan_fromvcid(vcid, ctx);
- if (!vchan || vchan->otherend_closed)
+ if (!vchan)
return ERR_PTR(-ENODEV);
if (nonblocking_flag) {
if (!message) {
if (nonblocking_flag)
ret = -EAGAIN;
+ else if (vchan->otherend_closed)
+ ret = -ENODEV;
else
ret = -EPIPE;
}
/*
 * Notify the remote end that this virtual channel is closing by sending
 * a zero-sized CLOSE message over the underlying physical channel.
 * No-op when vchan is NULL or the other end has already closed.
 */
void hab_send_close_msg(struct virtual_channel *vchan)
{
/* zero-init so fields not set below (signature, sequence) are not garbage */
- struct hab_header header;
+ struct hab_header header = {0};
if (vchan && !vchan->otherend_closed) {
HAB_HEADER_SET_SIZE(header, 0);
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_CLOSE);
HAB_HEADER_SET_ID(header, vchan->otherend_id);
+ HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
physical_channel_send(vchan->pchan, &header, NULL);
}
}
HAB_PAYLOAD_TYPE_EXPORT_ACK,
HAB_PAYLOAD_TYPE_PROFILE,
HAB_PAYLOAD_TYPE_CLOSE,
+ HAB_PAYLOAD_TYPE_MAX,
};
#define LOOPBACK_DOM 0xFF
#define HABCFG_GET_BE(_local_cfg_, _vmid_, _mmid_) \
((settings)->vmid_mmid_list[_vmid_].is_listener[_mmid_])
+/*
+ * On-wire message header exchanged over the physical channel.
+ * __packed: layout must match the remote side exactly — do not reorder.
+ */
+struct hab_header {
+ uint32_t id_type_size; /* packed vchan id, payload type and payload size */
+ uint32_t session_id; /* open-session id; used to match the target vchan */
+ uint32_t signature; /* set to HAB_HEAD_SIGNATURE by the sender; checked on receive */
+ uint32_t sequence; /* NOTE(review): assumed per-channel message counter — confirm at the writer */
+} __packed;
+
/* "Size" of the HAB_HEADER_ID and HAB_VCID_ID must match */
#define HAB_HEADER_SIZE_SHIFT 0
#define HAB_HEADER_TYPE_SHIFT 16
#define HAB_VCID_GET_ID(vcid) \
(((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)
+
+#define HAB_HEADER_SET_SESSION_ID(header, sid) ((header).session_id = (sid))
+
#define HAB_HEADER_SET_SIZE(header, size) \
- ((header).info = (((header).info) & (~HAB_HEADER_SIZE_MASK)) | \
- (((size) << HAB_HEADER_SIZE_SHIFT) & HAB_HEADER_SIZE_MASK))
+ ((header).id_type_size = ((header).id_type_size & \
+ (~HAB_HEADER_SIZE_MASK)) | \
+ (((size) << HAB_HEADER_SIZE_SHIFT) & \
+ HAB_HEADER_SIZE_MASK))
#define HAB_HEADER_SET_TYPE(header, type) \
- ((header).info = (((header).info) & (~HAB_HEADER_TYPE_MASK)) | \
- (((type) << HAB_HEADER_TYPE_SHIFT) & HAB_HEADER_TYPE_MASK))
+ ((header).id_type_size = ((header).id_type_size & \
+ (~HAB_HEADER_TYPE_MASK)) | \
+ (((type) << HAB_HEADER_TYPE_SHIFT) & \
+ HAB_HEADER_TYPE_MASK))
#define HAB_HEADER_SET_ID(header, id) \
- ((header).info = (((header).info) & (~HAB_HEADER_ID_MASK)) | \
- ((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) \
- & HAB_HEADER_ID_MASK))
+ ((header).id_type_size = ((header).id_type_size & \
+ (~HAB_HEADER_ID_MASK)) | \
+ ((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) & \
+ HAB_HEADER_ID_MASK))
#define HAB_HEADER_GET_SIZE(header) \
- ((((header).info) & HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT)
+ (((header).id_type_size & \
+ HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT)
#define HAB_HEADER_GET_TYPE(header) \
- ((((header).info) & HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT)
+ (((header).id_type_size & \
+ HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT)
#define HAB_HEADER_GET_ID(header) \
- (((((header).info) & HAB_HEADER_ID_MASK) >> \
+ ((((header).id_type_size & HAB_HEADER_ID_MASK) >> \
(HAB_HEADER_ID_SHIFT - HAB_VCID_ID_SHIFT)) & HAB_VCID_ID_MASK)
-struct hab_header {
- uint32_t info;
-};
+#define HAB_HEADER_GET_SESSION_ID(header) ((header).session_id)
struct physical_channel {
char name[MAX_VMID_NAME_SIZE];
int id;
int otherend_id;
int otherend_closed;
+ uint32_t session_id;
};
/*
int open_id);
int hab_open_request_send(struct hab_open_request *request);
int hab_open_request_add(struct physical_channel *pchan,
- struct hab_header *header);
+ size_t sizebytes, int request_type);
void hab_open_request_free(struct hab_open_request *request);
int hab_open_listen(struct uhab_context *ctx,
struct hab_device *dev,
struct virtual_channel *hab_vchan_alloc(struct uhab_context *ctx,
struct physical_channel *pchan);
struct virtual_channel *hab_vchan_get(struct physical_channel *pchan,
- uint32_t vchan_id);
+ struct hab_header *header);
void hab_vchan_put(struct virtual_channel *vchan);
struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
int cnt; /* pages allocated for local file */
struct list_head imp_list;
struct file *filp;
+ rwlock_t implist_lock;
};
void *habmm_hyp_allocate_grantable(int page_count,
return rc;
}
+/*
+ * exporter - grant & revoke
+ * Generate the shareable page list based on the CPU-friendly virtual "address".
+ * The result is stored as an array in ppdata and returned to the caller.
+ * A page size of 4 KB is assumed.
+ */
int habmem_hyp_grant_user(unsigned long address,
int page_count,
int flags,
if (!priv)
return NULL;
+ rwlock_init(&priv->implist_lock);
INIT_LIST_HEAD(&priv->imp_list);
return priv;
pglist->kva = NULL;
}
+ write_lock(&priv->implist_lock);
list_add_tail(&pglist->list, &priv->imp_list);
priv->cnt++;
+ write_unlock(&priv->implist_lock);
return 0;
}
int kernel)
{
struct importer_context *priv = imp_ctx;
- struct pages_list *pglist;
+ struct pages_list *pglist, *tmp;
int found = 0;
uint64_t pg_index = index >> PAGE_SHIFT;
- list_for_each_entry(pglist, &priv->imp_list, list) {
+ write_lock(&priv->implist_lock);
+ list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
if (kernel) {
if (pglist->kva == (void *)((uintptr_t)index))
found = 1;
}
}
+ write_unlock(&priv->implist_lock);
if (!found) {
pr_err("failed to find export id on index %llx\n", index);
return -EINVAL;
struct pages_list *pglist;
int bfound = 0;
+ read_lock(&imp_ctx->implist_lock);
list_for_each_entry(pglist, &imp_ctx->imp_list, list) {
if (pglist->index == vma->vm_pgoff) {
bfound = 1;
break;
}
}
+ read_unlock(&imp_ctx->implist_lock);
if (!bfound) {
pr_err("Failed to find pglist vm_pgoff: %d\n", vma->vm_pgoff);
struct hab_export_ack *expect_ack)
{
int ret = 0;
- struct hab_export_ack_recvd *ack_recvd;
+ struct hab_export_ack_recvd *ack_recvd, *tmp;
spin_lock_bh(&ctx->expq_lock);
- list_for_each_entry(ack_recvd, &ctx->exp_rxq, node) {
+ list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) {
if (ack_recvd->ack.export_id == expect_ack->export_id &&
ack_recvd->ack.vcid_local == expect_ack->vcid_local &&
ack_recvd->ack.vcid_remote == expect_ack->vcid_remote) {
HAB_HEADER_SET_SIZE(header, sizebytes);
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT);
HAB_HEADER_SET_ID(header, vchan->otherend_id);
+ HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
ret = physical_channel_send(vchan->pchan, &header, exp);
if (ret != 0) {
vchan->otherend_closed);
}
- if (!ret && !vchan->otherend_closed) {
+ /* return all the received messages before the remote close */
+ if (!ret && !hab_rx_queue_empty(vchan)) {
spin_lock_bh(&vchan->rx_lock);
- if (!list_empty(&vchan->rx_list)) {
- message = list_first_entry(&vchan->rx_list,
+ message = list_first_entry(&vchan->rx_list,
struct hab_message, node);
- list_del(&message->node);
- }
+ list_del(&message->node);
spin_unlock_bh(&vchan->rx_lock);
}
return 0;
}
-static int hab_send_export_ack(struct physical_channel *pchan,
- struct export_desc *exp)
+static int hab_send_export_ack(struct virtual_channel *vchan,
+ struct physical_channel *pchan,
+ struct export_desc *exp)
{
struct hab_export_ack exp_ack = {
.export_id = exp->export_id,
HAB_HEADER_SET_SIZE(header, sizeof(exp_ack));
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT_ACK);
HAB_HEADER_SET_ID(header, exp->vcid_local);
+ HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
return physical_channel_send(pchan, &header, &exp_ack);
}
static int hab_receive_create_export_ack(struct physical_channel *pchan,
- struct uhab_context *ctx)
+ struct uhab_context *ctx, size_t sizebytes)
{
struct hab_export_ack_recvd *ack_recvd =
kzalloc(sizeof(*ack_recvd), GFP_ATOMIC);
if (!ack_recvd)
return -ENOMEM;
+ if (sizeof(ack_recvd->ack) != sizebytes)
+ pr_err("exp ack size %lu is not as arrived %zu\n",
+ sizeof(ack_recvd->ack), sizebytes);
+
if (physical_channel_read(pchan,
&ack_recvd->ack,
- sizeof(ack_recvd->ack)) != sizeof(ack_recvd->ack))
+ sizebytes) != sizebytes)
return -EIO;
spin_lock_bh(&ctx->expq_lock);
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
+ uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
struct virtual_channel *vchan = NULL;
struct export_desc *exp_desc;
struct timeval tv;
if (payload_type != HAB_PAYLOAD_TYPE_INIT &&
payload_type != HAB_PAYLOAD_TYPE_INIT_ACK &&
payload_type != HAB_PAYLOAD_TYPE_ACK) {
- vchan = hab_vchan_get(pchan, vchan_id);
+
+ /* sanity check the received message */
+ if (payload_type >= HAB_PAYLOAD_TYPE_MAX ||
+ vchan_id > (HAB_HEADER_ID_MASK >> HAB_HEADER_ID_SHIFT)
+ || !vchan_id || !session_id) {
+ pr_err("Invalid message received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ payload_type, vchan_id, sizebytes, session_id);
+ }
+
+ vchan = hab_vchan_get(pchan, header);
if (!vchan) {
+ pr_debug("vchan is not found, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ payload_type, vchan_id, sizebytes, session_id);
+
+ if (sizebytes)
+ pr_err("message is dropped\n");
+
return;
} else if (vchan->otherend_closed) {
hab_vchan_put(vchan);
+ pr_debug("vchan remote is closed, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ payload_type, vchan_id, sizebytes, session_id);
+
+ if (sizebytes)
+ pr_err("message is dropped\n");
+
return;
}
+ } else {
+ if (sizebytes != sizeof(struct hab_open_send_data)) {
+ pr_err("Invalid open request received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ payload_type, vchan_id, sizebytes, session_id);
+ }
}
switch (payload_type) {
case HAB_PAYLOAD_TYPE_INIT:
case HAB_PAYLOAD_TYPE_INIT_ACK:
case HAB_PAYLOAD_TYPE_ACK:
- ret = hab_open_request_add(pchan, header);
- if (ret)
+ ret = hab_open_request_add(pchan, sizebytes, payload_type);
+ if (ret) {
+ pr_err("open request add failed, ret %d, payload type %d, sizebytes %zx\n",
+ ret, payload_type, sizebytes);
break;
+ }
wake_up_interruptible(&dev->openq);
break;
exp_desc->domid_local = pchan->dom_id;
hab_export_enqueue(vchan, exp_desc);
- hab_send_export_ack(pchan, exp_desc);
+ hab_send_export_ack(vchan, pchan, exp_desc);
break;
case HAB_PAYLOAD_TYPE_EXPORT_ACK:
- ret = hab_receive_create_export_ack(pchan, vchan->ctx);
- if (ret)
+ ret = hab_receive_create_export_ack(pchan, vchan->ctx,
+ sizebytes);
+ if (ret) {
+ pr_err("failed to handled export ack %d\n", ret);
break;
-
+ }
wake_up_interruptible(&vchan->ctx->exp_wq);
break;
}
int hab_open_request_add(struct physical_channel *pchan,
- struct hab_header *header)
+ size_t sizebytes, int request_type)
{
struct hab_open_node *node;
struct hab_device *dev = pchan->habdev;
if (!node)
return -ENOMEM;
- if (physical_channel_read(pchan, &data, HAB_HEADER_GET_SIZE(*header)) !=
- HAB_HEADER_GET_SIZE(*header))
+ if (physical_channel_read(pchan, &data, sizebytes) != sizebytes)
return -EIO;
request = &node->request;
- request->type = HAB_HEADER_GET_TYPE(*header);
+ request->type = request_type;
request->pchan = pchan;
request->vchan_id = data.vchan_id;
request->sub_id = data.sub_id;
struct virtual_channel *vchan =
container_of(ref, struct virtual_channel, refcount);
struct hab_message *message, *msg_tmp;
- struct export_desc *exp;
+ struct export_desc *exp, *exp_tmp;
struct physical_channel *pchan = vchan->pchan;
struct uhab_context *ctx = vchan->ctx;
struct virtual_channel *vc, *vc_tmp;
+ spin_lock_bh(&vchan->rx_lock);
list_for_each_entry_safe(message, msg_tmp, &vchan->rx_list, node) {
list_del(&message->node);
hab_msg_free(message);
}
+ spin_unlock_bh(&vchan->rx_lock);
do {
found = 0;
write_lock(&ctx->exp_lock);
- list_for_each_entry(exp, &ctx->exp_whse, node) {
+ list_for_each_entry_safe(exp, exp_tmp, &ctx->exp_whse, node) {
if (exp->vcid_local == vchan->id) {
list_del(&exp->node);
found = 1;
do {
found = 0;
spin_lock_bh(&ctx->imp_lock);
- list_for_each_entry(exp, &ctx->imp_whse, node) {
+ list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
if (exp->vcid_remote == vchan->id) {
list_del(&exp->node);
found = 1;
}
struct virtual_channel*
-hab_vchan_get(struct physical_channel *pchan, uint32_t vchan_id)
+hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
{
struct virtual_channel *vchan;
+ uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
+ uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
spin_lock_bh(&pchan->vid_lock);
vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id));
if (vchan)
- if (!kref_get_unless_zero(&vchan->refcount))
+ if ((vchan->session_id != session_id) ||
+ (!kref_get_unless_zero(&vchan->refcount)))
vchan = NULL;
spin_unlock_bh(&pchan->vid_lock);
return 0;
}
+#define HAB_HEAD_SIGNATURE 0xBEE1BEE1
+
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload)
return -EAGAIN; /* not enough free space */
}
+ header->signature = HAB_HEAD_SIGNATURE;
+
if (hab_pipe_write(dev->pipe_ep,
(unsigned char *)header,
sizeof(*header)) != sizeof(*header)) {
sizeof(header)) != sizeof(header))
break; /* no data available */
+ if (header.signature != HAB_HEAD_SIGNATURE) {
+ pr_err("HAB signature mismatch, expect %X, received %X, id_type_size %X, session %X, sequence %X\n",
+ HAB_HEAD_SIGNATURE, header.signature,
+ header.id_type_size,
+ header.session_id,
+ header.sequence);
+ }
+
hab_msg_recv(pchan, &header);
}
spin_unlock_bh(&pchan->rxbuf_lock);