1 /* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
/*
 * hab_vchan_alloc() - allocate and register a virtual channel (vchan)
 * on a physical channel (pchan).
 *
 * Visible steps in this extract:
 *  - kzalloc a zeroed vchan;
 *  - reserve an idr id in pchan->vchan_idr (idr_preload() + GFP_NOWAIT
 *    allocation under the BH-disabled pchan->vid_lock spinlock);
 *  - store the caller-supplied openid as vchan->session_id and link the
 *    vchan onto pchan->vchannels under the vchans_lock write lock;
 *  - compose vchan->id from the idr id, the habdev mmid and the pchan
 *    dom_id using the HAB_VCID_* shift/mask macros;
 *  - initialize the rx lock/list/waitqueue and the vchan refcount, and
 *    inherit the pchan's closed state as otherend_closed.
 *
 * NOTE(review): this extract is missing several original lines (the tail
 * of the parameter list with openid, the kzalloc NULL check, the idr
 * failure path after the pr_err, idr_preload_end(), the pchan/ctx get
 * calls and the return statement) — confirm against the full file.
 */
15 struct virtual_channel *
16 hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan,
20 struct virtual_channel *vchan;
25 vchan = kzalloc(sizeof(*vchan), GFP_KERNEL);
29 /* This should be the first thing we do in this function */
30 idr_preload(GFP_KERNEL);
31 spin_lock_bh(&pchan->vid_lock);
/* id range [1, max): vcid 0 is reserved; upper bound fits HAB_VCID_ID_MASK */
32 id = idr_alloc(&pchan->vchan_idr, vchan, 1,
33 (HAB_VCID_ID_MASK >> HAB_VCID_ID_SHIFT) + 1, GFP_NOWAIT);
34 spin_unlock_bh(&pchan->vid_lock);
/* NOTE(review): the idr failure branch around this pr_err is missing here */
38 pr_err("idr failed %d\n", id);
42 mb(); /* ensure id generation is complete before pchan_get */
46 /* a vchan needs both vcid and openid to be properly located */
47 vchan->session_id = openid;
48 write_lock(&pchan->vchans_lock);
49 list_add_tail(&vchan->pnode, &pchan->vchannels);
51 write_unlock(&pchan->vchans_lock);
/* vcid layout: idr id | mmid of the hab device | domain id of the pchan */
52 vchan->id = ((id << HAB_VCID_ID_SHIFT) & HAB_VCID_ID_MASK) |
53 ((pchan->habdev->id << HAB_VCID_MMID_SHIFT) &
55 ((pchan->dom_id << HAB_VCID_DOMID_SHIFT) &
57 spin_lock_init(&vchan->rx_lock);
58 INIT_LIST_HEAD(&vchan->rx_list);
59 init_waitqueue_head(&vchan->rx_queue);
61 kref_init(&vchan->refcount);
/* if the pchan is already closed, the new vchan starts closed too */
63 vchan->otherend_closed = pchan->closed;
/*
 * hab_vchan_free() - kref release callback: tear down a vchan whose
 * refcount has dropped to zero.
 *
 * Order of teardown visible here: drain and free any queued rx messages
 * (under rx_lock), drop the ctx reference, unlink from the pchan's
 * vchannels list (under vchans_lock), remove the idr entry last so the
 * same vcid cannot be reused while the vchan is still findable, then
 * drop the pchan reference.
 *
 * NOTE(review): this extract is missing interior lines (the match/unlink
 * body inside the pchan list walk and the closing braces) — confirm
 * against the full file.
 */
72 hab_vchan_free(struct kref *ref)
74 struct virtual_channel *vchan =
75 container_of(ref, struct virtual_channel, refcount);
76 struct hab_message *message, *msg_tmp;
77 struct physical_channel *pchan = vchan->pchan;
78 struct uhab_context *ctx = vchan->ctx;
79 struct virtual_channel *vc, *vc_tmp;
/* free every message still pending on this vchan's receive list */
81 spin_lock_bh(&vchan->rx_lock);
82 list_for_each_entry_safe(message, msg_tmp, &vchan->rx_list, node) {
83 list_del(&message->node);
84 hab_msg_free(message);
86 spin_unlock_bh(&vchan->rx_lock);
88 /* the vchan was already released from the ctx in vchan close() */
89 hab_ctx_put(ctx); /* now ctx is not needed from this vchan's view */
91 /* release vchan from pchan. no more msg for this vchan */
92 write_lock_bh(&pchan->vchans_lock);
93 list_for_each_entry_safe(vc, vc_tmp, &pchan->vchannels, pnode) {
96 /* the ref is held in case pchan is freed */
101 write_unlock_bh(&pchan->vchans_lock);
103 /* release the idr last so the same id will not be reused early */
104 spin_lock_bh(&pchan->vid_lock);
105 idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id));
106 spin_unlock_bh(&pchan->vid_lock);
108 hab_pchan_put(pchan); /* no more need for pchan from this vchan */
/*
 * hab_vchan_get() - look up a vchan by vcid + session id for the message
 * receive path, taking a reference on success.
 *
 * Runs entirely under pchan->vid_lock (BH-disabled). The lookup is
 * rejected when: the session id differs, the vchan is not yet paired
 * (otherend_id == 0), either side is already closed, or the refcount is
 * already zero (kref_get_unless_zero() fails).
 *
 * NOTE(review): this extract is missing lines (comment delimiters, the
 * "vchan = NULL" rejection assignments, and the return statement) —
 * confirm against the full file.
 */
114 * only for msg recv path to retrieve vchan from vcid and openid based on
117 struct virtual_channel*
118 hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
120 struct virtual_channel *vchan;
121 uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
122 uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
123 size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
124 uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
126 spin_lock_bh(&pchan->vid_lock);
127 vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id));
129 if (vchan->session_id != session_id)
131 * skipped if the session differs, even when the vcid matches
135 else if (!vchan->otherend_id /*&& !vchan->session_id*/) {
137 * an unpaired vchan can be fetched right after it is
138 * alloc'ed, so it has to be skipped during search
141 pr_warn("vcid %x is not paired yet session %d refcnt %d type %d sz %zd\n",
142 vchan->id, vchan->otherend_id,
143 get_refcnt(vchan->refcount),
144 payload_type, sizebytes);
146 } else if (vchan->otherend_closed || vchan->closed) {
147 pr_err("closed already remote %d local %d vcid %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
148 vchan->otherend_closed, vchan->closed,
149 vchan->id, vchan->otherend_id,
150 vchan->session_id, get_refcnt(vchan->refcount),
151 vchan_id, session_id, payload_type, sizebytes);
153 } else if (!kref_get_unless_zero(&vchan->refcount)) {
155 * this happens when the refcnt is already zero
156 * (put from another thread) or there is an actual error
158 pr_err("failed to inc vcid %pK %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
159 vchan, vchan->id, vchan->otherend_id,
160 vchan->session_id, get_refcnt(vchan->refcount),
161 vchan_id, session_id, payload_type, sizebytes);
165 spin_unlock_bh(&pchan->vid_lock);
/*
 * hab_vchan_stop() - mark the remote end closed and wake any waiters
 * blocked on this vchan's rx queue or on the ctx export waitqueue.
 *
 * NOTE(review): this extract is missing the original conditionals —
 * presumably NULL guards on vchan and vchan->ctx that select between the
 * wake_up_interruptible() and the pr_err() — confirm against the full
 * file.
 */
170 void hab_vchan_stop(struct virtual_channel *vchan)
173 vchan->otherend_closed = 1;
174 wake_up(&vchan->rx_queue);
176 wake_up_interruptible(&vchan->ctx->exp_wq);
178 pr_err("NULL ctx for vchan %x\n", vchan->id);
/*
 * hab_vchans_stop() - stop every vchan on the given pchan, under the
 * vchans_lock read lock (stopping only sets flags and wakes waiters, so
 * a read lock is sufficient).
 */
182 void hab_vchans_stop(struct physical_channel *pchan)
184 struct virtual_channel *vchan, *tmp;
186 read_lock(&pchan->vchans_lock);
187 list_for_each_entry_safe(vchan, tmp, &pchan->vchannels, pnode) {
188 hab_vchan_stop(vchan);
190 read_unlock(&pchan->vchans_lock);
/*
 * hab_vchan_stop_notify() - tell the remote side this vchan is closing
 * (close message first), then stop the local vchan and wake waiters.
 */
193 void hab_vchan_stop_notify(struct virtual_channel *vchan)
195 hab_send_close_msg(vchan);
196 hab_vchan_stop(vchan);
/*
 * hab_vchans_per_pchan_empty() - check (under the vchans_lock read lock)
 * whether the pchan has no vchans left; when not empty, log every
 * remaining vchan to help diagnose unclosed sockets.
 *
 * NOTE(review): this extract is missing lines (the "empty" declaration,
 * the if (!empty) wrapper implied by the inner declaration block, and the
 * return statement) — confirm against the full file.
 */
199 static int hab_vchans_per_pchan_empty(struct physical_channel *pchan)
203 read_lock(&pchan->vchans_lock);
204 empty = list_empty(&pchan->vchannels);
206 struct virtual_channel *vchan;
208 list_for_each_entry(vchan, &pchan->vchannels, pnode) {
209 pr_err("vchan %pK id %x remote id %x session %d ref %d closed %d remote close %d\n",
210 vchan, vchan->id, vchan->otherend_id,
212 get_refcnt(vchan->refcount), vchan->closed,
213 vchan->otherend_closed);
217 read_unlock(&pchan->vchans_lock);
/*
 * hab_vchans_empty() - scan every hab device's pchan list and report
 * whether all vchans belonging to the given remote vmid are closed.
 * Each device's pchan list is walked under its pchan_lock; the lock is
 * dropped early (with an info log) as soon as a non-empty pchan is found.
 *
 * NOTE(review): this extract is missing lines (the "i"/result
 * declarations, the early-exit statements after the pr_info, closing
 * braces and the return) — confirm against the full file.
 */
222 static int hab_vchans_empty(int vmid)
225 struct physical_channel *pchan;
226 struct hab_device *hab_dev;
228 for (i = 0; i < hab_driver.ndevices; i++) {
229 hab_dev = &hab_driver.devp[i];
231 spin_lock_bh(&hab_dev->pchan_lock);
232 list_for_each_entry(pchan, &hab_dev->pchannels, node) {
233 if (pchan->vmid_remote == vmid) {
234 if (!hab_vchans_per_pchan_empty(pchan)) {
236 spin_unlock_bh(&hab_dev->pchan_lock);
237 pr_info("vmid %d %s's vchans are not closed\n",
243 spin_unlock_bh(&hab_dev->pchan_lock);
/*
 * hab_vchans_empty_wait() - poll (10-12 ms sleep between checks) until
 * every vchan of the given GVM has been closed by its hab client.
 */
250 * block until all vchans of a given GVM are explicitly closed
251 * with habmm_socket_close() by hab clients themselves
253 void hab_vchans_empty_wait(int vmid)
255 pr_info("waiting for GVM%d's sockets closure\n", vmid);
257 while (!hab_vchans_empty(vmid))
258 usleep_range(10000, 12000);
260 pr_info("all of GVM%d's sockets are closed\n", vmid);
/*
 * hab_vchan_find_domid() - return the domain id of the vchan's pchan,
 * or -1 when vchan is NULL.
 */
263 int hab_vchan_find_domid(struct virtual_channel *vchan)
265 return vchan ? vchan->pchan->dom_id : -1;
268 /* this should only be called once, after the refcnt reaches zero */
/*
 * hab_vchan_schedule_free() - kref release callback invoked by
 * hab_vchan_put(). Under the ctx_lock write lock, unlink the vchan from
 * its owning ctx's vchannels list (mirroring the logic in ctx free, which
 * may race to run first), then notify/stop the channel.
 *
 * NOTE(review): this extract is missing lines (loop-exit / found flag,
 * closing braces, and whatever follows the stop_notify — likely the final
 * free path) — confirm against the full file.
 */
269 static void hab_vchan_schedule_free(struct kref *ref)
271 struct virtual_channel *vchanin =
272 container_of(ref, struct virtual_channel, refcount);
273 struct uhab_context *ctx = vchanin->ctx;
274 struct virtual_channel *vchan, *tmp;
278 * similar logic is in ctx free. if ctx free runs first,
281 write_lock_bh(&ctx->ctx_lock);
282 list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
283 if (vchan == vchanin) {
284 pr_debug("vchan free refcnt = %d\n",
285 get_refcnt(vchan->refcount));
287 list_del(&vchan->node);
292 write_unlock_bh(&ctx->ctx_lock);
295 hab_vchan_stop_notify(vchan);
/*
 * hab_vchan_put() - drop one reference on the vchan; the last put runs
 * hab_vchan_schedule_free().
 */
300 void hab_vchan_put(struct virtual_channel *vchan)
303 kref_put(&vchan->refcount, hab_vchan_schedule_free);
/*
 * hab_vchan_query() - query a vchan (looked up by vcid within the ctx):
 * fill *ids with the local vmid in the low 32 bits and the remote vmid in
 * the high 32 bits; the names buffer gets a NUL placed at its midpoint.
 * Fails when the remote end is already closed. The vchan reference taken
 * by hab_get_vchan_fromvcid() is dropped on every visible path.
 *
 * NOTE(review): this extract is missing lines (the NULL-vchan check after
 * lookup, the return statements and closing braces; the flags parameter's
 * use is not visible) — confirm against the full file.
 */
306 int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
307 char *names, size_t name_size, uint32_t flags)
309 struct virtual_channel *vchan = hab_get_vchan_fromvcid(vcid, ctx);
313 if (vchan->otherend_closed) {
314 hab_vchan_put(vchan);
318 *ids = vchan->pchan->vmid_local |
319 ((uint64_t)vchan->pchan->vmid_remote) << 32;
321 names[name_size/2] = 0;
323 hab_vchan_put(vchan);