sagit-ice-cold/kernel_xiaomi_msm8998.git: drivers/soc/qcom/hab/hab_vchan.c
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include "hab.h"

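/*
 * allocate a vchan on the given pchan: reserve a vcid from the pchan's
 * idr, link the vchan into the pchan's vchannel list and take refs on
 * both the pchan and the ctx. returns NULL on any failure
 */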
struct virtual_channel *
hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan,
				int openid)
{
	int id;
	struct virtual_channel *vchan;

	if (!pchan || !ctx)
		return NULL;

	vchan = kzalloc(sizeof(*vchan), GFP_KERNEL);
	if (!vchan)
		return NULL;

	/* This should be the first thing we do in this function */
	idr_preload(GFP_KERNEL);
	spin_lock_bh(&pchan->vid_lock);
	id = idr_alloc(&pchan->vchan_idr, vchan, 1,
		(HAB_VCID_ID_MASK >> HAB_VCID_ID_SHIFT) + 1, GFP_NOWAIT);
	spin_unlock_bh(&pchan->vid_lock);
	idr_preload_end();

	if (id <= 0) {
		pr_err("idr failed %d\n", id);
		kfree(vchan);
		return NULL;
	}
	mb(); /* id generation must be complete before pchan_get */

	hab_pchan_get(pchan);
	vchan->pchan = pchan;
	/* the vchan needs both vcid and openid to be properly located */
	vchan->session_id = openid;
	write_lock(&pchan->vchans_lock);
	list_add_tail(&vchan->pnode, &pchan->vchannels);
	pchan->vcnt++;
	write_unlock(&pchan->vchans_lock);
	vchan->id = ((id << HAB_VCID_ID_SHIFT) & HAB_VCID_ID_MASK) |
		((pchan->habdev->id << HAB_VCID_MMID_SHIFT) &
			HAB_VCID_MMID_MASK) |
		((pchan->dom_id << HAB_VCID_DOMID_SHIFT) &
			HAB_VCID_DOMID_MASK);
	spin_lock_init(&vchan->rx_lock);
	INIT_LIST_HEAD(&vchan->rx_list);
	init_waitqueue_head(&vchan->rx_queue);

	kref_init(&vchan->refcount);

	vchan->otherend_closed = pchan->closed;

	hab_ctx_get(ctx);
	vchan->ctx = ctx;

	return vchan;
}

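/*
 * called from hab_vchan_schedule_free() once the refcount hits zero:
 * drain the rx list, drop the ctx and pchan refs, unlink the vchan from
 * its pchan, recycle the vcid idr entry and free the vchan
 */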
static void
hab_vchan_free(struct kref *ref)
{
	struct virtual_channel *vchan =
		container_of(ref, struct virtual_channel, refcount);
	struct hab_message *message, *msg_tmp;
	struct physical_channel *pchan = vchan->pchan;
	struct uhab_context *ctx = vchan->ctx;
	struct virtual_channel *vc, *vc_tmp;

	spin_lock_bh(&vchan->rx_lock);
	list_for_each_entry_safe(message, msg_tmp, &vchan->rx_list, node) {
		list_del(&message->node);
		hab_msg_free(message);
	}
	spin_unlock_bh(&vchan->rx_lock);

	/* releasing the vchan from ctx was done earlier, in vchan close() */
	hab_ctx_put(ctx); /* ctx is no longer needed from this vchan's view */

	/* release the vchan from the pchan. no more msgs for this vchan */
	write_lock_bh(&pchan->vchans_lock);
	list_for_each_entry_safe(vc, vc_tmp, &pchan->vchannels, pnode) {
		if (vchan == vc) {
			list_del(&vc->pnode);
			/* the ref is still held, so the pchan cannot be freed yet */
			pchan->vcnt--;
			break;
		}
	}
	write_unlock_bh(&pchan->vchans_lock);

	/* release the idr entry last so the same id is not reused too early */
	spin_lock_bh(&pchan->vid_lock);
	idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id));
	spin_unlock_bh(&pchan->vid_lock);

	hab_pchan_put(pchan); /* no more need for pchan from this vchan */

	kfree(vchan);
}

/*
 * only for the msg recv path: retrieve the vchan from the vcid and openid
 * recorded in the pchan's vchan idr
 */
struct virtual_channel *
hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
{
	struct virtual_channel *vchan;
	uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
	uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
	size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
	uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);

	spin_lock_bh(&pchan->vid_lock);
	vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id));
	if (vchan) {
		if (vchan->session_id != session_id)
			/*
			 * skip it if the session is different even though
			 * the vcid is the same
			 */
			vchan = NULL;
		else if (!vchan->otherend_id /*&& !vchan->session_id*/) {
			/*
			 * an unpaired vchan can be fetched right after it is
			 * alloc'ed, so it has to be skipped during the search
			 * for a remote msg
			 */
			pr_warn("vcid %x is not paired yet session %d refcnt %d type %d sz %zd\n",
				vchan->id, vchan->otherend_id,
				get_refcnt(vchan->refcount),
				payload_type, sizebytes);
			vchan = NULL;
		} else if (vchan->otherend_closed || vchan->closed) {
			pr_err("closed already remote %d local %d vcid %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
				vchan->otherend_closed, vchan->closed,
				vchan->id, vchan->otherend_id,
				vchan->session_id, get_refcnt(vchan->refcount),
				vchan_id, session_id, payload_type, sizebytes);
			vchan = NULL;
		} else if (!kref_get_unless_zero(&vchan->refcount)) {
			/*
			 * this happens when refcnt is already zero
			 * (put from other thread) or there is an actual error
			 */
			pr_err("failed to inc vcid %pK %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
				vchan, vchan->id, vchan->otherend_id,
				vchan->session_id, get_refcnt(vchan->refcount),
				vchan_id, session_id, payload_type, sizebytes);
			vchan = NULL;
		}
	}
	spin_unlock_bh(&pchan->vid_lock);

	return vchan;
}

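/* mark the remote end closed and wake up anyone waiting on this vchan */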
void hab_vchan_stop(struct virtual_channel *vchan)
{
	if (vchan) {
		vchan->otherend_closed = 1;
		wake_up(&vchan->rx_queue);
		if (vchan->ctx)
			wake_up_interruptible(&vchan->ctx->exp_wq);
		else
			pr_err("NULL ctx for vchan %x\n", vchan->id);
	}
}

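/* stop every vchan currently hosted on this pchan */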
void hab_vchans_stop(struct physical_channel *pchan)
{
	struct virtual_channel *vchan, *tmp;

	read_lock(&pchan->vchans_lock);
	list_for_each_entry_safe(vchan, tmp, &pchan->vchannels, pnode) {
		hab_vchan_stop(vchan);
	}
	read_unlock(&pchan->vchans_lock);
}

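/* notify the remote end that this vchan is closing, then stop it locally */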
void hab_vchan_stop_notify(struct virtual_channel *vchan)
{
	hab_send_close_msg(vchan);
	hab_vchan_stop(vchan);
}

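/* check whether the pchan still hosts any vchans; log the stragglers */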
static int hab_vchans_per_pchan_empty(struct physical_channel *pchan)
{
	int empty;

	read_lock(&pchan->vchans_lock);
	empty = list_empty(&pchan->vchannels);
	if (!empty) {
		struct virtual_channel *vchan;

		list_for_each_entry(vchan, &pchan->vchannels, pnode) {
			pr_err("vchan %pK id %x remote id %x session %d ref %d closed %d remote close %d\n",
				   vchan, vchan->id, vchan->otherend_id,
				   vchan->session_id,
				   get_refcnt(vchan->refcount), vchan->closed,
				   vchan->otherend_closed);
		}
	}
	read_unlock(&pchan->vchans_lock);

	return empty;
}

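/* return 1 when no vchan towards the given remote vmid is left open */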
static int hab_vchans_empty(int vmid)
{
	int i, empty = 1;
	struct physical_channel *pchan;
	struct hab_device *hab_dev;

	for (i = 0; i < hab_driver.ndevices; i++) {
		hab_dev = &hab_driver.devp[i];

		spin_lock_bh(&hab_dev->pchan_lock);
		list_for_each_entry(pchan, &hab_dev->pchannels, node) {
			if (pchan->vmid_remote == vmid) {
				if (!hab_vchans_per_pchan_empty(pchan)) {
					empty = 0;
					pr_info("vmid %d %s's vchans are not closed\n",
							vmid, pchan->name);
					break;
				}
			}
		}
		spin_unlock_bh(&hab_dev->pchan_lock);
	}

	return empty;
}

/*
 * block until all vchans of a given GVM are explicitly closed
 * with habmm_socket_close() by hab clients themselves
 */
void hab_vchans_empty_wait(int vmid)
{
	pr_info("waiting for GVM%d's sockets closure\n", vmid);

	while (!hab_vchans_empty(vmid))
		usleep_range(10000, 12000);

	pr_info("all of GVM%d's sockets are closed\n", vmid);
}

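/* return the domain id of the pchan backing this vchan, or -1 if no vchan */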
int hab_vchan_find_domid(struct virtual_channel *vchan)
{
	return vchan ? vchan->pchan->dom_id : -1;
}

/* this should only be called once, after the refcnt has dropped to zero */
static void hab_vchan_schedule_free(struct kref *ref)
{
	struct virtual_channel *vchanin =
		container_of(ref, struct virtual_channel, refcount);
	struct uhab_context *ctx = vchanin->ctx;
	struct virtual_channel *vchan, *tmp;
	int bnotify = 0;

	/*
	 * similar logic is in ctx free. if ctx free runs first,
	 * this is skipped
	 */
	write_lock_bh(&ctx->ctx_lock);
	list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
		if (vchan == vchanin) {
			pr_debug("vchan free refcnt = %d\n",
					 get_refcnt(vchan->refcount));
			ctx->vcnt--;
			list_del(&vchan->node);
			bnotify = 1;
			break;
		}
	}
	write_unlock_bh(&ctx->ctx_lock);

	if (bnotify)
		hab_vchan_stop_notify(vchan);

	hab_vchan_free(ref);
}

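/* drop a vchan ref; the last put ends up in hab_vchan_schedule_free() */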
void hab_vchan_put(struct virtual_channel *vchan)
{
	if (vchan)
		kref_put(&vchan->refcount, hab_vchan_schedule_free);
}

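/*
 * report the local and remote vmids of the pchan backing vcid through *ids;
 * no name information is returned, so only empty strings are written into
 * names
 */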
int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
			   char *names, size_t name_size, uint32_t flags)
{
	struct virtual_channel *vchan = hab_get_vchan_fromvcid(vcid, ctx);

	if (!vchan)
		return -EINVAL;

	if (vchan->otherend_closed) {
		hab_vchan_put(vchan);
		return -ENODEV;
	}

	*ids = vchan->pchan->vmid_local |
		((uint64_t)vchan->pchan->vmid_remote) << 32;
	names[0] = 0;
	names[name_size/2] = 0;

	hab_vchan_put(vchan);

	return 0;
}