OSDN Git Service

soc: qcom: hab: add dts parsing into hab driver
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers / soc / qcom / hab / hab.c
1 /* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  */
13 #include "hab.h"
14
/*
 * Static initializer for one hab_device slot.
 * __num__ must equal the entry's index inside hab_devices[] because the
 * embedded list heads and locks self-reference the array element.
 */
#define HAB_DEVICE_CNSTR(__name__, __id__, __num__) { \
	.name = __name__,\
	.id = __id__,\
	.pchannels = LIST_HEAD_INIT(hab_devices[__num__].pchannels),\
	.pchan_lock = __MUTEX_INITIALIZER(hab_devices[__num__].pchan_lock),\
	.openq_list = LIST_HEAD_INIT(hab_devices[__num__].openq_list),\
	.openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\
	}
23
/*
 * The following has to match habmm definitions; order does not matter as
 * long as hab config does not care either. When no hab config is present,
 * the default is guest VM: all pchans are pchan openers (FE).
 * Third macro argument must stay equal to the entry's array index.
 */
static struct hab_device hab_devices[] = {
	HAB_DEVICE_CNSTR(DEVICE_AUD1_NAME, MM_AUD_1, 0),
	HAB_DEVICE_CNSTR(DEVICE_AUD2_NAME, MM_AUD_2, 1),
	HAB_DEVICE_CNSTR(DEVICE_AUD3_NAME, MM_AUD_3, 2),
	HAB_DEVICE_CNSTR(DEVICE_AUD4_NAME, MM_AUD_4, 3),
	HAB_DEVICE_CNSTR(DEVICE_CAM1_NAME, MM_CAM_1, 4),
	HAB_DEVICE_CNSTR(DEVICE_CAM2_NAME, MM_CAM_2, 5),
	HAB_DEVICE_CNSTR(DEVICE_DISP1_NAME, MM_DISP_1, 6),
	HAB_DEVICE_CNSTR(DEVICE_DISP2_NAME, MM_DISP_2, 7),
	HAB_DEVICE_CNSTR(DEVICE_DISP3_NAME, MM_DISP_3, 8),
	HAB_DEVICE_CNSTR(DEVICE_DISP4_NAME, MM_DISP_4, 9),
	HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 10),
	HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 11),
	HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 12),
	HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 13),
	HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 14),
	HAB_DEVICE_CNSTR(DEVICE_QCPE2_NAME, MM_QCPE_VM2, 15),
	HAB_DEVICE_CNSTR(DEVICE_QCPE3_NAME, MM_QCPE_VM3, 16),
	HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 17),
	HAB_DEVICE_CNSTR(DEVICE_CLK1_NAME, MM_CLK_VM1, 18),
	HAB_DEVICE_CNSTR(DEVICE_CLK2_NAME, MM_CLK_VM2, 19),
};
51
/* Driver-wide singleton: device table plus settings filled by hab_parse() */
struct hab_driver hab_driver = {
	.ndevices = ARRAY_SIZE(hab_devices),
	.devp = hab_devices,
};
56
57 struct uhab_context *hab_ctx_alloc(int kernel)
58 {
59         struct uhab_context *ctx;
60
61         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
62         if (!ctx)
63                 return NULL;
64
65         ctx->closing = 0;
66         INIT_LIST_HEAD(&ctx->vchannels);
67         INIT_LIST_HEAD(&ctx->exp_whse);
68         INIT_LIST_HEAD(&ctx->imp_whse);
69
70         INIT_LIST_HEAD(&ctx->exp_rxq);
71         init_waitqueue_head(&ctx->exp_wq);
72         spin_lock_init(&ctx->expq_lock);
73
74         spin_lock_init(&ctx->imp_lock);
75         rwlock_init(&ctx->exp_lock);
76         rwlock_init(&ctx->ctx_lock);
77
78         kref_init(&ctx->refcount);
79         ctx->import_ctx = habmem_imp_hyp_open();
80         if (!ctx->import_ctx) {
81                 pr_err("habmem_imp_hyp_open failed\n");
82                 kfree(ctx);
83                 return NULL;
84         }
85         ctx->kernel = kernel;
86
87         return ctx;
88 }
89
90 void hab_ctx_free(struct kref *ref)
91 {
92         struct uhab_context *ctx =
93                 container_of(ref, struct uhab_context, refcount);
94         struct hab_export_ack_recvd *ack_recvd, *tmp;
95
96         habmem_imp_hyp_close(ctx->import_ctx, ctx->kernel);
97
98         list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) {
99                 list_del(&ack_recvd->node);
100                 kfree(ack_recvd);
101         }
102
103         kfree(ctx);
104 }
105
106 struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
107                 struct uhab_context *ctx)
108 {
109         struct virtual_channel *vchan;
110
111         read_lock(&ctx->ctx_lock);
112         list_for_each_entry(vchan, &ctx->vchannels, node) {
113                 if (vcid == vchan->id) {
114                         kref_get(&vchan->refcount);
115                         read_unlock(&ctx->ctx_lock);
116                         return vchan;
117                 }
118         }
119         read_unlock(&ctx->ctx_lock);
120         return NULL;
121 }
122
123 static struct hab_device *find_hab_device(unsigned int mm_id)
124 {
125         int i;
126
127         for (i = 0; i < hab_driver.ndevices; i++) {
128                 if (hab_driver.devp[i].id == HAB_MMID_GET_MAJOR(mm_id))
129                         return &hab_driver.devp[i];
130         }
131
132         pr_err("find_hab_device failed: id=%d\n", mm_id);
133         return NULL;
134 }
/*
 * Open handshake between the two ends of a pchan:
 *
 *   frontend            backend
 *  send(INIT)          wait(INIT)
 *  wait(INIT_ACK)      send(INIT_ACK)
 *  send(ACK)           wait(ACK)
 */

/*
 * FE (opener) side of the handshake: allocate a vchan on the pchan that
 * reaches @dom_id and drive the INIT / INIT_ACK / ACK exchange above.
 * Returns the new vchan, or ERR_PTR() on failure.
 */
struct virtual_channel *frontend_open(struct uhab_context *ctx,
		unsigned int mm_id,
		int dom_id)
{
	int ret, open_id = 0;
	struct physical_channel *pchan = NULL;
	struct hab_device *dev;
	struct virtual_channel *vchan = NULL;
	/* monotonic id pairing this open's request with its acks */
	static atomic_t open_id_counter = ATOMIC_INIT(0);
	struct hab_open_request request;
	struct hab_open_request *recv_request;
	int sub_id = HAB_MMID_GET_MINOR(mm_id);

	dev = find_hab_device(mm_id);
	if (dev == NULL) {
		pr_err("HAB device %d is not initialized\n", mm_id);
		ret = -EINVAL;
		goto err;
	}

	pchan = hab_pchan_find_domid(dev, dom_id);
	if (!pchan) {
		pr_err("hab_pchan_find_domid failed: dom_id=%d\n", dom_id);
		ret = -EINVAL;
		goto err;
	}

	vchan = hab_vchan_alloc(ctx, pchan);
	if (!vchan) {
		pr_err("vchan alloc failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Send Init sequence */
	open_id = atomic_inc_return(&open_id_counter);
	hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT, pchan,
		vchan->id, sub_id, open_id);
	ret = hab_open_request_send(&request);
	if (ret) {
		pr_err("hab_open_request_send failed: %d\n", ret);
		goto err;
	}

	/* Wait for Init-Ack sequence */
	hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK, pchan,
		0, sub_id, open_id);
	ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
	if (ret || !recv_request) {
		pr_err("hab_open_listen failed: %d\n", ret);
		goto err;
	}

	/* the INIT_ACK carries the remote (BE) vchan id for addressing */
	vchan->otherend_id = recv_request->vchan_id;
	hab_open_request_free(recv_request);

	vchan->session_id = open_id;
	pr_debug("vchan->session_id:%d\n", vchan->session_id);

	/* Send Ack sequence */
	hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK, pchan,
		0, sub_id, open_id);
	ret = hab_open_request_send(&request);
	if (ret)
		goto err;

	/* balance the hab_pchan_find_domid() lookup on both paths */
	hab_pchan_put(pchan);

	return vchan;
err:
	if (vchan)
		hab_vchan_put(vchan);
	if (pchan)
		hab_pchan_put(pchan);

	return ERR_PTR(ret);
}
221
/*
 * BE (listener) side of the open handshake: wait for an INIT from any
 * pchan of the device serving @mm_id, answer with INIT_ACK, then wait
 * for the final ACK. On -EAGAIN while waiting for the ACK the partial
 * vchan is discarded and the whole sequence restarts.
 * Returns the new vchan, or ERR_PTR() on failure.
 */
struct virtual_channel *backend_listen(struct uhab_context *ctx,
		unsigned int mm_id)
{
	int ret;
	int open_id;
	int sub_id = HAB_MMID_GET_MINOR(mm_id);
	struct physical_channel *pchan = NULL;
	struct hab_device *dev;
	struct virtual_channel *vchan = NULL;
	struct hab_open_request request;
	struct hab_open_request *recv_request;
	uint32_t otherend_vchan_id;

	dev = find_hab_device(mm_id);
	if (dev == NULL) {
		pr_err("failed to find dev based on id %d\n", mm_id);
		ret = -EINVAL;
		goto err;
	}

	while (1) {
		/* Wait for Init sequence */
		hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT,
			NULL, 0, sub_id, 0);
		ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
		if (ret || !recv_request) {
			pr_err("hab_open_listen failed: %d\n", ret);
			goto err;
		}

		/* the INIT tells us which pchan the FE opened through */
		otherend_vchan_id = recv_request->vchan_id;
		open_id = recv_request->open_id;
		pchan = recv_request->pchan;
		hab_pchan_get(pchan);
		hab_open_request_free(recv_request);

		vchan = hab_vchan_alloc(ctx, pchan);
		if (!vchan) {
			ret = -ENOMEM;
			goto err;
		}

		vchan->otherend_id = otherend_vchan_id;

		/* session id is the FE's open id, shared by both ends */
		vchan->session_id = open_id;
		pr_debug("vchan->session_id:%d\n", vchan->session_id);

		/* Send Init-Ack sequence */
		hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK,
				pchan, vchan->id, sub_id, open_id);
		ret = hab_open_request_send(&request);
		if (ret)
			goto err;

		/* Wait for Ack sequence */
		hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK,
				pchan, 0, sub_id, open_id);
		ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);

		if (ret != -EAGAIN)
			break;

		/* ACK timed out: drop this attempt and listen again */
		hab_vchan_put(vchan);
		vchan = NULL;
		hab_pchan_put(pchan);
		pchan = NULL;
	}

	if (ret || !recv_request) {
		pr_err("backend_listen failed: %d\n", ret);
		ret = -EINVAL;
		goto err;
	}

	hab_open_request_free(recv_request);
	hab_pchan_put(pchan);
	return vchan;
err:
	pr_err("listen on mmid %d failed\n", mm_id);
	if (vchan)
		hab_vchan_put(vchan);
	if (pchan)
		hab_pchan_put(pchan);
	return ERR_PTR(ret);
}
307
308 long hab_vchan_send(struct uhab_context *ctx,
309                 int vcid,
310                 size_t sizebytes,
311                 void *data,
312                 unsigned int flags)
313 {
314         struct virtual_channel *vchan;
315         int ret;
316         struct hab_header header = HAB_HEADER_INITIALIZER;
317         int nonblocking_flag = flags & HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING;
318
319         if (sizebytes > HAB_MAX_MSG_SIZEBYTES) {
320                 pr_err("Message too large, %lu bytes\n", sizebytes);
321                 return -EINVAL;
322         }
323
324         vchan = hab_get_vchan_fromvcid(vcid, ctx);
325         if (!vchan || vchan->otherend_closed) {
326                 ret = -ENODEV;
327                 goto err;
328         }
329
330         HAB_HEADER_SET_SIZE(header, sizebytes);
331         if (flags & HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT)
332                 HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_PROFILE);
333         else
334                 HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
335
336         HAB_HEADER_SET_ID(header, vchan->otherend_id);
337         HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
338
339         while (1) {
340                 ret = physical_channel_send(vchan->pchan, &header, data);
341
342                 if (vchan->otherend_closed || nonblocking_flag ||
343                         ret != -EAGAIN)
344                         break;
345
346                 schedule();
347         }
348
349
350 err:
351         if (vchan)
352                 hab_vchan_put(vchan);
353
354         return ret;
355 }
356
/*
 * Receive one message from the vchan @vcid.
 * Returns the dequeued message (caller frees with hab_msg_free()) or an
 * ERR_PTR(): -ENODEV for an unknown vchan or a closed remote end,
 * -EAGAIN when non-blocking and nothing is queued, -EPIPE when a
 * blocking wait ended without a message.
 */
struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
				int vcid,
				unsigned int flags)
{
	struct virtual_channel *vchan;
	struct hab_message *message;
	int ret = 0;
	int nonblocking_flag = flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING;

	vchan = hab_get_vchan_fromvcid(vcid, ctx);
	if (!vchan)
		return ERR_PTR(-ENODEV);

	if (nonblocking_flag) {
		/*
		 * Try to pull data from the ring in this context instead of
		 * IRQ handler. Any available messages will be copied and queued
		 * internally, then fetched by hab_msg_dequeue()
		 */
		physical_channel_rx_dispatch((unsigned long) vchan->pchan);
	}

	/* blocking dequeue only when the caller did not ask non-blocking */
	message = hab_msg_dequeue(vchan, !nonblocking_flag);
	if (!message) {
		if (nonblocking_flag)
			ret = -EAGAIN;
		else if (vchan->otherend_closed)
			ret = -ENODEV;
		else
			ret = -EPIPE;
	}

	/* drop the lookup reference taken by hab_get_vchan_fromvcid() */
	hab_vchan_put(vchan);
	return ret ? ERR_PTR(ret) : message;
}
392
/* True when the driver runs in loopback mode (both pchan ends local) */
bool hab_is_loopback(void)
{
	return hab_driver.b_loopback;
}
397
398 int hab_vchan_open(struct uhab_context *ctx,
399                 unsigned int mmid,
400                 int32_t *vcid,
401                 uint32_t flags)
402 {
403         struct virtual_channel *vchan = NULL;
404         struct hab_device *dev;
405
406         pr_debug("Open mmid=%d, loopback mode=%d, loopback num=%d\n",
407                 mmid, hab_driver.b_loopback, hab_driver.loopback_num);
408
409         if (!vcid)
410                 return -EINVAL;
411
412         if (hab_is_loopback()) {
413                 if (!hab_driver.loopback_num) {
414                         hab_driver.loopback_num = 1;
415                         vchan = backend_listen(ctx, mmid);
416                 } else {
417                         hab_driver.loopback_num = 0;
418                         vchan = frontend_open(ctx, mmid, LOOPBACK_DOM);
419                 }
420         } else {
421                 dev = find_hab_device(mmid);
422
423                 if (dev) {
424                         struct physical_channel *pchan =
425                         hab_pchan_find_domid(dev, HABCFG_VMID_DONT_CARE);
426
427                         if (pchan->is_be)
428                                 vchan = backend_listen(ctx, mmid);
429                         else
430                                 vchan = frontend_open(ctx, mmid,
431                                                 HABCFG_VMID_DONT_CARE);
432                 } else {
433                         pr_err("failed to find device, mmid %d\n", mmid);
434                 }
435         }
436
437         if (IS_ERR(vchan)) {
438                 pr_err("vchan open failed over mmid=%d\n", mmid);
439                 return PTR_ERR(vchan);
440         }
441
442         pr_debug("vchan id %x, remote id %x\n",
443                 vchan->id, vchan->otherend_id);
444
445         write_lock(&ctx->ctx_lock);
446         list_add_tail(&vchan->node, &ctx->vchannels);
447         write_unlock(&ctx->ctx_lock);
448
449         *vcid = vchan->id;
450
451         return 0;
452 }
453
454 void hab_send_close_msg(struct virtual_channel *vchan)
455 {
456         struct hab_header header = {0};
457
458         if (vchan && !vchan->otherend_closed) {
459                 HAB_HEADER_SET_SIZE(header, 0);
460                 HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_CLOSE);
461                 HAB_HEADER_SET_ID(header, vchan->otherend_id);
462                 HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
463                 physical_channel_send(vchan->pchan, &header, NULL);
464         }
465 }
466
/*
 * usagecnt release callback: unlink the vchan from its context, notify
 * waiters it is stopping, and drop the list's reference.
 * Runs with the owning ctx->ctx_lock held by hab_vchan_close().
 */
static void hab_vchan_close_impl(struct kref *ref)
{
	struct virtual_channel *vchan =
		container_of(ref, struct virtual_channel, usagecnt);

	list_del(&vchan->node);
	hab_vchan_stop_notify(vchan);
	hab_vchan_put(vchan);
}
476
477
478 void hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
479 {
480         struct virtual_channel *vchan, *tmp;
481
482         if (!ctx)
483                 return;
484
485         write_lock(&ctx->ctx_lock);
486         list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
487                 if (vchan->id == vcid) {
488                         kref_put(&vchan->usagecnt, hab_vchan_close_impl);
489                         break;
490                 }
491         }
492
493         write_unlock(&ctx->ctx_lock);
494 }
495
496 /*
497  * To name the pchan - the pchan has two ends, either FE or BE locally.
498  * if is_be is true, then this is listener for BE. pchane name use remote
499  * FF's vmid from the table.
500  * if is_be is false, then local is FE as opener. pchan name use local FE's
501  * vmid (self)
502  */
503 static int hab_initialize_pchan_entry(struct hab_device *mmid_device,
504                                 int vmid_local, int vmid_remote, int is_be)
505 {
506         char pchan_name[MAX_VMID_NAME_SIZE];
507         struct physical_channel *pchan = NULL;
508         int ret;
509         int vmid = is_be ? vmid_remote : vmid_local;
510
511         if (!mmid_device) {
512                 pr_err("habdev %pK, vmid local %d, remote %d, is be %d\n",
513                                 mmid_device, vmid_local, vmid_remote, is_be);
514                 return -EINVAL;
515         }
516
517         snprintf(pchan_name, MAX_VMID_NAME_SIZE, "vm%d-", vmid);
518         strlcat(pchan_name, mmid_device->name, MAX_VMID_NAME_SIZE);
519
520         ret = habhyp_commdev_alloc((void **)&pchan, is_be, pchan_name,
521                                         vmid_remote, mmid_device);
522         if (ret == 0) {
523                 pr_debug("pchan %s added, vmid local %d, remote %d, is_be %d, total %d\n",
524                                 pchan_name, vmid_local, vmid_remote, is_be,
525                                 mmid_device->pchan_cnt);
526         } else {
527                 pr_err("failed %d to allocate pchan %s, vmid local %d, remote %d, is_be %d, total %d\n",
528                                 ret, pchan_name, vmid_local, vmid_remote,
529                                 is_be, mmid_device->pchan_cnt);
530         }
531
532         return ret;
533 }
534
535 static void hab_generate_pchan(struct local_vmid *settings, int i, int j)
536 {
537         int k, ret = 0;
538
539         pr_debug("%d as mmid %d in vmid %d\n",
540                         HABCFG_GET_MMID(settings, i, j), j, i);
541
542         switch (HABCFG_GET_MMID(settings, i, j)) {
543         case MM_AUD_START/100:
544                 for (k = MM_AUD_START + 1; k < MM_AUD_END; k++) {
545                         /*
546                          * if this local pchan end is BE, then use
547                          * remote FE's vmid. If local end is FE, then
548                          * use self vmid
549                          */
550                         ret += hab_initialize_pchan_entry(
551                                         find_hab_device(k),
552                                         settings->self,
553                                         HABCFG_GET_VMID(settings, i),
554                                         HABCFG_GET_BE(settings, i, j));
555                 }
556                 break;
557
558         case MM_CAM_START/100:
559                 for (k = MM_CAM_START + 1; k < MM_CAM_END; k++) {
560                         ret += hab_initialize_pchan_entry(
561                                         find_hab_device(k),
562                                         settings->self,
563                                         HABCFG_GET_VMID(settings, i),
564                                         HABCFG_GET_BE(settings, i, j));
565                 }
566                 break;
567
568         case MM_DISP_START/100:
569                 for (k = MM_DISP_START + 1; k < MM_DISP_END; k++) {
570                         ret += hab_initialize_pchan_entry(
571                                         find_hab_device(k),
572                                         settings->self,
573                                         HABCFG_GET_VMID(settings, i),
574                                         HABCFG_GET_BE(settings, i, j));
575                 }
576                 break;
577
578         case MM_GFX_START/100:
579                 for (k = MM_GFX_START + 1; k < MM_GFX_END; k++) {
580                         ret += hab_initialize_pchan_entry(
581                                         find_hab_device(k),
582                                         settings->self,
583                                         HABCFG_GET_VMID(settings, i),
584                                         HABCFG_GET_BE(settings, i, j));
585                 }
586                 break;
587
588         case MM_VID_START/100:
589                 for (k = MM_VID_START + 1; k < MM_VID_END; k++) {
590                         ret += hab_initialize_pchan_entry(
591                                         find_hab_device(k),
592                                         settings->self,
593                                         HABCFG_GET_VMID(settings, i),
594                                         HABCFG_GET_BE(settings, i, j));
595                 }
596                 break;
597
598         case MM_MISC_START/100:
599                 for (k = MM_MISC_START + 1; k < MM_MISC_END; k++) {
600                         ret += hab_initialize_pchan_entry(
601                                         find_hab_device(k),
602                                         settings->self,
603                                         HABCFG_GET_VMID(settings, i),
604                                         HABCFG_GET_BE(settings, i, j));
605                 }
606                 break;
607
608         case MM_QCPE_START/100:
609                 for (k = MM_QCPE_START + 1; k < MM_QCPE_END; k++) {
610                         ret += hab_initialize_pchan_entry(
611                                         find_hab_device(k),
612                                         settings->self,
613                                         HABCFG_GET_VMID(settings, i),
614                                         HABCFG_GET_BE(settings, i, j));
615                 }
616                 break;
617
618         case MM_CLK_START/100:
619                 for (k = MM_CLK_START + 1; k < MM_CLK_END; k++) {
620                         ret += hab_initialize_pchan_entry(
621                                         find_hab_device(k),
622                                         settings->self,
623                                         HABCFG_GET_VMID(settings, i),
624                                         HABCFG_GET_BE(settings, i, j));
625                 }
626                 break;
627
628         default:
629                 pr_err("failed to find mmid %d, i %d, j %d\n",
630                         HABCFG_GET_MMID(settings, i, j), i, j);
631
632                 break;
633         }
634 }
635
636 /*
637  * generate pchan list based on hab settings table.
638  * return status 0: success, otherwise failure
639  */
640 static int hab_generate_pchan_list(struct local_vmid *settings)
641 {
642         int i, j;
643
644         /* scan by valid VMs, then mmid */
645         pr_debug("self vmid is %d\n", settings->self);
646         for (i = 0; i < HABCFG_VMID_MAX; i++) {
647                 if (HABCFG_GET_VMID(settings, i) != HABCFG_VMID_INVALID &&
648                         HABCFG_GET_VMID(settings, i) != settings->self) {
649                         pr_debug("create pchans for vm %d\n", i);
650
651                         for (j = 1; j <= HABCFG_MMID_AREA_MAX; j++) {
652                                 if (HABCFG_GET_MMID(settings, i, j)
653                                                 != HABCFG_VMID_INVALID)
654                                         hab_generate_pchan(settings, i, j);
655                         }
656                 }
657         }
658
659         return 0;
660 }
661
/*
 * This function checks hypervisor plug-in readiness, reads in the hab
 * config, and configures pchans.
 * Returns 0 on success or the first fatal error code.
 */
int do_hab_parse(void)
{
	int result;
	int i;
	struct hab_device *device;
	int pchan_total = 0;

	/* first check if hypervisor plug-in is ready */
	result = hab_hypervisor_register();
	if (result) {
		pr_err("register HYP plug-in failed, ret %d\n", result);
		return result;
	}

	/* Initialize open Q before first pchan starts */
	for (i = 0; i < hab_driver.ndevices; i++) {
		device = &hab_driver.devp[i];
		init_waitqueue_head(&device->openq);
	}

	/* read in hab config and create pchans*/
	memset(&hab_driver.settings, HABCFG_VMID_INVALID,
				sizeof(hab_driver.settings));

	/* a parse failure is not fatal: fall back to default GVM settings */
	result = hab_parse(&hab_driver.settings);
	if (result) {
		pr_warn("hab_parse failed and use the default settings\n");
		fill_default_gvm_settings(&hab_driver.settings, 2,
					MM_AUD_START, MM_ID_MAX);
	}

	/* now generate hab pchan list */
	result  = hab_generate_pchan_list(&hab_driver.settings);
	if (result) {
		pr_err("generate pchan list failed, ret %d\n", result);
	} else {
		for (i = 0; i < hab_driver.ndevices; i++) {
			device = &hab_driver.devp[i];
			pchan_total += device->pchan_cnt;
		}
		pr_debug("ret %d, total %d pchans added, ndevices %d\n",
				 result, pchan_total, hab_driver.ndevices);
	}

	return result;
}
712
713 static int hab_open(struct inode *inodep, struct file *filep)
714 {
715         int result = 0;
716         struct uhab_context *ctx;
717
718         ctx = hab_ctx_alloc(0);
719
720         if (!ctx) {
721                 pr_err("hab_ctx_alloc failed\n");
722                 filep->private_data = NULL;
723                 return -ENOMEM;
724         }
725
726         filep->private_data = ctx;
727
728         return result;
729 }
730
731 static int hab_release(struct inode *inodep, struct file *filep)
732 {
733         struct uhab_context *ctx = filep->private_data;
734         struct virtual_channel *vchan, *tmp;
735
736         if (!ctx)
737                 return 0;
738
739         pr_debug("inode %pK, filep %pK\n", inodep, filep);
740
741         write_lock(&ctx->ctx_lock);
742
743         list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
744                 list_del(&vchan->node);
745                 hab_vchan_stop_notify(vchan);
746                 hab_vchan_put(vchan);
747         }
748
749         write_unlock(&ctx->ctx_lock);
750
751         hab_ctx_put(ctx);
752         filep->private_data = NULL;
753
754         return 0;
755 }
756
/*
 * Unlocked ioctl entry point. The argument struct is copied into the
 * on-stack `data` buffer when the command has the IOC_IN direction,
 * dispatched by command, and copied back out when IOC_OUT is set and
 * the handler succeeded. Commands larger than the buffer are rejected.
 */
static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct uhab_context *ctx = (struct uhab_context *)filep->private_data;
	struct hab_open *open_param;
	struct hab_close *close_param;
	struct hab_recv *recv_param;
	struct hab_send *send_param;
	struct hab_message *msg;
	void *send_data;
	unsigned char data[256] = { 0 };
	long ret = 0;

	/* copy-in phase for commands that pass data to the kernel */
	if (_IOC_SIZE(cmd) && (cmd & IOC_IN)) {
		if (_IOC_SIZE(cmd) > sizeof(data))
			return -EINVAL;

		if (copy_from_user(data, (void __user *)arg, _IOC_SIZE(cmd))) {
			pr_err("copy_from_user failed cmd=%x size=%d\n",
				cmd, _IOC_SIZE(cmd));
			return -EFAULT;
		}
	}

	switch (cmd) {
	case IOCTL_HAB_VC_OPEN:
		open_param = (struct hab_open *)data;
		ret = hab_vchan_open(ctx, open_param->mmid,
			&open_param->vcid, open_param->flags);
		break;
	case IOCTL_HAB_VC_CLOSE:
		close_param = (struct hab_close *)data;
		hab_vchan_close(ctx, close_param->vcid);
		break;
	case IOCTL_HAB_SEND:
		send_param = (struct hab_send *)data;
		/* bound the kernel bounce buffer before allocating it */
		if (send_param->sizebytes > HAB_MAX_MSG_SIZEBYTES) {
			ret = -EINVAL;
			break;
		}

		send_data = kzalloc(send_param->sizebytes, GFP_TEMPORARY);
		if (!send_data) {
			ret = -ENOMEM;
			break;
		}

		if (copy_from_user(send_data, (void __user *)send_param->data,
				send_param->sizebytes)) {
			ret = -EFAULT;
		} else {
			ret = hab_vchan_send(ctx, send_param->vcid,
						send_param->sizebytes,
						send_data,
						send_param->flags);
		}
		kfree(send_data);
		break;
	case IOCTL_HAB_RECV:
		recv_param = (struct hab_recv *)data;
		if (!recv_param->data) {
			ret = -EINVAL;
			break;
		}

		msg = hab_vchan_recv(ctx, recv_param->vcid, recv_param->flags);

		if (IS_ERR(msg)) {
			recv_param->sizebytes = 0;
			ret = PTR_ERR(msg);
			break;
		}

		/* sizebytes is in/out: caller's capacity in, actual size out */
		if (recv_param->sizebytes < msg->sizebytes) {
			recv_param->sizebytes = 0;
			ret = -EINVAL;
		} else if (copy_to_user((void __user *)recv_param->data,
					msg->data,
					msg->sizebytes)) {
			pr_err("copy_to_user failed: vc=%x size=%d\n",
				recv_param->vcid, (int)msg->sizebytes);
			recv_param->sizebytes = 0;
			ret = -EFAULT;
		} else {
			recv_param->sizebytes = msg->sizebytes;
		}

		hab_msg_free(msg);
		break;
	case IOCTL_HAB_VC_EXPORT:
		ret = hab_mem_export(ctx, (struct hab_export *)data, 0);
		break;
	case IOCTL_HAB_VC_IMPORT:
		ret = hab_mem_import(ctx, (struct hab_import *)data, 0);
		break;
	case IOCTL_HAB_VC_UNEXPORT:
		ret = hab_mem_unexport(ctx, (struct hab_unexport *)data, 0);
		break;
	case IOCTL_HAB_VC_UNIMPORT:
		ret = hab_mem_unimport(ctx, (struct hab_unimport *)data, 0);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	/* copy-out phase for commands that return data to user space */
	if (ret == 0 && _IOC_SIZE(cmd) && (cmd & IOC_OUT))
		if (copy_to_user((void __user *) arg, data, _IOC_SIZE(cmd))) {
			pr_err("copy_to_user failed: cmd=%x\n", cmd);
			ret = -EFAULT;
		}

	return ret;
}
869
/*
 * Character-device entry points for the hab driver node created in
 * hab_init(); hab_ioctl carries the message/export/import commands and
 * habmem_imp_hyp_mmap maps imported buffers into user space.
 */
static const struct file_operations hab_fops = {
	.owner = THIS_MODULE,
	.open = hab_open,
	.release = hab_release,
	.mmap = habmem_imp_hyp_mmap,
	.unlocked_ioctl = hab_ioctl
};
877
/*
 * These map/unmap sg functions are pass-throughs because the memory
 * backing the sg list is already accessible to the kernel: it comes
 * from the dedicated shared VM pool.
 */
883
884 static int hab_map_sg(struct device *dev, struct scatterlist *sgl,
885         int nelems, enum dma_data_direction dir,
886         struct dma_attrs *attrs)
887 {
888         /* return nelems directly */
889         return nelems;
890 }
891
892 static void hab_unmap_sg(struct device *dev,
893         struct scatterlist *sgl, int nelems,
894         enum dma_data_direction dir,
895         struct dma_attrs *attrs)
896 {
897         /*Do nothing */
898 }
899
/*
 * Minimal DMA ops installed on the hab device by hab_init(): the sg
 * map/unmap callbacks are pass-throughs (see above), all other ops
 * are left unset.
 */
static const struct dma_map_ops hab_dma_ops = {
	.map_sg         = hab_map_sg,
	.unmap_sg       = hab_unmap_sg,
};
904
/*
 * Module init: register the "hab" character device, create its sysfs
 * class/device node, parse the hab configuration to set up physical
 * channels, and allocate the kernel-side context.  On failure, resources
 * acquired so far are released in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init hab_init(void)
{
	int result;
	dev_t dev;

	/* .major actually stores the full dev_t returned for the region base */
	result = alloc_chrdev_region(&hab_driver.major, 0, 1, "hab");

	if (result < 0) {
		pr_err("alloc_chrdev_region failed: %d\n", result);
		return result;
	}

	cdev_init(&hab_driver.cdev, &hab_fops);
	hab_driver.cdev.owner = THIS_MODULE;
	hab_driver.cdev.ops = &hab_fops;
	dev = MKDEV(MAJOR(hab_driver.major), 0);

	result = cdev_add(&hab_driver.cdev, dev, 1);

	if (result < 0) {
		/* cdev never went live: only the chrdev region needs release */
		unregister_chrdev_region(dev, 1);
		pr_err("cdev_add failed: %d\n", result);
		return result;
	}

	hab_driver.class = class_create(THIS_MODULE, "hab");

	if (IS_ERR(hab_driver.class)) {
		result = PTR_ERR(hab_driver.class);
		pr_err("class_create failed: %d\n", result);
		goto err;
	}

	hab_driver.dev = device_create(hab_driver.class, NULL,
					dev, &hab_driver, "hab");

	if (IS_ERR(hab_driver.dev)) {
		result = PTR_ERR(hab_driver.dev);
		pr_err("device_create failed: %d\n", result);
		goto err;
	}

	/* read in hab config, then configure pchans */
	result = do_hab_parse();

	if (!result) {
		/*
		 * Kernel-owned context; the argument presumably marks it
		 * as privileged/kernel -- see hab_ctx_alloc for semantics.
		 */
		hab_driver.kctx = hab_ctx_alloc(1);
		if (!hab_driver.kctx) {
			pr_err("hab_ctx_alloc failed");
			result = -ENOMEM;
			hab_hypervisor_unregister();
			goto err;
		}

		/* pass-through sg ops: shared-pool memory needs no real mapping */
		set_dma_ops(hab_driver.dev, &hab_dma_ops);

		return result;
	}

err:
	/*
	 * hab_driver has static storage, so .dev/.class are NULL until
	 * assigned; IS_ERR_OR_NULL() covers both "never created" and
	 * "create returned ERR_PTR" states on this unwind path.
	 */
	if (!IS_ERR_OR_NULL(hab_driver.dev))
		device_destroy(hab_driver.class, dev);
	if (!IS_ERR_OR_NULL(hab_driver.class))
		class_destroy(hab_driver.class);
	cdev_del(&hab_driver.cdev);
	unregister_chrdev_region(dev, 1);

	pr_err("Error in hab init, result %d\n", result);
	return result;
}
975
/*
 * Module exit: tear down in reverse order of hab_init() -- hypervisor
 * layer first, then drop the kernel context reference, then remove the
 * device node, class, cdev, and finally the reserved chrdev region.
 */
static void __exit hab_exit(void)
{
	dev_t dev;

	hab_hypervisor_unregister();
	hab_ctx_put(hab_driver.kctx);
	dev = MKDEV(MAJOR(hab_driver.major), 0);
	device_destroy(hab_driver.class, dev);
	class_destroy(hab_driver.class);
	cdev_del(&hab_driver.cdev);
	unregister_chrdev_region(dev, 1);
}
988
/* Registered at subsys level so hab comes up before ordinary module_init
 * drivers that may depend on it.
 */
subsys_initcall(hab_init);
module_exit(hab_exit);

MODULE_DESCRIPTION("Hypervisor abstraction layer");
MODULE_LICENSE("GPL v2");