soc: hab: change lifecycle of exp_id from vchan to ctx
drivers/soc/qcom/hab/hab_mem_linux.c
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include "hab.h"
#include <linux/fdtable.h>
#include <linux/dma-buf.h>
#include "hab_grantable.h"

/* bookkeeping for one imported memory region on the importer side */
struct pages_list {
        struct list_head list;
        struct page **pages;
        long npages;
        uint64_t index; /* for mmap first call */
        int kernel;
        void *kva;
        void *uva;
        int refcntk;
        int refcntu;
        uint32_t userflags;
        struct file *filp_owner;
        struct file *filp_mapper;
        struct dma_buf *dmabuf;
        int32_t export_id;
        int32_t vcid;
        struct physical_channel *pchan;
};

/* per-context importer state: every region imported through one ctx */
struct importer_context {
        int cnt; /* pages allocated for local file */
        struct list_head imp_list;
        struct file *filp;
        rwlock_t implist_lock;
};

void *habmm_hyp_allocate_grantable(int page_count,
                uint32_t *sizebytes)
{
        if (!sizebytes || !page_count)
                return NULL;

        *sizebytes = page_count * sizeof(struct grantable);
        return vmalloc(*sizebytes);
}

static int match_file(const void *p, struct file *file, unsigned int fd)
{
        /*
         * We must return fd + 1 because iterate_fd stops searching on
         * non-zero return, but 0 is a valid fd.
         */
        return (p == file) ? (fd + 1) : 0;
}
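
/*
 * Usage sketch (illustrative, not additional driver code): because of the
 * +1 bias, a caller recovers the real fd by subtracting one again, exactly
 * as habmem_get_dma_pages_from_va() does below:
 *
 *      fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
 *      if (fd)
 *              dmabuf = dma_buf_get(fd - 1);
 */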

static int habmem_get_dma_pages_from_va(unsigned long address,
                int page_count,
                struct page **pages)
{
        struct vm_area_struct *vma;
        struct dma_buf *dmabuf = NULL;
        unsigned long offset;
        unsigned long page_offset;
        struct scatterlist *s;
        struct sg_table *sg_table = NULL;
        struct dma_buf_attachment *attach = NULL;
        struct page *page;
        int i, j, rc = 0;
        int fd;

        vma = find_vma(current->mm, address);
        if (!vma || !vma->vm_file) {
                pr_err("cannot find vma\n");
                rc = -EBADF;
                goto err;
        }

        /* Look for the fd that matches the vma file */
        fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
        if (fd == 0) {
                pr_err("iterate_fd failed\n");
                rc = -EBADF;
                goto err;
        }

        offset = address - vma->vm_start;
        page_offset = offset / PAGE_SIZE;

        dmabuf = dma_buf_get(fd - 1);
        if (IS_ERR_OR_NULL(dmabuf)) {
                pr_err("dma_buf_get failed fd %d ret %pK\n", fd - 1, dmabuf);
                rc = -EBADF;
                goto err;
        }

        attach = dma_buf_attach(dmabuf, hab_driver.dev);
        if (IS_ERR_OR_NULL(attach)) {
                pr_err("dma_buf_attach failed\n");
                rc = -EBADF;
                goto err;
        }

        sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (IS_ERR_OR_NULL(sg_table)) {
                pr_err("dma_buf_map_attachment failed\n");
                rc = -EBADF;
                goto err;
        }

        for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
                page = sg_page(s);

                for (j = page_offset; j < (s->length >> PAGE_SHIFT); j++) {
                        pages[rc] = nth_page(page, j);
                        rc++;
                        if (rc >= page_count)
                                break;
                }
                if (rc >= page_count)
                        break;

                if (page_offset > (s->length >> PAGE_SHIFT)) {
                        /* carry over the remaining offset to the next entry */
                        page_offset -= (s->length >> PAGE_SHIFT);
                } else {
                        /*
                         * the offset falls within this entry, so no offset
                         * is left over for the next entry
                         */
                        page_offset = 0;
                }
        }

err:
        if (!IS_ERR_OR_NULL(sg_table))
                dma_buf_unmap_attachment(attach, sg_table, DMA_TO_DEVICE);
        if (!IS_ERR_OR_NULL(attach))
                dma_buf_detach(dmabuf, attach);
        if (!IS_ERR_OR_NULL(dmabuf))
                dma_buf_put(dmabuf);
        return rc;
}

static int habmem_get_dma_pages_from_fd(int32_t fd,
                int page_count,
                struct page **pages)
{
        struct dma_buf *dmabuf = NULL;
        struct scatterlist *s;
        struct sg_table *sg_table = NULL;
        struct dma_buf_attachment *attach = NULL;
        struct page *page;
        int i, j, rc = 0;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf)) {
                pr_err("dma_buf_get failed fd %d ret %pK\n", fd, dmabuf);
                rc = -EBADF;
                goto err;
        }

        attach = dma_buf_attach(dmabuf, hab_driver.dev);
        if (IS_ERR_OR_NULL(attach)) {
                pr_err("dma_buf_attach failed\n");
                rc = -EBADF;
                goto err;
        }

        sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (IS_ERR_OR_NULL(sg_table)) {
                pr_err("dma_buf_map_attachment failed\n");
                rc = -EBADF;
                goto err;
        }

        for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
                page = sg_page(s);
                pr_debug("sgl length %d\n", s->length);

                for (j = 0; j < (s->length >> PAGE_SHIFT); j++) {
                        pages[rc] = nth_page(page, j);
                        rc++;
                        if (rc >= page_count)
                                break;
                }
                /*
                 * stop once the requested number of pages is collected;
                 * without this outer break a dma-buf larger than the
                 * request would overrun the pages array
                 */
                if (rc >= page_count)
                        break;
        }

err:
        if (!IS_ERR_OR_NULL(sg_table))
                dma_buf_unmap_attachment(attach, sg_table, DMA_TO_DEVICE);
        if (!IS_ERR_OR_NULL(attach))
                dma_buf_detach(dmabuf, attach);
        if (!IS_ERR_OR_NULL(dmabuf))
                dma_buf_put(dmabuf);
        return rc;
}

/*
 * exporter - grant & revoke
 * generate a shareable page list from a CPU-friendly user virtual address.
 * The result is stored as an array in ppdata and returned to the caller.
 * A 4KB page size is assumed.
 */
int habmem_hyp_grant_user(unsigned long address,
                int page_count,
                int flags,
                int remotedom,
                void *ppdata,
                int *compressed,
                int *compressed_size)
{
        int i, ret = 0;
        struct grantable *item = (struct grantable *)ppdata;
        struct page **pages;

        pages = vmalloc(page_count * sizeof(struct page *));
        if (!pages)
                return -ENOMEM;

        down_read(&current->mm->mmap_sem);

        if (HABMM_EXP_MEM_TYPE_DMA & flags) {
                ret = habmem_get_dma_pages_from_va(address,
                        page_count,
                        pages);
        } else if (HABMM_EXPIMP_FLAGS_FD & flags) {
                ret = habmem_get_dma_pages_from_fd(address,
                        page_count,
                        pages);
        } else {
                ret = get_user_pages(current, current->mm,
                        address,
                        page_count,
                        1,
                        1,
                        pages,
                        NULL);
        }

        /* a short pin would leave trailing pages[] entries uninitialized */
        if (ret == page_count) {
                for (i = 0; i < page_count; i++)
                        item[i].pfn = page_to_pfn(pages[i]);
        } else {
                pr_err("failed to get %d user pages, got %d, flags %d\n",
                        page_count, ret, flags);
                if (ret >= 0)
                        ret = -EFAULT;
        }

        vfree(pages);
        up_read(&current->mm->mmap_sem);
        return ret;
}
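
/*
 * Usage sketch (hypothetical caller, for illustration only): an export
 * path would size the PFN table first, then let this routine fill it in.
 * Names other than the two functions shown here are made up:
 *
 *      uint32_t bytes;
 *      struct grantable *tbl =
 *              habmm_hyp_allocate_grantable(npages, &bytes);
 *
 *      if (tbl && habmem_hyp_grant_user(uva, npages, flags, remotedom,
 *                      tbl, NULL, NULL) == npages)
 *              share_with_remote(tbl);         [hypothetical helper]
 */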
/*
 * exporter - grant & revoke
 * generate a shareable page list from a kernel virtual "address"
 * (vmalloc or linear-mapped). The result is stored as an array in
 * ppdata and returned to the caller. A 4KB page size is assumed.
 */
int habmem_hyp_grant(unsigned long address,
                int page_count,
                int flags,
                int remotedom,
                void *ppdata,
                int *compressed,
                int *compressed_size)
{
        int i;
        struct grantable *item;
        void *kva = (void *)(uintptr_t)address;
        int is_vmalloc = is_vmalloc_addr(kva);

        item = (struct grantable *)ppdata;

        for (i = 0; i < page_count; i++) {
                kva = (void *)(uintptr_t)(address + i * PAGE_SIZE);
                if (is_vmalloc)
                        item[i].pfn = page_to_pfn(vmalloc_to_page(kva));
                else
                        item[i].pfn = page_to_pfn(virt_to_page(kva));
        }

        return 0;
}

int habmem_hyp_revoke(void *expdata, uint32_t count)
{
        return 0;
}

void *habmem_imp_hyp_open(void)
{
        struct importer_context *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return NULL;

        rwlock_init(&priv->implist_lock);
        INIT_LIST_HEAD(&priv->imp_list);

        return priv;
}

void habmem_imp_hyp_close(void *imp_ctx, int kernel)
{
        struct importer_context *priv = imp_ctx;
        struct pages_list *pglist, *pglist_tmp;

        if (!priv)
                return;

        /* teardown path: assumes no concurrent users of imp_list remain */
        list_for_each_entry_safe(pglist, pglist_tmp, &priv->imp_list, list) {
                if (kernel && pglist->kva)
                        vunmap(pglist->kva);

                list_del(&pglist->list);
                priv->cnt--;

                kfree(pglist->pages);
                kfree(pglist);
        }

        kfree(priv);
}
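
/*
 * Importer lifecycle sketch (illustrative; exp and param come from the
 * surrounding hab driver, error handling omitted):
 *
 *      void *ctx = habmem_imp_hyp_open();
 *
 *      habmem_imp_hyp_map(ctx, param, exp, kernel);    [map an export]
 *      habmm_imp_hyp_unmap(ctx, exp, kernel);          [unmap it again]
 *
 *      habmem_imp_hyp_close(ctx, kernel);
 */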

static struct sg_table *hab_mem_map_dma_buf(
        struct dma_buf_attachment *attachment,
        enum dma_data_direction direction)
{
        struct dma_buf *dmabuf = attachment->dmabuf;
        struct pages_list *pglist = dmabuf->priv;
        struct sg_table *sgt;
        struct scatterlist *sg;
        int i;
        int ret = 0;
        struct page **pages = pglist->pages;

        sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(sgt, pglist->npages, GFP_KERNEL);
        if (ret) {
                kfree(sgt);
                return ERR_PTR(-ENOMEM);
        }

        for_each_sg(sgt->sgl, sg, pglist->npages, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        return sgt;
}

static void hab_mem_unmap_dma_buf(struct dma_buf_attachment *attachment,
        struct sg_table *sgt,
        enum dma_data_direction direction)
{
        sg_free_table(sgt);
        kfree(sgt);
}

static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page;
        struct pages_list *pglist;
        unsigned long offset, fault_offset, fault_index;
        int page_idx;

        if (vma == NULL)
                return VM_FAULT_SIGBUS;

        offset = vma->vm_pgoff << PAGE_SHIFT;

        /* physical address */
        fault_offset =
                (unsigned long)vmf->virtual_address - vma->vm_start + offset;
        fault_index = fault_offset >> PAGE_SHIFT;

        pglist = vma->vm_private_data;

        page_idx = fault_index - pglist->index;
        if (page_idx < 0 || page_idx >= pglist->npages) {
                pr_err("out of page array! page_idx %d, pg cnt %ld\n",
                        page_idx, pglist->npages);
                return VM_FAULT_SIGBUS;
        }

        page = pglist->pages[page_idx];
        get_page(page);
        vmf->page = page;
        return 0;
}

static void hab_map_open(struct vm_area_struct *vma)
{
}

static void hab_map_close(struct vm_area_struct *vma)
{
}

static const struct vm_operations_struct habmem_vm_ops = {
        .fault = hab_map_fault,
        .open = hab_map_open,
        .close = hab_map_close,
};

static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct pages_list *pglist = dmabuf->priv;
        uint32_t obj_size = pglist->npages << PAGE_SHIFT;

        /* an mmap handler returns an errno, not a VM_FAULT_* code */
        if (vma == NULL)
                return -EINVAL;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &habmem_vm_ops;
        vma->vm_private_data = pglist;
        vma->vm_flags |= VM_MIXEDMAP;

        return 0;
}

static void hab_mem_dma_buf_release(struct dma_buf *dmabuf)
{
}

static void *hab_mem_dma_buf_kmap(struct dma_buf *dmabuf,
                unsigned long offset)
{
        return NULL;
}

static void hab_mem_dma_buf_kunmap(struct dma_buf *dmabuf,
                unsigned long offset,
                void *ptr)
{
}

static struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = hab_mem_map_dma_buf,
        .unmap_dma_buf = hab_mem_unmap_dma_buf,
        .mmap = hab_mem_mmap,
        .release = hab_mem_dma_buf_release,
        .kmap_atomic = hab_mem_dma_buf_kmap,
        .kunmap_atomic = hab_mem_dma_buf_kunmap,
        .kmap = hab_mem_dma_buf_kmap,
        .kunmap = hab_mem_dma_buf_kunmap,
};
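
/*
 * Worked example of the compressed PFN table that the three importers
 * below walk (fields as used by the walk; the numeric values are made up):
 *
 *      first_pfn = 0x1000
 *      region[0] = { .size = 3, .space = 5 }
 *      region[1] = { .size = 2, .space = 0 }
 *
 * expands to pages at PFNs 0x1000..0x1002 (3 pages), then a hole of
 * 5 PFNs, then 0x1008..0x1009 (2 pages) - payload_count = 5 in total.
 */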
static int habmem_imp_hyp_map_fd(void *imp_ctx,
        struct export_desc *exp,
        uint32_t userflags,
        int32_t *pfd)
{
        struct page **pages;
        struct compressed_pfns *pfn_table =
                        (struct compressed_pfns *)exp->payload;
        struct pages_list *pglist;
        struct importer_context *priv = imp_ctx;
        unsigned long pfn;
        int i, j, k = 0;
        int32_t fd, size;
        int ret;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        if (!pfn_table || !priv)
                return -EINVAL;
        size = exp->payload_count * sizeof(struct page *);
        pages = kmalloc(size, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
        if (!pglist) {
                kfree(pages);
                return -ENOMEM;
        }

        /* expand the compressed PFN table into a flat page array */
        pfn = pfn_table->first_pfn;
        for (i = 0; i < pfn_table->nregions; i++) {
                for (j = 0; j < pfn_table->region[i].size; j++) {
                        pages[k] = pfn_to_page(pfn + j);
                        k++;
                }
                pfn += pfn_table->region[i].size + pfn_table->region[i].space;
        }

        pglist->pages = pages;
        pglist->npages = exp->payload_count;
        pglist->kernel = 0;
        pglist->index = 0;
        pglist->refcntk = pglist->refcntu = 0;
        pglist->userflags = userflags;
        pglist->export_id = exp->export_id;
        pglist->vcid = exp->vcid_remote;
        pglist->pchan = exp->pchan;

        exp_info.ops = &dma_buf_ops;
        exp_info.size = exp->payload_count << PAGE_SHIFT;
        exp_info.flags = O_RDWR;
        exp_info.priv = pglist;
        pglist->dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(pglist->dmabuf)) {
                ret = PTR_ERR(pglist->dmabuf);
                kfree(pages);
                kfree(pglist);
                return ret;
        }

        fd = dma_buf_fd(pglist->dmabuf, O_CLOEXEC);
        if (fd < 0) {
                dma_buf_put(pglist->dmabuf);
                kfree(pages);
                kfree(pglist);
                return -EINVAL;
        }

        pglist->refcntk++;

        write_lock(&priv->implist_lock);
        list_add_tail(&pglist->list, &priv->imp_list);
        priv->cnt++;
        write_unlock(&priv->implist_lock);

        *pfd = fd;

        return 0;
}

static int habmem_imp_hyp_map_kva(void *imp_ctx,
        struct export_desc *exp,
        uint32_t userflags,
        void **pkva)
{
        struct page **pages;
        struct compressed_pfns *pfn_table =
                (struct compressed_pfns *)exp->payload;
        struct pages_list *pglist;
        struct importer_context *priv = imp_ctx;
        unsigned long pfn;
        int i, j, k = 0, size;
        pgprot_t prot = PAGE_KERNEL;

        if (!pfn_table || !priv)
                return -EINVAL;
        size = exp->payload_count * sizeof(struct page *);
        pages = kmalloc(size, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
        pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
        if (!pglist) {
                kfree(pages);
                return -ENOMEM;
        }

        /* expand the compressed PFN table into a flat page array */
        pfn = pfn_table->first_pfn;
        for (i = 0; i < pfn_table->nregions; i++) {
                for (j = 0; j < pfn_table->region[i].size; j++) {
                        pages[k] = pfn_to_page(pfn + j);
                        k++;
                }
                pfn += pfn_table->region[i].size + pfn_table->region[i].space;
        }

        pglist->pages = pages;
        pglist->npages = exp->payload_count;
        pglist->kernel = 1;
        pglist->refcntk = pglist->refcntu = 0;
        pglist->userflags = userflags;
        pglist->export_id = exp->export_id;
        pglist->vcid = exp->vcid_remote;
        pglist->pchan = exp->pchan;

        if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
                prot = pgprot_writecombine(prot);

        pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
        if (pglist->kva == NULL) {
                kfree(pages);
                pr_err("%ld pages vmap failed\n", pglist->npages);
                kfree(pglist);
                return -ENOMEM;
        }

        pr_debug("%ld pages vmap pass, return %p\n",
                        pglist->npages, pglist->kva);

        pglist->refcntk++;

        write_lock(&priv->implist_lock);
        list_add_tail(&pglist->list, &priv->imp_list);
        priv->cnt++;
        write_unlock(&priv->implist_lock);

        *pkva = pglist->kva;

        return 0;
}

static int habmem_imp_hyp_map_uva(void *imp_ctx,
        struct export_desc *exp,
        uint32_t userflags,
        uint64_t *index)
{
        struct page **pages;
        struct compressed_pfns *pfn_table =
                (struct compressed_pfns *)exp->payload;
        struct pages_list *pglist;
        struct importer_context *priv = imp_ctx;
        unsigned long pfn;
        int i, j, k = 0, size;

        if (!pfn_table || !priv)
                return -EINVAL;
        size = exp->payload_count * sizeof(struct page *);
        pages = kmalloc(size, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
        if (!pglist) {
                kfree(pages);
                return -ENOMEM;
        }

        /* expand the compressed PFN table into a flat page array */
        pfn = pfn_table->first_pfn;
        for (i = 0; i < pfn_table->nregions; i++) {
                for (j = 0; j < pfn_table->region[i].size; j++) {
                        pages[k] = pfn_to_page(pfn + j);
                        k++;
                }
                pfn += pfn_table->region[i].size + pfn_table->region[i].space;
        }

        pglist->pages = pages;
        pglist->npages = exp->payload_count;
        /* the first page's PFN doubles as the mmap offset cookie */
        pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT;
        pglist->refcntk = pglist->refcntu = 0;
        pglist->userflags = userflags;
        pglist->export_id = exp->export_id;
        pglist->vcid = exp->vcid_remote;
        pglist->pchan = exp->pchan;

        write_lock(&priv->implist_lock);
        list_add_tail(&pglist->list, &priv->imp_list);
        priv->cnt++;
        write_unlock(&priv->implist_lock);

        *index = pglist->index << PAGE_SHIFT;

        return 0;
}

int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
                struct export_desc *exp, int kernel)
{
        int ret = 0;

        if (kernel)
                ret = habmem_imp_hyp_map_kva(imp_ctx, exp,
                                        param->flags,
                                        (void **)&param->kva);
        else if (param->flags & HABMM_EXPIMP_FLAGS_FD)
                /* for userspace fd imports, the fd travels in the kva field */
                ret = habmem_imp_hyp_map_fd(imp_ctx, exp,
                                        param->flags,
                                        (int32_t *)&param->kva);
        else
                ret = habmem_imp_hyp_map_uva(imp_ctx, exp,
                                        param->flags,
                                        &param->index);

        return ret;
}

int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel)
{
        struct importer_context *priv = imp_ctx;
        struct pages_list *pglist, *tmp;
        int found = 0;

        write_lock(&priv->implist_lock);
        list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
                if (pglist->export_id == exp->export_id &&
                        pglist->pchan == exp->pchan) {
                        found = 1;
                        list_del(&pglist->list);
                        priv->cnt--;
                        break;
                }
        }
        write_unlock(&priv->implist_lock);

        if (!found) {
                pr_err("failed to find export id %u\n", exp->export_id);
                return -EINVAL;
        }

        pr_debug("detach pglist %p, kernel %d, list cnt %d\n",
                pglist, pglist->kernel, priv->cnt);

        if (pglist->kva)
                vunmap(pglist->kva);

        if (pglist->dmabuf)
                dma_buf_put(pglist->dmabuf);

        kfree(pglist->pages);
        kfree(pglist);

        return 0;
}

int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct uhab_context *ctx = (struct uhab_context *)filp->private_data;
        struct importer_context *imp_ctx = ctx->import_ctx;
        long length = vma->vm_end - vma->vm_start;
        struct pages_list *pglist;
        int bfound = 0;

        read_lock(&imp_ctx->implist_lock);
        list_for_each_entry(pglist, &imp_ctx->imp_list, list) {
                if (pglist->index == vma->vm_pgoff) {
                        bfound = 1;
                        break;
                }
        }
        read_unlock(&imp_ctx->implist_lock);

        if (!bfound) {
                pr_err("failed to find pglist vm_pgoff %lu\n", vma->vm_pgoff);
                return -EINVAL;
        }

        if (length > pglist->npages * PAGE_SIZE) {
                pr_err("vma length %ld exceeds page list size %ld\n",
                        length, pglist->npages * PAGE_SIZE);
                return -EINVAL;
        }

        vma->vm_ops = &habmem_vm_ops;
        vma->vm_private_data = pglist;

        if (!(pglist->userflags & HABMM_IMPORT_FLAGS_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        return 0;
}
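
/*
 * Userspace view (illustrative): the index returned by the UVA import
 * path is passed back as the mmap offset, which is how the vm_pgoff
 * lookup above finds the right pglist. hab_fd is a placeholder for the
 * opened hab device:
 *
 *      void *va = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, hab_fd, (off_t)index);
 */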

int habmm_imp_hyp_map_check(void *imp_ctx, struct export_desc *exp)
{
        struct importer_context *priv = imp_ctx;
        struct pages_list *pglist;
        int found = 0;

        read_lock(&priv->implist_lock);
        list_for_each_entry(pglist, &priv->imp_list, list) {
                if (pglist->export_id == exp->export_id &&
                        pglist->pchan == exp->pchan) {
                        found = 1;
                        break;
                }
        }
        read_unlock(&priv->implist_lock);

        return found;
}