
staging: vc04_services: prevent integer overflow in create_pagelist()
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c (android-x86/kernel.git)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#include "vchiq_arm.h"
#include "vchiq_connected.h"
#include "vchiq_pagelist.h"

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1

#define BELL0   0x00
#define BELL2   0x08

struct vchiq_2835_state {
        int inited;
        struct vchiq_arm_state arm_state;
};

struct vchiq_pagelist_info {
        struct pagelist *pagelist;
        size_t pagelist_buffer_size;
        dma_addr_t dma_addr;
        enum dma_data_direction dma_dir;
        unsigned int num_pages;
        unsigned int pages_need_release;
        struct page **pages;
        struct scatterlist *scatterlist;
        unsigned int scatterlist_mapped;
};

static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect.  That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
static struct device *g_dev;

static DEFINE_SEMAPHORE(g_free_fragments_mutex);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id);

static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type);

static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
              int actual);

int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
        struct device *dev = &pdev->dev;
        struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
        struct rpi_firmware *fw = drvdata->fw;
        struct vchiq_slot_zero *vchiq_slot_zero;
        struct resource *res;
        void *slot_mem;
        dma_addr_t slot_phys;
        u32 channelbase;
        int slot_mem_size, frag_mem_size;
        int err, irq, i;

        /*
         * VCHI messages between the CPU and firmware use
         * 32-bit bus addresses.
         */
        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

        if (err < 0)
                return err;

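        /*
         * Each bulk-read fragment buffer holds up to one head and one
         * tail partial cache line (see free_pagelist() below), hence
         * twice the cache line size.
         */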
        g_cache_line_size = drvdata->cache_line_size;
        g_fragments_size = 2 * g_cache_line_size;

        /* Allocate space for the channels in coherent memory */
        slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
        frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

        slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
                                       &slot_phys, GFP_KERNEL);
        if (!slot_mem) {
                dev_err(dev, "could not allocate DMA memory\n");
                return -ENOMEM;
        }

        WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

        vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
        if (!vchiq_slot_zero)
                return -EINVAL;

        vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
                (int)slot_phys + slot_mem_size;
        vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
                MAX_FRAGMENTS;

        g_fragments_base = (char *)slot_mem + slot_mem_size;

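        /*
         * Thread the free fragments into an intrusive singly linked
         * list: the first bytes of each free fragment store a pointer
         * to the next free fragment, and g_free_fragments points at
         * the head.
         */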
        g_free_fragments = g_fragments_base;
        for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
                *(char **)&g_fragments_base[i * g_fragments_size] =
                        &g_fragments_base[(i + 1) * g_fragments_size];
        }
        *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
        sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

        if (vchiq_init_state(state, vchiq_slot_zero) != VCHIQ_SUCCESS)
                return -EINVAL;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        g_regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(g_regs))
                return PTR_ERR(g_regs);

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                dev_err(dev, "failed to get IRQ\n");
                return irq;
        }

        err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
                               "VCHIQ doorbell", state);
        if (err) {
                dev_err(dev, "failed to register irq=%d\n", irq);
                return err;
        }

        /* Send the base address of the slots to VideoCore */
        channelbase = slot_phys;
        err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
                                    &channelbase, sizeof(channelbase));
        if (err || channelbase) {
                dev_err(dev, "failed to set channelbase\n");
                return err ? : -ENXIO;
        }

        g_dev = dev;
        vchiq_log_info(vchiq_arm_log_level,
                "vchiq_init - done (slots %pK, phys %pad)",
                vchiq_slot_zero, &slot_phys);

        vchiq_call_connected_callbacks();

        return 0;
}

VCHIQ_STATUS_T
vchiq_platform_init_state(struct vchiq_state *state)
{
        VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
        struct vchiq_2835_state *platform_state;

        state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
        if (!state->platform_state)
                return VCHIQ_ERROR;

        platform_state = (struct vchiq_2835_state *)state->platform_state;

        platform_state->inited = 1;
        status = vchiq_arm_init_state(state, &platform_state->arm_state);

        if (status != VCHIQ_SUCCESS)
                platform_state->inited = 0;

        return status;
}

struct vchiq_arm_state *
vchiq_platform_get_arm_state(struct vchiq_state *state)
{
        struct vchiq_2835_state *platform_state;

        platform_state = (struct vchiq_2835_state *)state->platform_state;

        WARN_ON_ONCE(!platform_state->inited);

        return &platform_state->arm_state;
}

void
remote_event_signal(struct remote_event *event)
{
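        /*
         * Presumably the wmb() publishes prior writes to the shared
         * slot memory before 'fired' is set, and the dsb() ensures the
         * 'fired' store has completed before the doorbell register is
         * written, so the VPU never observes the bell ahead of the
         * data it announces.
         */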
        wmb();

        event->fired = 1;

        dsb(sy);         /* data barrier operation */

        if (event->armed)
                writel(0, g_regs + BELL2); /* trigger vc interrupt */
}

VCHIQ_STATUS_T
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
                        int dir)
{
        struct vchiq_pagelist_info *pagelistinfo;

        pagelistinfo = create_pagelist((char __user *)offset, size,
                                       (dir == VCHIQ_BULK_RECEIVE)
                                       ? PAGELIST_READ
                                       : PAGELIST_WRITE);

        if (!pagelistinfo)
                return VCHIQ_ERROR;

        bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;

        /*
         * Store the pagelistinfo address in remote_data,
         * which isn't used by the slave.
         */
        bulk->remote_data = pagelistinfo;

        return VCHIQ_SUCCESS;
}

void
vchiq_complete_bulk(struct vchiq_bulk *bulk)
{
        if (bulk && bulk->remote_data && bulk->actual)
                free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
                              bulk->actual);
}

void
vchiq_dump_platform_state(void *dump_context)
{
        char buf[80];
        int len;

        len = snprintf(buf, sizeof(buf),
                "  Platform: 2835 (VC master)");
        vchiq_dump(dump_context, buf, len + 1);
}

VCHIQ_STATUS_T
vchiq_platform_suspend(struct vchiq_state *state)
{
        return VCHIQ_ERROR;
}

VCHIQ_STATUS_T
vchiq_platform_resume(struct vchiq_state *state)
{
        return VCHIQ_SUCCESS;
}

void
vchiq_platform_paused(struct vchiq_state *state)
{
}

void
vchiq_platform_resumed(struct vchiq_state *state)
{
}

int
vchiq_platform_videocore_wanted(struct vchiq_state *state)
{
        return 1; /* autosuspend not supported - videocore always wanted */
}

int
vchiq_platform_use_suspend_timer(void)
{
        return 0;
}

void
vchiq_dump_platform_use_state(struct vchiq_state *state)
{
        vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
}

void
vchiq_platform_handle_timeout(struct vchiq_state *state)
{
        (void)state;
}

/*
 * Local functions
 */

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
        struct vchiq_state *state = dev_id;
        irqreturn_t ret = IRQ_NONE;
        unsigned int status;

        /* Read (and clear) the doorbell */
        status = readl(g_regs + BELL0);

        if (status & 0x4) {  /* Was the doorbell rung? */
                remote_event_pollall(state);
                ret = IRQ_HANDLED;
        }

        return ret;
}

static void
cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
        if (pagelistinfo->scatterlist_mapped) {
                dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
                             pagelistinfo->num_pages, pagelistinfo->dma_dir);
        }

        if (pagelistinfo->pages_need_release) {
                unsigned int i;

                for (i = 0; i < pagelistinfo->num_pages; i++)
                        put_page(pagelistinfo->pages[i]);
        }

        dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
                          pagelistinfo->pagelist, pagelistinfo->dma_addr);
}

/* There is a potential problem with partial cache lines at the ends of
 * the block when reading: if the CPU has accessed anything in the same
 * cache line, it may have pulled stale data into the cache, obscuring
 * the new data underneath. We solve this by transferring the partial
 * cache lines separately, and letting the ARM copy them into the
 * cached area afterwards.
 */
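/* Worked example, assuming 32-byte cache lines: a 100-byte read whose
 * buffer starts 5 bytes into a cache line has a 27-byte partial head
 * ((32 - 5) & 31) and a 9-byte partial tail ((5 + 100) & 31).  The VPU
 * places those bytes in a fragment buffer rather than DMAing them, and
 * free_pagelist() copies them into the user buffer with the CPU.
 */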

static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type)
{
        struct pagelist *pagelist;
        struct vchiq_pagelist_info *pagelistinfo;
        struct page **pages;
        u32 *addrs;
        unsigned int num_pages, offset, i, k;
        int actual_pages;
        size_t pagelist_size;
        struct scatterlist *scatterlist, *sg;
        int dma_buffers;
        dma_addr_t dma_addr;
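
        /*
         * Bound the user-supplied count so that count + offset below
         * cannot exceed INT_MAX and num_pages fits comfortably in an
         * unsigned int.
         */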
        if (count >= INT_MAX - PAGE_SIZE)
                return NULL;

        offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
        num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

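        /*
         * Reject page counts for which the pagelist_size arithmetic
         * below would wrap around SIZE_MAX: the single allocation must
         * hold the pagelist header, one u32 bus address, one struct
         * page pointer and one scatterlist entry per page, plus the
         * trailing vchiq_pagelist_info.
         */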
        if (num_pages > (SIZE_MAX - sizeof(struct pagelist) -
                         sizeof(struct vchiq_pagelist_info)) /
                        (sizeof(u32) + sizeof(pages[0]) +
                         sizeof(struct scatterlist)))
                return NULL;

        pagelist_size = sizeof(struct pagelist) +
                        (num_pages * sizeof(u32)) +
                        (num_pages * sizeof(pages[0])) +
                        (num_pages * sizeof(struct scatterlist)) +
                        sizeof(struct vchiq_pagelist_info);

        /* Allocate enough storage to hold the page pointers and the
         * page list
         */
        pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
                                      GFP_KERNEL);

        vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

        if (!pagelist)
                return NULL;

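        /*
         * Carve the single coherent allocation up in place:
         *
         *   [ struct pagelist | addrs[num_pages] | pages[num_pages] |
         *     scatterlist[num_pages] | struct vchiq_pagelist_info ]
         *
         * The leading pagelist (header plus addrs[]) is what the VPU
         * consumes; the remainder is ARM-side bookkeeping carried in
         * the same buffer.
         */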
        addrs           = pagelist->addrs;
        pages           = (struct page **)(addrs + num_pages);
        scatterlist     = (struct scatterlist *)(pages + num_pages);
        pagelistinfo    = (struct vchiq_pagelist_info *)
                          (scatterlist + num_pages);

        pagelist->length = count;
        pagelist->type = type;
        pagelist->offset = offset;

        /* Populate the fields of the pagelistinfo structure */
        pagelistinfo->pagelist = pagelist;
        pagelistinfo->pagelist_buffer_size = pagelist_size;
        pagelistinfo->dma_addr = dma_addr;
        pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
                                 DMA_TO_DEVICE : DMA_FROM_DEVICE;
        pagelistinfo->num_pages = num_pages;
        pagelistinfo->pages_need_release = 0;
        pagelistinfo->pages = pages;
        pagelistinfo->scatterlist = scatterlist;
        pagelistinfo->scatterlist_mapped = 0;

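        /*
         * Kernel buffers in the vmalloc area have no user mapping that
         * get_user_pages_fast() can resolve, so walk them page by page
         * with vmalloc_to_page() instead.
         */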
        if (is_vmalloc_addr(buf)) {
                unsigned long length = count;
                unsigned int off = offset;

                for (actual_pages = 0; actual_pages < num_pages;
                     actual_pages++) {
                        struct page *pg = vmalloc_to_page(buf + (actual_pages *
                                                                 PAGE_SIZE));
                        size_t bytes = PAGE_SIZE - off;

                        if (!pg) {
                                cleanup_pagelistinfo(pagelistinfo);
                                return NULL;
                        }

                        if (bytes > length)
                                bytes = length;
                        pages[actual_pages] = pg;
                        length -= bytes;
                        off = 0;
                }
                /* do not try and release vmalloc pages */
        } else {
                actual_pages = get_user_pages_fast(
                                          (unsigned long)buf & PAGE_MASK,
                                          num_pages,
                                          type == PAGELIST_READ,
                                          pages);

                if (actual_pages != num_pages) {
                        vchiq_log_info(vchiq_arm_log_level,
                                       "%s - only %d/%d pages locked",
                                       __func__, actual_pages, num_pages);

                        /* This is probably due to the process being killed */
                        while (actual_pages > 0) {
                                actual_pages--;
                                put_page(pages[actual_pages]);
                        }
                        cleanup_pagelistinfo(pagelistinfo);
                        return NULL;
                }
                /* the user pages must be released on cleanup */
                pagelistinfo->pages_need_release = 1;
        }

        /*
         * Initialize the scatterlist so that the magic cookie
         * is filled if debugging is enabled
         */
        sg_init_table(scatterlist, num_pages);
        /* Now set the pages for each scatterlist */
        for (i = 0; i < num_pages; i++) {
                unsigned int len = PAGE_SIZE - offset;

                if (len > count)
                        len = count;
                sg_set_page(scatterlist + i, pages[i], len, offset);
                offset = 0;
                count -= len;
        }

        dma_buffers = dma_map_sg(g_dev,
                                 scatterlist,
                                 num_pages,
                                 pagelistinfo->dma_dir);

        if (dma_buffers == 0) {
                cleanup_pagelistinfo(pagelistinfo);
                return NULL;
        }

        pagelistinfo->scatterlist_mapped = 1;

        /* Combine adjacent blocks for performance */
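        /*
         * Each addrs[] entry packs a page-aligned bus address together
         * with a page count: (addr & PAGE_MASK) | (pages - 1).  For
         * example, with 4 KiB pages a three-page run at bus address
         * 0xc0100000 is stored as 0xc0100002.
         */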
        k = 0;
        for_each_sg(scatterlist, sg, dma_buffers, i) {
                u32 len = sg_dma_len(sg);
                u32 addr = sg_dma_address(sg);

                /* Note: each addrs[] entry is the page-aligned bus
                 * address OR'd with (page count - 1).  The firmware
                 * expects blocks after the first to be page-aligned
                 * and a multiple of the page size.
                 */
                WARN_ON(len == 0);
                WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
                WARN_ON(i && (addr & ~PAGE_MASK));
                if (k > 0 &&
                    ((addrs[k - 1] & PAGE_MASK) +
                     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
                    == (addr & PAGE_MASK))
                        addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
                else
                        addrs[k++] = (addr & PAGE_MASK) |
                                (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
        }

        /* Partial cache lines (fragments) require special measures */
        if ((type == PAGELIST_READ) &&
            ((pagelist->offset & (g_cache_line_size - 1)) ||
             ((pagelist->offset + pagelist->length) &
              (g_cache_line_size - 1)))) {
                char *fragments;

                if (down_killable(&g_free_fragments_sema)) {
                        cleanup_pagelistinfo(pagelistinfo);
                        return NULL;
                }

                WARN_ON(g_free_fragments == NULL);

                down(&g_free_fragments_mutex);
                fragments = g_free_fragments;
                WARN_ON(fragments == NULL);
                g_free_fragments = *(char **)g_free_fragments;
                up(&g_free_fragments_mutex);
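                /*
                 * Encode the index of the claimed fragment buffer into
                 * the pagelist type so the VPU (and free_pagelist())
                 * can recover it: types at or above
                 * PAGELIST_READ_WITH_FRAGMENTS carry the fragment
                 * index as an offset from that base value.
                 */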
                pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
                        (fragments - g_fragments_base) / g_fragments_size;
        }

        return pagelistinfo;
}

static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
              int actual)
{
        struct pagelist *pagelist = pagelistinfo->pagelist;
        struct page **pages = pagelistinfo->pages;
        unsigned int num_pages = pagelistinfo->num_pages;

        vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
                        __func__, pagelistinfo->pagelist, actual);

        /*
         * NOTE: dma_unmap_sg must be called before the
         * cpu can touch any of the data/pages.
         */
        dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
                     pagelistinfo->num_pages, pagelistinfo->dma_dir);
        pagelistinfo->scatterlist_mapped = 0;

        /* Deal with any partial cache lines (fragments) */
        if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
                char *fragments = g_fragments_base +
                        (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
                        g_fragments_size;
                int head_bytes, tail_bytes;

                head_bytes = (g_cache_line_size - pagelist->offset) &
                        (g_cache_line_size - 1);
                tail_bytes = (pagelist->offset + actual) &
                        (g_cache_line_size - 1);
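                /*
                 * Continuing the 32-byte cache line example above: for
                 * a 100-byte read at offset 5 into a line, head_bytes
                 * is 27 and tail_bytes is 9.  The head occupies the
                 * first cache line of the fragment buffer and the tail
                 * the second; each is copied into the user pages with
                 * the CPU below.
                 */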

                if ((actual >= 0) && (head_bytes != 0)) {
                        if (head_bytes > actual)
                                head_bytes = actual;

                        memcpy((char *)kmap(pages[0]) +
                                pagelist->offset,
                                fragments,
                                head_bytes);
                        kunmap(pages[0]);
                }
                if ((actual >= 0) && (head_bytes < actual) &&
                        (tail_bytes != 0)) {
                        memcpy((char *)kmap(pages[num_pages - 1]) +
                                ((pagelist->offset + actual) &
                                (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
                                fragments + g_cache_line_size,
                                tail_bytes);
                        kunmap(pages[num_pages - 1]);
                }

                down(&g_free_fragments_mutex);
                *(char **)fragments = g_free_fragments;
                g_free_fragments = fragments;
                up(&g_free_fragments_mutex);
                up(&g_free_fragments_sema);
        }

        /* Need to mark all the pages dirty. */
        if (pagelist->type != PAGELIST_WRITE &&
            pagelistinfo->pages_need_release) {
                unsigned int i;

                for (i = 0; i < num_pages; i++)
                        set_page_dirty(pages[i]);
        }

        cleanup_pagelistinfo(pagelistinfo);
}