/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>

#include <asm/cacheflush.h>

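/*
 * The Tegra20 GART (Graphics Address Relocation Table) is a very
 * simple IOMMU: a single contiguous IOVA aperture remapped with one
 * page-table entry per 4KiB page.  The page table is not memory
 * mapped; entries are programmed indirectly through the
 * GART_ENTRY_ADDR/GART_ENTRY_DATA register pair defined below.
 */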
/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;

	struct iommu_device	iommu;		/* IOMMU Core handle */
};

struct gart_domain {
	struct iommu_domain domain;		/* generic domain handle */
	struct gart_device *gart;		/* link to gart device   */
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

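/*
 * Worked example of the PTE layout: GART_PTE(0x12345) ==
 * 0x80000000 | (0x12345 << 12) == 0x92345000, i.e. bit 31 flags the
 * entry valid and the remaining bits hold the physical page address.
 */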
static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct gart_domain, domain);
}

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)

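/*
 * PTE accessors: the target IOVA is written to GART_ENTRY_ADDR and the
 * entry is then read or written through GART_ENTRY_DATA.  Callers are
 * expected to serialize this two-register sequence with gart->pte_lock
 * and to make writes visible with FLUSH_GART_REGS().
 */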
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		 pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}

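/*
 * Program every PTE in the aperture -- from a saved snapshot on resume,
 * or cleared at probe time -- then enable translation via GART_CONFIG
 * and read the register back to flush the writes.
 */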
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

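/* Check that [iova, iova + bytes) lies entirely within the aperture. */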
static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}

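/*
 * Attached devices are tracked on a per-GART client list so that a
 * device cannot be attached twice and so teardown can find them.
 */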
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *client, *c;
	int err = 0;

	client = devm_kzalloc(gart->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;

fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *c;

	spin_lock(&gart->client_lock);

	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			goto out;
		}
	}
	dev_err(gart->dev, "Couldn't find %s to detach\n", dev_name(dev));
out:
	spin_unlock(&gart->client_lock);
}

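/*
 * Only unmanaged domains are supported, and since there is a single
 * GART instance per system, every domain shares the same fixed
 * aperture geometry.
 */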
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct gart_domain *gart_domain;
	struct gart_device *gart;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	gart = gart_handle;
	if (!gart)
		return NULL;

	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
	if (!gart_domain)
		return NULL;

	gart_domain->gart = gart;
	gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
	gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
					gart->page_count * GART_PAGE_SIZE - 1;
	gart_domain->domain.geometry.force_aperture = true;

	return &gart_domain->domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;

	if (gart) {
		struct gart_client *c, *tmp;

		/*
		 * Detach any remaining clients directly instead of
		 * calling gart_iommu_detach_dev(), which would try to
		 * take client_lock again and deadlock.  Use the _safe
		 * iterator because entries are removed while walking.
		 */
		spin_lock(&gart->client_lock);
		list_for_each_entry_safe(c, tmp, &gart->client, list) {
			dev_dbg(gart->dev, "Detached %s\n", dev_name(c->dev));
			list_del(&c->list);
			devm_kfree(gart->dev, c);
		}
		spin_unlock(&gart->client_lock);
	}

	kfree(gart_domain);
}

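/*
 * Map a single 4KiB page.  Because pgsize_bitmap only advertises
 * SZ_4K, the IOMMU core splits larger requests into page-sized calls.
 * With gart_debug set, refuse to overwrite an entry that is already
 * valid instead of silently replacing it.
 */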
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;
	unsigned long pfn;
	unsigned long pte;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	if (gart_debug) {
		pte = gart_read_pte(gart, iova);
		if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
			spin_unlock_irqrestore(&gart->pte_lock, flags);
			dev_err(gart->dev, "Page entry is in-use\n");
			return -EBUSY;
		}
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return bytes;
}

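/* Translate an IOVA back to a physical address by reading its PTE. */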
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	/* phys_addr_t is unsigned: signal failure with 0, not -EINVAL */
	if (!gart_iova_range_valid(gart, iova, 0))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08llx:%pa\n",
			 (unsigned long long)iova, &pa);
		gart_dump_table(gart);
		return 0;
	}
	return pa;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static int gart_iommu_add_device(struct device *dev)
{
	struct iommu_group *group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	iommu_device_link(&gart_handle->iommu, dev);

	return 0;
}

static void gart_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
	iommu_device_unlink(&gart_handle->iommu, dev);
}

static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.add_device	= gart_iommu_add_device,
	.remove_device	= gart_iommu_remove_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
};

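/*
 * Illustrative sketch (not part of this driver): with the ops above
 * registered, a client driver could drive the GART through the generic
 * IOMMU API roughly as follows.  "client_dev" and "page" are made-up
 * placeholders, and error handling is elided:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(dom, client_dev);
 *	iommu_map(dom, dom->geometry.aperture_start, page_to_phys(page),
 *		  SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, dom->geometry.aperture_start, SZ_4K);
 *	iommu_detach_device(dom, client_dev);
 *	iommu_domain_free(dom);
 */

/*
 * The GART's PTE state cannot be assumed to survive a suspend/resume
 * cycle, so suspend snapshots every entry into gart->savedata and
 * resume reprograms the whole aperture from that snapshot.
 */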
static int tegra_gart_suspend(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_resume(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

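/*
 * Probe expects two memory resources: index 0 is the GART register
 * window and index 1 is the aperture that the GART remaps.
 */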
static int tegra_gart_probe(struct platform_device *pdev)
{
	struct gart_device *gart;
	struct resource *res, *res_remap;
	void __iomem *gart_regs;
	struct device *dev = &pdev->dev;
	int ret;

	if (gart_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res || !res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return -ENXIO;
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return -ENOMEM;
	}

	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!gart_regs) {
		dev_err(dev, "failed to remap GART registers\n");
		return -ENXIO;
	}

	ret = iommu_device_sysfs_add(&gart->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret) {
		dev_err(dev, "Failed to register IOMMU in sysfs\n");
		return ret;
	}

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);

	ret = iommu_device_register(&gart->iommu);
	if (ret) {
		dev_err(dev, "Failed to register IOMMU\n");
		iommu_device_sysfs_remove(&gart->iommu);
		return ret;
	}

	gart->dev = &pdev->dev;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		/* unwind the IOMMU registration performed above */
		iommu_device_unregister(&gart->iommu);
		iommu_device_sysfs_remove(&gart->iommu);
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, gart);
	do_gart_setup(gart, NULL);

	gart_handle = gart;

	return 0;
}

static const struct dev_pm_ops tegra_gart_pm_ops = {
	.suspend	= tegra_gart_suspend,
	.resume		= tegra_gart_resume,
};

static const struct of_device_id tegra_gart_of_match[] = {
	{ .compatible = "nvidia,tegra20-gart", },
	{ },
};

static struct platform_driver tegra_gart_driver = {
	.probe		= tegra_gart_probe,
	.driver = {
		.name	= "tegra-gart",
		.pm	= &tegra_gart_pm_ops,
		.of_match_table = tegra_gart_of_match,
		.suppress_bind_attrs = true,
	},
};

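/*
 * Register at subsys_initcall time rather than via a device initcall
 * so the IOMMU is available before client drivers start probing.
 */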
static int __init tegra_gart_init(void)
{
	return platform_driver_register(&tegra_gart_driver);
}
subsys_initcall(tegra_gart_init);

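/*
 * gart_debug only gates the "entry already in use" check in
 * gart_iommu_map(); mode 0644 makes it writable at runtime via sysfs.
 */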
module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");