/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define MAX_DEVICES             0x10000
#define PRI_QUEUE_SIZE          512
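
/*
 * Per-PASID bookkeeping: a pri_queue entry tracks the completion state of
 * one outstanding PPR tag, and a pasid_state ties a bound PASID to the
 * mm_struct it translates for.
 */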
struct pri_queue {
        atomic_t inflight;
        bool finish;
        int status;
};

struct pasid_state {
        struct list_head list;                  /* For global state-list */
        atomic_t count;                         /* Reference count */
        unsigned mmu_notifier_count;            /* Counting nested mmu_notifier
                                                   calls */
        struct mm_struct *mm;                   /* mm_struct for the faults */
        struct mmu_notifier mn;                 /* mmu_notifier handle */
        struct pri_queue pri[PRI_QUEUE_SIZE];   /* PRI tag states */
        struct device_state *device_state;      /* Link to our device_state */
        int pasid;                              /* PASID index */
        bool invalid;                           /* Used during setup and
                                                   teardown of the pasid */
        spinlock_t lock;                        /* Protect pri_queues and
                                                   mmu_notifier_count */
        wait_queue_head_t wq;                   /* To wait for count == 0 */
};

struct device_state {
        struct list_head list;
        u16 devid;
        atomic_t count;
        struct pci_dev *pdev;
        struct pasid_state **states;
        struct iommu_domain *domain;
        int pasid_levels;
        int max_pasids;
        amd_iommu_invalid_ppr_cb inv_ppr_cb;
        amd_iommu_invalidate_ctx inv_ctx_cb;
        spinlock_t lock;
        wait_queue_head_t wq;
};

struct fault {
        struct work_struct work;
        struct device_state *dev_state;

        struct pasid_state *state;
        u64 address;
        u16 pasid;
        u16 tag;
        u16 finish;
        u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);
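
/*
 * Pack the PCI bus number and devfn into the 16-bit IOMMU device id:
 * for example, device 01.0 on bus 0x3a yields devid 0x3a08
 * (0x3a << 8 | (1 << 3 | 0)).
 */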
static u16 device_id(struct pci_dev *pdev)
{
        u16 devid;

        devid = pdev->bus->number;
        devid = (devid << 8) | pdev->devfn;

        return devid;
}

static struct device_state *__get_device_state(u16 devid)
{
        struct device_state *dev_state;

        list_for_each_entry(dev_state, &state_list, list) {
                if (dev_state->devid == devid)
                        return dev_state;
        }

        return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
        struct device_state *dev_state;
        unsigned long flags;

        spin_lock_irqsave(&state_lock, flags);
        dev_state = __get_device_state(devid);
        if (dev_state != NULL)
                atomic_inc(&dev_state->count);
        spin_unlock_irqrestore(&state_lock, flags);

        return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
        struct iommu_group *group;

        /*
         * First detach device from domain - No more PRI requests will arrive
         * from that device after it is unbound from the IOMMUv2 domain.
         */
        group = iommu_group_get(&dev_state->pdev->dev);
        if (WARN_ON(!group))
                return;

        iommu_detach_group(dev_state->domain, group);

        iommu_group_put(group);

        /* Everything is down now, free the IOMMUv2 domain */
        iommu_domain_free(dev_state->domain);

        /* Finally get rid of the device-state */
        kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
        if (atomic_dec_and_test(&dev_state->count))
                wake_up(&dev_state->wq);
}
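
/*
 * Per-device PASID states live in a small radix tree: each level is one
 * zeroed page of 512 pointers, indexed by 9 bits of the PASID, with the
 * pasid_state pointers stored in the level-0 pages.
 */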
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
                                                  int pasid, bool alloc)
{
        struct pasid_state **root, **ptr;
        int level, index;

        level = dev_state->pasid_levels;
        root  = dev_state->states;

        while (true) {
                index = (pasid >> (9 * level)) & 0x1ff;
                ptr   = &root[index];

                if (level == 0)
                        break;

                if (*ptr == NULL) {
                        if (!alloc)
                                return NULL;

                        *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
                        if (*ptr == NULL)
                                return NULL;
                }

                root   = (struct pasid_state **)*ptr;
                level -= 1;
        }

        return ptr;
}

static int set_pasid_state(struct device_state *dev_state,
                           struct pasid_state *pasid_state,
                           int pasid)
{
        struct pasid_state **ptr;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, true);

        ret = -ENOMEM;
        if (ptr == NULL || *ptr != NULL)
                goto out_unlock;

        *ptr = pasid_state;
        ret  = 0;

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);

        return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
        struct pasid_state **ptr;
        unsigned long flags;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, true);

        if (ptr == NULL)
                goto out_unlock;

        *ptr = NULL;

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
                                           int pasid)
{
        struct pasid_state **ptr, *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, false);

        if (ptr == NULL)
                goto out_unlock;

        ret = *ptr;
        if (ret)
                atomic_inc(&ret->count);

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);

        return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
        kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
        if (atomic_dec_and_test(&pasid_state->count))
                wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
        atomic_dec(&pasid_state->count);
        wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
        free_pasid_state(pasid_state);
}

static void unbind_pasid(struct pasid_state *pasid_state)
{
        struct iommu_domain *domain;

        domain = pasid_state->device_state->domain;

        /*
         * Mark pasid_state as invalid, no more faults will be added to the
         * work queue after this is visible everywhere.
         */
        pasid_state->invalid = true;

        /* Make sure this is visible */
        smp_wmb();

        /* After this the device/pasid can't access the mm anymore */
        amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

        /* Make sure no more pending faults are in the queue */
        flush_workqueue(iommu_wq);
}
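
/*
 * Helpers to tear down the multi-level PASID state table: walk each
 * populated entry and release the intermediate pages allocated in
 * __get_pasid_state_ptr().
 */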
static void free_pasid_states_level1(struct pasid_state **tbl)
{
        int i;

        for (i = 0; i < 512; ++i) {
                if (tbl[i] == NULL)
                        continue;

                free_page((unsigned long)tbl[i]);
        }
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
        struct pasid_state **ptr;
        int i;

        for (i = 0; i < 512; ++i) {
                if (tbl[i] == NULL)
                        continue;

                ptr = (struct pasid_state **)tbl[i];
                free_pasid_states_level1(ptr);
        }
}

static void free_pasid_states(struct device_state *dev_state)
{
        struct pasid_state *pasid_state;
        int i;

        for (i = 0; i < dev_state->max_pasids; ++i) {
                pasid_state = get_pasid_state(dev_state, i);
                if (pasid_state == NULL)
                        continue;

                put_pasid_state(pasid_state);

                /*
                 * This will call the mn_release function and
                 * unbind the PASID
                 */
                mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

                put_pasid_state_wait(pasid_state); /* Reference taken in
                                                      amd_iommu_bind_pasid */

                /* Drop reference taken in amd_iommu_bind_pasid */
                put_device_state(dev_state);
        }

        if (dev_state->pasid_levels == 2)
                free_pasid_states_level2(dev_state->states);
        else if (dev_state->pasid_levels == 1)
                free_pasid_states_level1(dev_state->states);
        else
                BUG_ON(dev_state->pasid_levels != 0);

        free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
        return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
                            unsigned long address)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;

        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;

        amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start,
                                unsigned long end)
{
        for (; start < end; start += PAGE_SIZE)
                __mn_flush_page(mn, start);

        return 0;
}

static void mn_invalidate_range(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;

        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;

        if ((start ^ (end - 1)) < PAGE_SIZE)
                amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
                                     start);
        else
                amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        bool run_inv_ctx_cb;

        might_sleep();

        pasid_state    = mn_to_state(mn);
        dev_state      = pasid_state->device_state;
        run_inv_ctx_cb = !pasid_state->invalid;

        if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
                dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

        unbind_pasid(pasid_state);
}

static const struct mmu_notifier_ops iommu_mn = {
        .release                = mn_release,
        .clear_flush_young      = mn_clear_flush_young,
        .invalidate_range       = mn_invalidate_range,
};
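
/*
 * PPR completion handling: each fault carries a 9-bit tag, and the last
 * request of a group sets the "finish" bit.  The helpers below record the
 * response status per tag and send the completion once all faults sharing
 * the tag have been handled.
 */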
static void set_pri_tag_status(struct pasid_state *pasid_state,
                               u16 tag, int status)
{
        unsigned long flags;

        spin_lock_irqsave(&pasid_state->lock, flags);
        pasid_state->pri[tag].status = status;
        spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
                           struct pasid_state *pasid_state,
                           u16 tag)
{
        unsigned long flags;

        spin_lock_irqsave(&pasid_state->lock, flags);
        if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
            pasid_state->pri[tag].finish) {
                amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
                                       pasid_state->pri[tag].status, tag);
                pasid_state->pri[tag].finish = false;
                pasid_state->pri[tag].status = PPR_SUCCESS;
        }
        spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void handle_fault_error(struct fault *fault)
{
        int status;

        if (!fault->dev_state->inv_ppr_cb) {
                set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
                return;
        }

        status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
                                              fault->pasid,
                                              fault->address,
                                              fault->flags);
        switch (status) {
        case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
                set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
                break;
        case AMD_IOMMU_INV_PRI_RSP_INVALID:
                set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
                break;
        case AMD_IOMMU_INV_PRI_RSP_FAIL:
                set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
                break;
        default:
                BUG();
        }
}

static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
        unsigned long requested = 0;

        if (fault->flags & PPR_FAULT_EXEC)
                requested |= VM_EXEC;

        if (fault->flags & PPR_FAULT_READ)
                requested |= VM_READ;

        if (fault->flags & PPR_FAULT_WRITE)
                requested |= VM_WRITE;

        return (requested & ~vma->vm_flags) != 0;
}
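
/*
 * Worker for one queued PPR fault: resolve the faulting address in the
 * bound mm with handle_mm_fault() and report the result back to the
 * device through the PRI tag bookkeeping above.
 */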
static void do_fault(struct work_struct *work)
{
        struct fault *fault = container_of(work, struct fault, work);
        struct vm_area_struct *vma;
        vm_fault_t ret = VM_FAULT_ERROR;
        unsigned int flags = 0;
        struct mm_struct *mm;
        u64 address;

        mm = fault->state->mm;
        address = fault->address;

        if (fault->flags & PPR_FAULT_USER)
                flags |= FAULT_FLAG_USER;
        if (fault->flags & PPR_FAULT_WRITE)
                flags |= FAULT_FLAG_WRITE;
        flags |= FAULT_FLAG_REMOTE;

        down_read(&mm->mmap_sem);
        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                /* failed to get a vma in the right range */
                goto out;

        /* Check if we have the right permissions on the vma */
        if (access_error(vma, fault))
                goto out;

        ret = handle_mm_fault(vma, address, flags);
out:
        up_read(&mm->mmap_sem);

        if (ret & VM_FAULT_ERROR)
                /* failed to service fault */
                handle_fault_error(fault);

        finish_pri_tag(fault->dev_state, fault->state, fault->tag);

        put_pasid_state(fault->state);

        kfree(fault);
}
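
/*
 * Notifier called from the IOMMU driver for every incoming PPR: look up
 * the device and PASID state, record the tag as in-flight and hand the
 * fault to the workqueue.  Anything that cannot be attributed to a bound
 * PASID is answered with an INVALID response right away.
 */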
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
        struct amd_iommu_fault *iommu_fault;
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        unsigned long flags;
        struct fault *fault;
        bool finish;
        u16 tag, devid;
        int ret;
        struct iommu_dev_data *dev_data;
        struct pci_dev *pdev = NULL;

        iommu_fault = data;
        tag         = iommu_fault->tag & 0x1ff;
        finish      = (iommu_fault->tag >> 9) & 1;

        devid = iommu_fault->device_id;
        pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
                                           devid & 0xff);
        if (!pdev)
                return -ENODEV;
        dev_data = get_dev_data(&pdev->dev);

        /* In kdump kernel pci dev is not initialized yet -> send INVALID */
        ret = NOTIFY_DONE;
        if (translation_pre_enabled(amd_iommu_rlookup_table[devid])
                && dev_data->defer_attach) {
                amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
                                       PPR_INVALID, tag);
                goto out;
        }

        dev_state = get_device_state(iommu_fault->device_id);
        if (dev_state == NULL)
                goto out;

        pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
        if (pasid_state == NULL || pasid_state->invalid) {
                /* We know the device but not the PASID -> send INVALID */
                amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
                                       PPR_INVALID, tag);
                goto out_drop_state;
        }

        spin_lock_irqsave(&pasid_state->lock, flags);
        atomic_inc(&pasid_state->pri[tag].inflight);
        if (finish)
                pasid_state->pri[tag].finish = true;
        spin_unlock_irqrestore(&pasid_state->lock, flags);

        fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
        if (fault == NULL) {
                /* We are OOM - send success and let the device re-fault */
                finish_pri_tag(dev_state, pasid_state, tag);
                goto out_drop_state;
        }

        fault->dev_state = dev_state;
        fault->address   = iommu_fault->address;
        fault->state     = pasid_state;
        fault->tag       = tag;
        fault->finish    = finish;
        fault->pasid     = iommu_fault->pasid;
        fault->flags     = iommu_fault->flags;
        INIT_WORK(&fault->work, do_fault);

        queue_work(iommu_wq, &fault->work);

        ret = NOTIFY_OK;

out_drop_state:

        if (ret != NOTIFY_OK && pasid_state)
                put_pasid_state(pasid_state);

        put_device_state(dev_state);

out:
        return ret;
}

static struct notifier_block ppr_nb = {
        .notifier_call = ppr_notifier,
};
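
/*
 * Bind a PASID of @pdev to the address space of @task: allocate the
 * pasid_state, register an mmu_notifier on the task's mm and point the
 * IOMMU's GCR3 table for this PASID at the mm's page tables.
 */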
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
                         struct task_struct *task)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        struct mm_struct *mm;
        u16 devid;
        int ret;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid     = device_id(pdev);
        dev_state = get_device_state(devid);

        if (dev_state == NULL)
                return -EINVAL;

        ret = -EINVAL;
        if (pasid < 0 || pasid >= dev_state->max_pasids)
                goto out;

        ret = -ENOMEM;
        pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
        if (pasid_state == NULL)
                goto out;

        atomic_set(&pasid_state->count, 1);
        init_waitqueue_head(&pasid_state->wq);
        spin_lock_init(&pasid_state->lock);

        mm                        = get_task_mm(task);
        pasid_state->mm           = mm;
        pasid_state->device_state = dev_state;
        pasid_state->pasid        = pasid;
        pasid_state->invalid      = true; /* Mark as valid only if we are
                                             done with setting up the pasid */
        pasid_state->mn.ops       = &iommu_mn;

        if (pasid_state->mm == NULL)
                goto out_free;

        mmu_notifier_register(&pasid_state->mn, mm);

        ret = set_pasid_state(dev_state, pasid_state, pasid);
        if (ret)
                goto out_unregister;

        ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
                                        __pa(pasid_state->mm->pgd));
        if (ret)
                goto out_clear_state;

        /* Now we are ready to handle faults */
        pasid_state->invalid = false;

        /*
         * Drop the reference to the mm_struct here. We rely on the
         * mmu_notifier release call-back to inform us when the mm
         * is going away.
         */
        mmput(mm);

        return 0;

out_clear_state:
        clear_pasid_state(dev_state, pasid);

out_unregister:
        mmu_notifier_unregister(&pasid_state->mn, mm);
        mmput(mm);

out_free:
        free_pasid_state(pasid_state);

out:
        put_device_state(dev_state);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        u16 devid;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return;

        devid = device_id(pdev);
        dev_state = get_device_state(devid);
        if (dev_state == NULL)
                return;

        if (pasid < 0 || pasid >= dev_state->max_pasids)
                goto out;

        pasid_state = get_pasid_state(dev_state, pasid);
        if (pasid_state == NULL)
                goto out;
        /*
         * Drop reference taken here. We are safe because we still hold
         * the reference taken in the amd_iommu_bind_pasid function.
         */
        put_pasid_state(pasid_state);

        /* Clear the pasid state so that the pasid can be re-used */
        clear_pasid_state(dev_state, pasid_state->pasid);

        /*
         * Call mmu_notifier_unregister to drop our reference
         * to pasid_state->mm
         */
        mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

        put_pasid_state_wait(pasid_state); /* Reference taken in
                                              amd_iommu_bind_pasid */
out:
        /* Drop reference taken in this function */
        put_device_state(dev_state);

        /* Drop reference taken in amd_iommu_bind_pasid */
        put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
        struct device_state *dev_state;
        struct iommu_group *group;
        unsigned long flags;
        int ret, tmp;
        u16 devid;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        if (pasids <= 0 || pasids > (PASID_MASK + 1))
                return -EINVAL;

        devid = device_id(pdev);

        dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
        if (dev_state == NULL)
                return -ENOMEM;

        spin_lock_init(&dev_state->lock);
        init_waitqueue_head(&dev_state->wq);
        dev_state->pdev  = pdev;
        dev_state->devid = devid;

        tmp = pasids;
        for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
                dev_state->pasid_levels += 1;

        atomic_set(&dev_state->count, 1);
        dev_state->max_pasids = pasids;

        ret = -ENOMEM;
        dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
        if (dev_state->states == NULL)
                goto out_free_dev_state;

        dev_state->domain = iommu_domain_alloc(&pci_bus_type);
        if (dev_state->domain == NULL)
                goto out_free_states;

        amd_iommu_domain_direct_map(dev_state->domain);

        ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
        if (ret)
                goto out_free_domain;

        group = iommu_group_get(&pdev->dev);
        if (!group) {
                ret = -EINVAL;
                goto out_free_domain;
        }

        ret = iommu_attach_group(dev_state->domain, group);
        if (ret != 0)
                goto out_drop_group;

        iommu_group_put(group);

        spin_lock_irqsave(&state_lock, flags);

        if (__get_device_state(devid) != NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                ret = -EBUSY;
                goto out_free_domain;
        }

        list_add_tail(&dev_state->list, &state_list);

        spin_unlock_irqrestore(&state_lock, flags);

        return 0;

out_drop_group:
        iommu_group_put(group);

out_free_domain:
        iommu_domain_free(dev_state->domain);

out_free_states:
        free_page((unsigned long)dev_state->states);

out_free_dev_state:
        kfree(dev_state);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;

        if (!amd_iommu_v2_supported())
                return;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        dev_state = __get_device_state(devid);
        if (dev_state == NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                return;
        }

        list_del(&dev_state->list);

        spin_unlock_irqrestore(&state_lock, flags);

        /* Get rid of any remaining pasid states */
        free_pasid_states(dev_state);

        put_device_state(dev_state);
        /*
         * Wait until the last reference is dropped before freeing
         * the device state.
         */
        wait_event(dev_state->wq, !atomic_read(&dev_state->count));
        free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
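
/*
 * Minimal usage sketch of the exported API above, from the point of view
 * of a hypothetical client driver (the PASID count, the PASID value and
 * the error handling are illustrative only):
 *
 *      ret = amd_iommu_init_device(pdev, 16);
 *      if (ret)
 *              return ret;
 *      ret = amd_iommu_bind_pasid(pdev, 1, current);
 *      ...                     (device issues PRI traffic on PASID 1)
 *      amd_iommu_unbind_pasid(pdev, 1);
 *      amd_iommu_free_device(pdev);
 */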
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
                                 amd_iommu_invalid_ppr_cb cb)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;
        int ret;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        ret = -EINVAL;
        dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;

        dev_state->inv_ppr_cb = cb;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
                                    amd_iommu_invalidate_ctx cb)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;
        int ret;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        ret = -EINVAL;
        dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;

        dev_state->inv_ctx_cb = cb;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
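
/*
 * Module init: nothing is done per device here; devices opt in later via
 * amd_iommu_init_device().  We only set up the shared state, the fault
 * workqueue and the PPR notifier.
 */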
static int __init amd_iommu_v2_init(void)
{
        int ret;

        pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");

        if (!amd_iommu_v2_supported()) {
                pr_info("AMD IOMMUv2 functionality not available on this system\n");
                /*
                 * Load anyway to provide the symbols to other modules
                 * which may use AMD IOMMUv2 optionally.
                 */
                return 0;
        }

        spin_lock_init(&state_lock);

        ret = -ENOMEM;
        iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
        if (iommu_wq == NULL)
                goto out;

        amd_iommu_register_ppr_notifier(&ppr_nb);

        return 0;

out:
        return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
        struct device_state *dev_state;
        int i;

        if (!amd_iommu_v2_supported())
                return;

        amd_iommu_unregister_ppr_notifier(&ppr_nb);

        flush_workqueue(iommu_wq);

        /*
         * The loop below might call flush_workqueue(), so call
         * destroy_workqueue() after it
         */
        for (i = 0; i < MAX_DEVICES; ++i) {
                dev_state = get_device_state(i);

                if (dev_state == NULL)
                        continue;

                WARN_ON_ONCE(1);

                put_device_state(dev_state);
                amd_iommu_free_device(dev_state->pdev);
        }

        destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);