
Merge branches 'apple/dart', 'arm/mediatek', 'arm/renesas', 'arm/smmu', 'arm/tegra...
author Joerg Roedel <jroedel@suse.de>
Sun, 31 Oct 2021 21:26:53 +0000 (22:26 +0100)
committer Joerg Roedel <jroedel@suse.de>
Sun, 31 Oct 2021 21:26:53 +0000 (22:26 +0100)
drivers/iommu/apple-dart.c
drivers/iommu/dma-iommu.c

@@@@@@@@@@@ -698,20 -703,19 -698,6 -696,6 -698,6 -698,6 -698,20 -698,6 -698,20 -698,6 +701,19 @@@@@@@@@@@ static struct iommu_group *apple_dart_d
          #endif
                        group = generic_device_group(dev);
          
-     - -       group_master_cfg = kzalloc(sizeof(*group_master_cfg), GFP_KERNEL);
  ++++ + +      res = ERR_PTR(-ENOMEM);
  ++++ + +      if (!group)
  ++++ + +              goto out;
  ++++ + +
-     - -       memcpy(group_master_cfg, cfg, sizeof(*group_master_cfg));
+ ++++++++      group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg), GFP_KERNEL);
  ++++ + +      if (!group_master_cfg) {
  ++++ + +              iommu_group_put(group);
  ++++ + +              goto out;
  ++++ + +      }
  ++++ + +
  ++++ + +      iommu_group_set_iommudata(group, group_master_cfg,
  ++++ + +              apple_dart_release_group);
  ++++ + +
                for_each_stream_map(i, cfg, stream_map)
                        for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
                                stream_map->dart->sid2group[sid] = group;
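
The apple/dart side of this merge replaces a kzalloc()+memcpy() pair with kmemdup() and checks each allocation before the group data is installed. Below is a minimal userspace sketch (not kernel code) of that duplicate-then-check pattern; the struct and its fields are hypothetical stand-ins for the real struct apple_dart_master_cfg.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct master_cfg {                     /* hypothetical stand-in for the DART master cfg */
        unsigned long sidmap;
        int stream_count;
};

/* Userspace analogue of kmemdup(): allocate and copy in one step,
 * returning NULL on allocation failure. */
static void *memdup(const void *src, size_t len)
{
        void *dst = malloc(len);

        if (dst)
                memcpy(dst, src, len);  /* copy only after the NULL check */
        return dst;
}

int main(void)
{
        struct master_cfg cfg = { .sidmap = 0x3, .stream_count = 2 };
        struct master_cfg *dup = memdup(&cfg, sizeof(cfg));

        if (!dup)                       /* bail out instead of dereferencing NULL */
                return 1;
        printf("sidmap=%#lx streams=%d\n", dup->sidmap, dup->stream_count);
        free(dup);
        return 0;
}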
@@@@@@@@@@@ -867,14 -867,14 -867,14 -860,14 -867,14 -867,14 -867,14 -867,14 -867,14 -802,47 +795,47 @@@@@@@@@@@ static dma_addr_t iommu_dma_map_page(st
          {
                phys_addr_t phys = page_to_phys(page) + offset;
                bool coherent = dev_is_dma_coherent(dev);
---------       dma_addr_t dma_handle;
+++++++++       int prot = dma_info_to_prot(dir, coherent, attrs);
+++++++++       struct iommu_domain *domain = iommu_get_dma_domain(dev);
+++++++++       struct iommu_dma_cookie *cookie = domain->iova_cookie;
+++++++++       struct iova_domain *iovad = &cookie->iovad;
+++++++++       dma_addr_t iova, dma_mask = dma_get_mask(dev);
+++++++++ 
+++++++++       /*
+++++++++        * If both the physical buffer start address and size are
+++++++++        * page aligned, we don't need to use a bounce page.
+++++++++        */
+++++++++       if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
+++++++++               void *padding_start;
+++++++++               size_t padding_size, aligned_size;
+++++++++ 
+++++++++               aligned_size = iova_align(iovad, size);
+++++++++               phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+++++++++                                             iova_mask(iovad), dir, attrs);
+++++++++ 
+++++++++               if (phys == DMA_MAPPING_ERROR)
+++++++++                       return DMA_MAPPING_ERROR;
      +   
------ --       dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
------ --                       coherent, dir, attrs);
------ --       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
------ --           dma_handle != DMA_MAPPING_ERROR)
+++++++++               /* Cleanup the padding area. */
+++++++++               padding_start = phys_to_virt(phys);
+++++++++               padding_size = aligned_size;
+++++++++ 
+++++++++               if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+++++++++                   (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
+++++++++                       padding_start += size;
+++++++++                       padding_size -= size;
+++++++++               }
+++++++++ 
+++++++++               memset(padding_start, 0, padding_size);
+++++++++       }
++++++ ++ 
      -         dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
      -                         coherent, dir, attrs);
      -         if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
      -             dma_handle != DMA_MAPPING_ERROR)
+++++++++       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        arch_sync_dma_for_device(phys, size, dir);
---------       return dma_handle;
+++++++++ 
+++++++++       iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
+++++++++       if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
+++++++++               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+++++++++       return iova;
          }
          
          static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
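
The merged iommu_dma_map_page() folds the old __iommu_dma_map_swiotlb() helper into the caller: a buffer is bounced through swiotlb only when iova_offset(iovad, phys | size) is non-zero, i.e. when either the start address or the size is misaligned with the IOVA granule, and the unused padding of the bounce slot is zeroed. A small runnable sketch of that single-mask alignment test, assuming a 4 KiB granule purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GRANULE 4096u                   /* example IOVA granule (page size) */

/* OR-ing start and size lets one mask test catch misalignment in
 * either value, mirroring iova_offset(iovad, phys | size). */
static bool needs_bounce(uint64_t phys, uint64_t size)
{
        return ((phys | size) & (GRANULE - 1)) != 0;
}

int main(void)
{
        printf("%d\n", needs_bounce(0x1000, 0x2000));  /* 0: both aligned */
        printf("%d\n", needs_bounce(0x1080, 0x2000));  /* 1: start misaligned */
        printf("%d\n", needs_bounce(0x1000, 0x1ff0));  /* 1: size misaligned */
        return 0;
}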
@@@@@@@@@@@ -1016,9 -1016,9 -1016,9 -1009,9 -1016,9 -1016,9 -1016,10 -1016,9 -1016,9 -994,12 +987,13 @@@@@@@@@@@ static int iommu_dma_map_sg(struct devi
          
                if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
                        ret = iommu_deferred_attach(dev, domain);
------ ---              goto out;
++++++ +++              if (ret)
++++++ +++                      goto out;
                }
          
+++++++++       if (dev_use_swiotlb(dev))
+++++++++               return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
+++++++++ 
                if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
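
The iommu_dma_map_sg() hunk fixes an unconditional goto: most parents jumped to the out label right after iommu_deferred_attach() regardless of its return value, while the merged version jumps only when the attach actually fails (and also routes swiotlb-using devices through iommu_dma_map_sg_swiotlb()). A trimmed userspace sketch of the corrected control flow, with hypothetical stand-ins for the attach and map steps:

#include <stdio.h>

static int deferred_attach(void) { return 0; }   /* stand-in: 0 on success */
static int do_map(void)          { return 42; }  /* stand-in: mapped entry count */

static int map_sg_fixed(void)
{
        int ret;

        ret = deferred_attach();
        if (ret)                /* jump to cleanup only on failure... */
                goto out;

        return do_map();        /* ...otherwise fall through to the mapping */
out:
        return ret;
}

int main(void)
{
        printf("mapped %d entries\n", map_sg_fixed());
        return 0;
}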