msm: ipa: add support for SMMU fastpath
author    Skylar Chang <chiaweic@codeaurora.org>
          Thu, 26 May 2016 22:17:06 +0000 (15:17 -0700)
committer Kyle Yan <kyan@codeaurora.org>
          Fri, 27 May 2016 21:55:38 +0000 (14:55 -0700)
Add support for SMMU fast path configuration to allow
efficient buffer DMA mapping/unmapping.

CRs-Fixed: 1014404
Change-Id: Iaaa373db29d8b53e93ae1d3bf455ee066ed90dfd
Acked-by: Ady Abraham <adya@qti.qualcomm.com>
Signed-off-by: Skylar Chang <chiaweic@codeaurora.org>
Documentation/devicetree/bindings/platform/msm/ipa.txt
drivers/platform/msm/ipa/ipa_v2/ipa.c
drivers/platform/msm/ipa/ipa_v2/ipa_client.c
drivers/platform/msm/ipa/ipa_v2/ipa_i.h
drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
drivers/platform/msm/ipa/ipa_v3/ipa.c
drivers/platform/msm/ipa/ipa_v3/ipa_client.c
drivers/platform/msm/ipa/ipa_v3/ipa_i.h
drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
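The heart of the change, repeated for every IPA context bank in the diffs below, is to request the MSM-specific DOMAIN_ATTR_FAST attribute on the IOMMU domain (alongside DOMAIN_ATTR_ATOMIC) before attaching the device, gated by the new qcom,smmu-fast-map devicetree flag. A minimal sketch of that pattern, assuming a hypothetical helper example_enable_fast_map() that is not part of this patch, with error handling trimmed:

#include <linux/iommu.h>

/* Hypothetical helper; mirrors the context-bank probe flow below. */
static int example_enable_fast_map(struct iommu_domain *domain,
                                   struct device *dev, bool fast_map)
{
        int atomic_ctx = 1;
        int fast = 1;

        /* the probes set the atomic attribute first */
        if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx))
                return -EIO;

        /* then request fast mapping when qcom,smmu-fast-map was present */
        if (fast_map &&
            iommu_domain_set_attr(domain, DOMAIN_ATTR_FAST, &fast))
                return -EIO;

        /* domain attributes are applied before the device is attached */
        return iommu_attach_device(domain, dev);
}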

Documentation/devicetree/bindings/platform/msm/ipa.txt
index a407b73..3b2883a 100644
@@ -26,6 +26,7 @@ Optional:
 - qcom,arm-smmu: SMMU is present and ARM SMMU driver is used
 - qcom,msm-smmu: SMMU is present and QSMMU driver is used
 - qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass
+- qcom,smmu-fast-map: Boolean context flag to set SMMU to fastpath mode
 - ipa_smmu_ap: AP general purpose SMMU device
        compatible "qcom,ipa-smmu-ap-cb"
 - ipa_smmu_wlan: WDI SMMU device
@@ -102,6 +103,18 @@ Optional properties:
 - clock-names: This property shall contain the clock input names used
     by driver in same order as the clocks property.This should be "iface_clk"
 
+IPA SMMU sub nodes
+
+-compatible: "qcom,ipa-smmu-ap-cb" - represents the AP context bank.
+
+-compatible: "qcom,ipa-smmu-wlan-cb" - represents IPA WLAN context bank.
+
+-compatible: "qcom,ipa-smmu-uc-cb" - represents IPA uC context bank (for uC
+                                       offload scenarios).
+- iommus : the phandle and stream IDs for the SMMU used by this root
+
+- qcom,iova-mapping: specifies the start address and size of iova space.
+
 IPA SMP2P sub nodes
 
 -compatible: "qcom,smp2pgpio-map-ipa-1-out" - represents the out gpio from
@@ -174,4 +187,21 @@ qcom,ipa@fd4c0000 {
                compatible = "qcom,smp2pgpio-map-ipa-1-in";
                gpios = <&smp2pgpio_ipa_1_in 0 0>;
        };
+
+       ipa_smmu_ap: ipa_smmu_ap {
+               compatible = "qcom,ipa-smmu-ap-cb";
+               iommus = <&anoc2_smmu 0x30>;
+               qcom,iova-mapping = <0x10000000 0x40000000>;
+       };
+
+       ipa_smmu_wlan: ipa_smmu_wlan {
+               compatible = "qcom,ipa-smmu-wlan-cb";
+               iommus = <&anoc2_smmu 0x31>;
+       };
+
+       ipa_smmu_uc: ipa_smmu_uc {
+               compatible = "qcom,ipa-smmu-uc-cb";
+               iommus = <&anoc2_smmu 0x32>;
+               qcom,iova-mapping = <0x40000000 0x20000000>;
+       };
 };
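The uC and AP context-bank nodes above carry a qcom,iova-mapping <start size> pair, which the driver reads in place of the fixed IPA_SMMU_*_VA_* constants removed later in this patch. A small sketch of that parsing step, assuming a hypothetical example_read_iova_mapping() helper that mirrors the of_property_read_u32_array() calls in the probe functions:

#include <linux/of.h>

/* Hypothetical helper; not a symbol from this patch. */
static int example_read_iova_mapping(struct device *dev,
                                     u32 *va_start, u32 *va_size)
{
        u32 iova_mapping[2];
        int ret;

        ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
                                         iova_mapping, 2);
        if (ret)
                return ret;

        *va_start = iova_mapping[0];    /* start of the IOVA window */
        *va_size = iova_mapping[1];     /* size of the IOVA window */
        return 0;
}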
drivers/platform/msm/ipa/ipa_v2/ipa.c
index feb6225..ae9e2b5 100644
@@ -200,10 +200,15 @@ static struct clk *ipa_inactivity_clk;
 struct ipa_context *ipa_ctx;
 static struct device *master_dev;
 struct platform_device *ipa_pdev;
-static bool smmu_present;
-static bool arm_smmu;
-static bool smmu_disable_htw;
-static bool smmu_s1_bypass;
+static struct {
+       bool present;
+       bool arm_smmu;
+       bool disable_htw;
+       bool fast_map;
+       bool s1_bypass;
+       u32 ipa_base;
+       u32 ipa_size;
+} smmu_info;
 
 static char *active_clients_table_buf;
 
@@ -382,16 +387,24 @@ struct iommu_domain *ipa2_get_smmu_domain(void)
        return NULL;
 }
 
-struct iommu_domain *ipa_get_uc_smmu_domain(void)
+struct iommu_domain *ipa2_get_uc_smmu_domain(void)
 {
-       struct iommu_domain *domain = NULL;
-
        if (smmu_cb[IPA_SMMU_CB_UC].valid)
-               domain = smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
-       else
-               IPAERR("CB not valid\n");
+               return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
+
+       IPAERR("CB not valid\n");
+
+       return NULL;
+}
+
+struct iommu_domain *ipa2_get_wlan_smmu_domain(void)
+{
+       if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
+               return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
 
-       return domain;
+       IPAERR("CB not valid\n");
+
+       return NULL;
 }
 
 struct device *ipa2_get_dma_dev(void)
@@ -400,6 +413,17 @@ struct device *ipa2_get_dma_dev(void)
 }
 
 /**
+ * ipa2_get_smmu_ctx()- Return the smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void)
+{
+       return &smmu_cb[IPA_SMMU_CB_AP];
+}
+
+
+/**
  * ipa2_get_wlan_smmu_ctx()- Return the wlan smmu context
  *
  * Return value: pointer to smmu context address
@@ -2701,7 +2725,7 @@ static int ipa_get_clks(struct device *dev)
                return PTR_ERR(ipa_clk);
        }
 
-       if (smmu_present && arm_smmu) {
+       if (smmu_info.present && smmu_info.arm_smmu) {
                smmu_clk = clk_get(dev, "smmu_clk");
                if (IS_ERR(smmu_clk)) {
                        if (smmu_clk != ERR_PTR(-EPROBE_DEFER))
@@ -3572,12 +3596,13 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
 
        ipa_ctx->pdev = ipa_dev;
        ipa_ctx->uc_pdev = ipa_dev;
-       ipa_ctx->smmu_present = smmu_present;
+       ipa_ctx->smmu_present = smmu_info.present;
        if (!ipa_ctx->smmu_present)
                ipa_ctx->smmu_s1_bypass = true;
        else
-               ipa_ctx->smmu_s1_bypass = smmu_s1_bypass;
+               ipa_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
        ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+       ipa_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
        ipa_ctx->ipa_hw_type = resource_p->ipa_hw_type;
        ipa_ctx->ipa_hw_mode = resource_p->ipa_hw_mode;
        ipa_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
@@ -4103,7 +4128,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
        ipa_drv_res->modem_cfg_emb_pipe_flt = false;
        ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
 
-       smmu_disable_htw = of_property_read_bool(pdev->dev.of_node,
+       smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
                        "qcom,smmu-disable-htw");
 
        /* Get IPA HW Version */
@@ -4189,6 +4214,9 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
                        ipa_drv_res->ipa_mem_base,
                        ipa_drv_res->ipa_mem_size);
 
+       smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+       smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
        /* Get IPA BAM address */
        resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                        "bam-base");
@@ -4245,9 +4273,10 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 
 static int ipa_smmu_wlan_cb_probe(struct device *dev)
 {
-       struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_WLAN];
+       struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
        int disable_htw = 1;
        int atomic_ctx = 1;
+       int fast = 1;
        int bypass = 1;
        int ret;
 
@@ -4260,18 +4289,20 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
                /* assume this failure is because iommu driver is not ready */
                return -EPROBE_DEFER;
        }
+       cb->valid = true;
 
-       if (smmu_disable_htw) {
+       if (smmu_info.disable_htw) {
                ret = iommu_domain_set_attr(cb->iommu,
                        DOMAIN_ATTR_COHERENT_HTW_DISABLE,
                        &disable_htw);
                if (ret) {
                        IPAERR("couldn't disable coherent HTW\n");
+                       cb->valid = false;
                        return -EIO;
                }
        }
 
-       if (smmu_s1_bypass) {
+       if (smmu_info.s1_bypass) {
                if (iommu_domain_set_attr(cb->iommu,
                        DOMAIN_ATTR_S1_BYPASS,
                        &bypass)) {
@@ -4289,6 +4320,16 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
                        return -EIO;
                }
                IPADBG("SMMU atomic set\n");
+               if (smmu_info.fast_map) {
+                       if (iommu_domain_set_attr(cb->iommu,
+                               DOMAIN_ATTR_FAST,
+                               &fast)) {
+                               IPAERR("couldn't set fast map\n");
+                               cb->valid = false;
+                               return -EIO;
+                       }
+                       IPADBG("SMMU fast map set\n");
+               }
        }
 
        ret = iommu_attach_device(cb->iommu, dev);
@@ -4298,31 +4339,47 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
                return ret;
        }
 
-       if (!smmu_s1_bypass) {
+       if (!smmu_info.s1_bypass) {
                IPAERR("map IPA region to WLAN_CB IOMMU\n");
-               ret = iommu_map(cb->iommu, 0x680000, 0x680000,
-                       0x64000,
+               ret = ipa_iommu_map(cb->iommu,
+                       rounddown(smmu_info.ipa_base, PAGE_SIZE),
+                       rounddown(smmu_info.ipa_base, PAGE_SIZE),
+                       roundup(smmu_info.ipa_size, PAGE_SIZE),
                        IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
                if (ret) {
                        IPAERR("map IPA to WLAN_CB IOMMU failed ret=%d\n",
                                ret);
+                       arm_iommu_detach_device(cb->dev);
+                       cb->valid = false;
                        return ret;
                }
        }
 
-       cb->valid = true;
-
        return 0;
 }
 
 static int ipa_smmu_uc_cb_probe(struct device *dev)
 {
-       struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_UC];
+       struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
        int disable_htw = 1;
+       int atomic_ctx = 1;
        int ret;
+       int fast = 1;
        int bypass = 1;
+       u32 iova_ap_mapping[2];
 
-       IPADBG("sub pdev=%p\n", dev);
+       IPADBG("UC CB PROBE sub pdev=%p\n", dev);
+
+       ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+               iova_ap_mapping, 2);
+       if (ret) {
+               IPAERR("Fail to read UC start/size iova addresses\n");
+               return ret;
+       }
+       cb->va_start = iova_ap_mapping[0];
+       cb->va_size = iova_ap_mapping[1];
+       cb->va_end = cb->va_start + cb->va_size;
+       IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
 
        if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
                    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
@@ -4330,26 +4387,33 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
                return -EOPNOTSUPP;
        }
 
+       IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
+
        cb->dev = dev;
        cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
-                       IPA_SMMU_UC_VA_START, IPA_SMMU_UC_VA_SIZE);
+                               cb->va_start, cb->va_size);
        if (IS_ERR(cb->mapping)) {
                IPADBG("Fail to create mapping\n");
                /* assume this failure is because iommu driver is not ready */
                return -EPROBE_DEFER;
        }
+       IPADBG("SMMU mapping created\n");
+       cb->valid = true;
 
-       if (smmu_disable_htw) {
+       IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
+       if (smmu_info.disable_htw) {
                if (iommu_domain_set_attr(cb->mapping->domain,
                                DOMAIN_ATTR_COHERENT_HTW_DISABLE,
                                 &disable_htw)) {
                        IPAERR("couldn't disable coherent HTW\n");
+                       arm_iommu_release_mapping(cb->mapping);
+                       cb->valid = false;
                        return -EIO;
                }
        }
 
        IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
-       if (smmu_s1_bypass) {
+       if (smmu_info.s1_bypass) {
                if (iommu_domain_set_attr(cb->mapping->domain,
                        DOMAIN_ATTR_S1_BYPASS,
                        &bypass)) {
@@ -4359,6 +4423,27 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
                        return -EIO;
                }
                IPADBG("SMMU S1 BYPASS\n");
+       } else {
+               if (iommu_domain_set_attr(cb->mapping->domain,
+                       DOMAIN_ATTR_ATOMIC,
+                       &atomic_ctx)) {
+                       IPAERR("couldn't set domain as atomic\n");
+                       arm_iommu_release_mapping(cb->mapping);
+                       cb->valid = false;
+                       return -EIO;
+               }
+               IPADBG("SMMU atomic set\n");
+               if (smmu_info.fast_map) {
+                       if (iommu_domain_set_attr(cb->mapping->domain,
+                               DOMAIN_ATTR_FAST,
+                               &fast)) {
+                               IPAERR("couldn't set fast map\n");
+                               arm_iommu_release_mapping(cb->mapping);
+                               cb->valid = false;
+                               return -EIO;
+                       }
+                       IPADBG("SMMU fast map set\n");
+               }
        }
 
        IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
@@ -4370,8 +4455,7 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
                return ret;
        }
 
-       cb->valid = true;
-       cb->next_addr = IPA_SMMU_UC_VA_END;
+       cb->next_addr = cb->va_end;
        ipa_ctx->uc_pdev = dev;
 
        IPADBG("UC CB PROBE pdev=%p attached\n", dev);
@@ -4380,13 +4464,26 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
 
 static int ipa_smmu_ap_cb_probe(struct device *dev)
 {
-       struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_AP];
+       struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
        int result;
        int disable_htw = 1;
        int atomic_ctx = 1;
+       int fast = 1;
        int bypass = 1;
+       u32 iova_ap_mapping[2];
 
-       IPADBG("sub pdev=%p\n", dev);
+       IPADBG("AP CB probe: sub pdev=%p\n", dev);
+
+       result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+                iova_ap_mapping, 2);
+       if (result) {
+               IPAERR("Fail to read AP start/size iova addresses\n");
+               return result;
+       }
+       cb->va_start = iova_ap_mapping[0];
+       cb->va_size = iova_ap_mapping[1];
+       cb->va_end = cb->va_start + cb->va_size;
+       IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
 
        if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
                    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
@@ -4396,25 +4493,29 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
 
        cb->dev = dev;
        cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
-                       IPA_SMMU_AP_VA_START, IPA_SMMU_AP_VA_SIZE);
+                                              cb->va_start,
+                                              cb->va_size);
        if (IS_ERR(cb->mapping)) {
                IPADBG("Fail to create mapping\n");
                /* assume this failure is because iommu driver is not ready */
                return -EPROBE_DEFER;
        }
+       IPADBG("SMMU mapping created\n");
+       cb->valid = true;
 
-
-       if (smmu_disable_htw) {
+       if (smmu_info.disable_htw) {
                if (iommu_domain_set_attr(cb->mapping->domain,
                                DOMAIN_ATTR_COHERENT_HTW_DISABLE,
                                 &disable_htw)) {
                        IPAERR("couldn't disable coherent HTW\n");
-                       arm_iommu_detach_device(cb->dev);
+                       arm_iommu_release_mapping(cb->mapping);
+                       cb->valid = false;
                        return -EIO;
                }
+               IPADBG("SMMU disable HTW\n");
        }
 
-       if (smmu_s1_bypass) {
+       if (smmu_info.s1_bypass) {
                if (iommu_domain_set_attr(cb->mapping->domain,
                        DOMAIN_ATTR_S1_BYPASS,
                        &bypass)) {
@@ -4435,25 +4536,41 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
                }
                IPADBG("SMMU atomic set\n");
 
-               IPADBG("map IPA region to AP_CB IOMMU\n");
-               result = iommu_map(cb->mapping->domain, 0x680000, 0x680000,
-                       0x64000,
-                       IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
-               if (result) {
-                       IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n",
-                               result);
-                       return result;
+               if (iommu_domain_set_attr(cb->mapping->domain,
+                       DOMAIN_ATTR_FAST,
+                       &fast)) {
+                       IPAERR("couldn't set fast map\n");
+                       arm_iommu_release_mapping(cb->mapping);
+                       cb->valid = false;
+                       return -EIO;
                }
+               IPADBG("SMMU fast map set\n");
        }
 
        result = arm_iommu_attach_device(cb->dev, cb->mapping);
        if (result) {
                IPAERR("couldn't attach to IOMMU ret=%d\n", result);
+               cb->valid = false;
                return result;
        }
 
-       cb->valid = true;
-       smmu_present = true;
+       if (!smmu_info.s1_bypass) {
+               IPAERR("map IPA region to AP_CB IOMMU\n");
+               result = ipa_iommu_map(cb->mapping->domain,
+                               rounddown(smmu_info.ipa_base, PAGE_SIZE),
+                               rounddown(smmu_info.ipa_base, PAGE_SIZE),
+                               roundup(smmu_info.ipa_size, PAGE_SIZE),
+                               IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+               if (result) {
+                       IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n",
+                               result);
+                       arm_iommu_release_mapping(cb->mapping);
+                       cb->valid = false;
+                       return result;
+               }
+       }
+
+       smmu_info.present = true;
 
        if (!bus_scale_table)
                bus_scale_table = msm_bus_cl_get_pdata(ipa_pdev);
@@ -4464,6 +4581,7 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
                IPAERR("ipa_init failed\n");
                arm_iommu_detach_device(cb->dev);
                arm_iommu_release_mapping(cb->mapping);
+               cb->valid = false;
                return result;
        }
 
@@ -4506,8 +4624,13 @@ int ipa_plat_drv_probe(struct platform_device *pdev_p,
        if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
                if (of_property_read_bool(pdev_p->dev.of_node,
                    "qcom,smmu-s1-bypass"))
-                       smmu_s1_bypass = true;
-               arm_smmu = true;
+                       smmu_info.s1_bypass = true;
+               if (of_property_read_bool(pdev_p->dev.of_node,
+                   "qcom,smmu-fast-map"))
+                       smmu_info.fast_map = true;
+               smmu_info.arm_smmu = true;
+               pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
+                       smmu_info.s1_bypass, smmu_info.fast_map);
                result = of_platform_populate(pdev_p->dev.of_node,
                                pdrv_match, NULL, &pdev_p->dev);
        } else if (of_property_read_bool(pdev_p->dev.of_node,
@@ -4589,6 +4712,39 @@ struct ipa_context *ipa_get_ctx(void)
        return ipa_ctx;
 }
 
+int ipa_iommu_map(struct iommu_domain *domain,
+       unsigned long iova, phys_addr_t paddr, size_t size, int prot)
+{
+       struct ipa_smmu_cb_ctx *ap_cb = ipa2_get_smmu_ctx();
+       struct ipa_smmu_cb_ctx *uc_cb = ipa2_get_uc_smmu_ctx();
+
+       IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
+       IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
+
+       /* make sure no overlapping */
+       if (domain == ipa2_get_smmu_domain()) {
+               if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
+                       IPAERR("iommu AP overlap addr 0x%lx\n", iova);
+                       ipa_assert();
+                       return -EFAULT;
+               }
+       } else if (domain == ipa2_get_wlan_smmu_domain()) {
+               /* wlan is one time map */
+       } else if (domain == ipa2_get_uc_smmu_domain()) {
+               if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
+                       IPAERR("iommu uC overlap addr 0x%lx\n", iova);
+                       ipa_assert();
+                       return -EFAULT;
+               }
+       } else {
+               IPAERR("Unexpected domain 0x%p\n", domain);
+               ipa_assert();
+               return -EFAULT;
+       }
+
+       return iommu_map(domain, iova, paddr, size, prot);
+}
+
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("IPA HW device driver");
 
drivers/platform/msm/ipa/ipa_v2/ipa_client.c
index d7bbd0a..64246ac 100644
@@ -100,6 +100,7 @@ static int ipa2_smmu_map_peer_bam(unsigned long dev)
        phys_addr_t base;
        u32 size;
        struct iommu_domain *smmu_domain;
+       struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
 
        if (!ipa_ctx->smmu_s1_bypass) {
                if (ipa_ctx->peer_bam_map_cnt == 0) {
@@ -109,19 +110,19 @@ static int ipa2_smmu_map_peer_bam(unsigned long dev)
                        }
                        smmu_domain = ipa2_get_smmu_domain();
                        if (smmu_domain != NULL) {
-                               if (iommu_map(smmu_domain,
-                                       IPA_SMMU_AP_VA_END,
+                               if (ipa_iommu_map(smmu_domain,
+                                       cb->va_end,
                                        rounddown(base, PAGE_SIZE),
                                        roundup(size + base -
                                        rounddown(base, PAGE_SIZE), PAGE_SIZE),
                                        IOMMU_READ | IOMMU_WRITE |
                                        IOMMU_DEVICE)) {
-                                       IPAERR("Fail to iommu_map\n");
+                                       IPAERR("Fail to ipa_iommu_map\n");
                                        return -EINVAL;
                                }
                        }
 
-                       ipa_ctx->peer_bam_iova = IPA_SMMU_AP_VA_END;
+                       ipa_ctx->peer_bam_iova = cb->va_end;
                        ipa_ctx->peer_bam_pa = base;
                        ipa_ctx->peer_bam_map_size = size;
                        ipa_ctx->peer_bam_dev = dev;
@@ -381,26 +382,26 @@ int ipa2_connect(const struct ipa_connect_params *in,
                base = ep->connect.data.iova;
                smmu_domain = ipa2_get_smmu_domain();
                if (smmu_domain != NULL) {
-                       if (iommu_map(smmu_domain,
+                       if (ipa_iommu_map(smmu_domain,
                                rounddown(base, PAGE_SIZE),
                                rounddown(base, PAGE_SIZE),
                                roundup(ep->connect.data.size + base -
                                        rounddown(base, PAGE_SIZE), PAGE_SIZE),
                                IOMMU_READ | IOMMU_WRITE)) {
-                               IPAERR("Fail to iommu_map data FIFO\n");
+                               IPAERR("Fail to ipa_iommu_map data FIFO\n");
                                goto iommu_map_data_fail;
                        }
                }
                ep->connect.desc.iova = ep->connect.desc.phys_base;
                base = ep->connect.desc.iova;
                if (smmu_domain != NULL) {
-                       if (iommu_map(smmu_domain,
+                       if (ipa_iommu_map(smmu_domain,
                                rounddown(base, PAGE_SIZE),
                                rounddown(base, PAGE_SIZE),
                                roundup(ep->connect.desc.size + base -
                                        rounddown(base, PAGE_SIZE), PAGE_SIZE),
                                IOMMU_READ | IOMMU_WRITE)) {
-                               IPAERR("Fail to iommu_map desc FIFO\n");
+                               IPAERR("Fail to ipa_iommu_map desc FIFO\n");
                                goto iommu_map_desc_fail;
                        }
                }
@@ -495,6 +496,7 @@ static int ipa2_smmu_unmap_peer_bam(unsigned long dev)
 {
        size_t len;
        struct iommu_domain *smmu_domain;
+       struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
 
        if (!ipa_ctx->smmu_s1_bypass) {
                WARN_ON(dev != ipa_ctx->peer_bam_dev);
@@ -507,7 +509,7 @@ static int ipa2_smmu_unmap_peer_bam(unsigned long dev)
                        smmu_domain = ipa2_get_smmu_domain();
                        if (smmu_domain != NULL) {
                                if (iommu_unmap(smmu_domain,
-                                       IPA_SMMU_AP_VA_END, len) != len) {
+                                       cb->va_end, len) != len) {
                                        IPAERR("Fail to iommu_unmap\n");
                                        return -EINVAL;
                                }
drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 178ca5a..1c4468d 100644
 #define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX)
 #define IPA_MEM_PART(x_) (ipa_ctx->ctrl->mem_partition.x_)
 
-#define IPA_SMMU_AP_VA_START 0x1000
-#define IPA_SMMU_AP_VA_SIZE 0x40000000
-#define IPA_SMMU_AP_VA_END (IPA_SMMU_AP_VA_START +  IPA_SMMU_AP_VA_SIZE)
-#define IPA_SMMU_UC_VA_START 0x40000000
-#define IPA_SMMU_UC_VA_SIZE 0x20000000
-#define IPA_SMMU_UC_VA_END (IPA_SMMU_UC_VA_START +  IPA_SMMU_UC_VA_SIZE)
-
 #define IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
 #define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96
 #define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
@@ -181,6 +174,9 @@ struct ipa_smmu_cb_ctx {
        struct dma_iommu_mapping *mapping;
        struct iommu_domain *iommu;
        unsigned long next_addr;
+       u32 va_start;
+       u32 va_size;
+       u32 va_end;
 };
 
 /**
@@ -1155,6 +1151,7 @@ struct ipa_context {
        struct ipa_flt_tbl flt_tbl[IPA_MAX_NUM_PIPES][IPA_IP_MAX];
        void __iomem *mmio;
        u32 ipa_wrapper_base;
+       u32 ipa_wrapper_size;
        struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX];
        struct ipa_hdr_tbl hdr_tbl;
        struct ipa_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
@@ -1923,9 +1920,11 @@ int ipa2_uc_mhi_print_stats(char *dbg_buff, int size);
 int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
 u32 ipa_get_num_pipes(void);
 u32 ipa_get_sys_yellow_wm(void);
+struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void);
 struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void);
 struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void);
 struct iommu_domain *ipa_get_uc_smmu_domain(void);
+struct iommu_domain *ipa2_get_wlan_smmu_domain(void);
 int ipa2_ap_suspend(struct device *dev);
 int ipa2_ap_resume(struct device *dev);
 struct iommu_domain *ipa2_get_smmu_domain(void);
@@ -1940,4 +1939,6 @@ int ipa2_restore_suspend_handler(void);
 void ipa_sps_irq_control_all(bool enable);
 void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client);
 void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client);
+int ipa_iommu_map(struct iommu_domain *domain, unsigned long iova,
+       phys_addr_t paddr, size_t size, int prot);
 #endif /* _IPA_I_H_ */
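The header above also declares the new ipa_iommu_map() wrapper (implemented in ipa.c earlier in this patch), which rejects IOVAs that overlap the AP or uC windows before falling through to iommu_map(). Callers in ipa_client.c and ipa_uc_wdi.c hand it page-rounded windows; a hypothetical example_map_window() sketching that calling convention, with illustrative arguments only:

#include <linux/kernel.h>       /* rounddown()/roundup() */
#include <linux/iommu.h>
#include "ipa_i.h"              /* ipa_iommu_map() prototype */

/* Hypothetical helper; identity-maps a window expanded to whole pages. */
static int example_map_window(struct iommu_domain *domain,
                              phys_addr_t base, size_t size)
{
        return ipa_iommu_map(domain,
                             rounddown(base, PAGE_SIZE),
                             rounddown(base, PAGE_SIZE),
                             roundup(size + base - rounddown(base, PAGE_SIZE),
                                     PAGE_SIZE),
                             IOMMU_READ | IOMMU_WRITE);
}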
drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 8ba4f04..a45b51a 100644
@@ -484,7 +484,7 @@ static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
                return -EINVAL;
        }
 
-       ret = iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+       ret = ipa_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
                        true_len,
                        device ? (prot | IOMMU_DEVICE) : prot);
        if (ret) {
@@ -525,7 +525,7 @@ static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
                phys = page_to_phys(sg_page(sg));
                len = PAGE_ALIGN(sg->offset + sg->length);
 
-               ret = iommu_map(cb->mapping->domain, va, phys, len, prot);
+               ret = ipa_iommu_map(cb->mapping->domain, va, phys, len, prot);
                if (ret) {
                        IPAERR("iommu map failed for pa=%pa len=%zu\n",
                                        &phys, len);
@@ -577,7 +577,7 @@ static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
        }
 
        if (ipa_ctx->wdi_map_cnt == 0)
-               cb->next_addr = IPA_SMMU_UC_VA_END;
+               cb->next_addr = cb->va_end;
 
 }
 
@@ -1574,7 +1574,7 @@ int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
        for (i = 0; i < num_buffers; i++) {
                IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
                        &info[i].pa, info[i].iova, info[i].size);
-               info[i].result = iommu_map(cb->iommu,
+               info[i].result = ipa_iommu_map(cb->iommu,
                        rounddown(info[i].iova, PAGE_SIZE),
                        rounddown(info[i].pa, PAGE_SIZE),
                        roundup(info[i].size + info[i].pa -
drivers/platform/msm/ipa/ipa_v3/ipa.c
index 535ed4d..f19853e 100644
@@ -211,10 +211,15 @@ static struct clk *smmu_clk;
 struct ipa3_context *ipa3_ctx;
 static struct device *master_dev;
 struct platform_device *ipa3_pdev;
-static bool smmu_present;
-static bool arm_smmu;
-static bool smmu_disable_htw;
-static bool smmu_s1_bypass;
+static struct {
+       bool present;
+       bool arm_smmu;
+       bool disable_htw;
+       bool fast_map;
+       bool s1_bypass;
+       u32 ipa_base;
+       u32 ipa_size;
+} smmu_info;
 
 static char *active_clients_table_buf;
 
@@ -396,22 +401,41 @@ struct iommu_domain *ipa3_get_smmu_domain(void)
 
 struct iommu_domain *ipa3_get_uc_smmu_domain(void)
 {
-       struct iommu_domain *domain = NULL;
-
        if (smmu_cb[IPA_SMMU_CB_UC].valid)
-               domain = smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
-       else
-               IPAERR("CB not valid\n");
+               return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
+
+       IPAERR("CB not valid\n");
+
+       return NULL;
+}
+
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
+{
+       if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
+               return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
+
+       IPAERR("CB not valid\n");
 
-       return domain;
+       return NULL;
 }
 
+
 struct device *ipa3_get_dma_dev(void)
 {
        return ipa3_ctx->pdev;
 }
 
 /**
+ * ipa3_get_smmu_ctx()- Return the AP smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void)
+{
+       return &smmu_cb[IPA_SMMU_CB_AP];
+}
+
+/**
  * ipa3_get_wlan_smmu_ctx()- Return the wlan smmu context
  *
  * Return value: pointer to smmu context address
@@ -2927,7 +2951,7 @@ static int ipa3_get_clks(struct device *dev)
                return PTR_ERR(ipa3_clk);
        }
 
-       if (smmu_present && arm_smmu) {
+       if (smmu_info.present && smmu_info.arm_smmu) {
                smmu_clk = clk_get(dev, "smmu_clk");
                if (IS_ERR(smmu_clk)) {
                        if (smmu_clk != ERR_PTR(-EPROBE_DEFER))
@@ -3975,12 +3999,13 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 
        ipa3_ctx->pdev = ipa_dev;
        ipa3_ctx->uc_pdev = ipa_dev;
-       ipa3_ctx->smmu_present = smmu_present;
+       ipa3_ctx->smmu_present = smmu_info.present;
        if (!ipa3_ctx->smmu_present)
                ipa3_ctx->smmu_s1_bypass = true;
        else
-               ipa3_ctx->smmu_s1_bypass = smmu_s1_bypass;
+               ipa3_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
        ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+       ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
        ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
        ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
        ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
@@ -4494,7 +4519,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
        ipa_drv_res->apply_rg10_wa = false;
        ipa_drv_res->gsi_ch20_wa = false;
 
-       smmu_disable_htw = of_property_read_bool(pdev->dev.of_node,
+       smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
                        "qcom,smmu-disable-htw");
 
        /* Get IPA HW Version */
@@ -4595,6 +4620,9 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
                        ipa_drv_res->ipa_mem_base,
                        ipa_drv_res->ipa_mem_size);
 
+       smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+       smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
        if (ipa_drv_res->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
                /* Get IPA BAM address */
                resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -4690,9 +4718,10 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 
 static int ipa_smmu_wlan_cb_probe(struct device *dev)
 {
-       struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_WLAN];
+       struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
        int disable_htw = 1;
        int atomic_ctx = 1;
+       int fast = 1;
        int bypass = 1;
        int ret;
 
@@ -4705,18 +4734,20 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
                /* assume this failure is because iommu driver is not ready */
                return -EPROBE_DEFER;
        }
+       cb->valid = true;
 
-       if (smmu_disable_htw) {
+       if (smmu_info.disable_htw) {
                ret = iommu_domain_set_attr(cb->iommu,
                        DOMAIN_ATTR_COHERENT_HTW_DISABLE,
                        &disable_htw);
                if (ret) {
                        IPAERR("couldn't disable coherent HTW\n");
+                       cb->valid = false;
                        return -EIO;
                }
        }
 
-       if (smmu_s1_bypass) {
+       if (smmu_info.s1_bypass) {
                if (iommu_domain_set_attr(cb->iommu,
                                        DOMAIN_ATTR_S1_BYPASS,
                                        &bypass)) {
@@ -4730,9 +4761,21 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
                                        DOMAIN_ATTR_ATOMIC,
                                        &atomic_ctx)) {
                        IPAERR("couldn't disable coherent HTW\n");
+                       cb->valid = false;
                        return -EIO;
                }
                IPADBG("SMMU ATTR ATOMIC\n");
+
+               if (smmu_info.fast_map) {
+                       if (iommu_domain_set_attr(cb->iommu,
+                                               DOMAIN_ATTR_FAST,
+                                               &fast)) {
+                               IPAERR("couldn't set fast map\n");
+                               cb->valid = false;
+                               return -EIO;
+                       }
+                       IPADBG("SMMU fast map set\n");
+               }
        }
 
        ret = iommu_attach_device(cb->iommu, dev);
@@ -4742,19 +4785,31 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
                return ret;
        }
 
-       cb->valid = true;
-
        return 0;
 }
 
 static int ipa_smmu_uc_cb_probe(struct device *dev)
 {
-       struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_UC];
+       struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
        int disable_htw = 1;
+       int atomic_ctx = 1;
        int bypass = 1;
+       int fast = 1;
        int ret;
+       u32 iova_ap_mapping[2];
 
-       IPADBG("sub pdev=%p\n", dev);
+       IPADBG("UC CB PROBE sub pdev=%p\n", dev);
+
+       ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+                       iova_ap_mapping, 2);
+       if (ret) {
+               IPAERR("Fail to read UC start/size iova addresses\n");
+               return ret;
+       }
+       cb->va_start = iova_ap_mapping[0];
+       cb->va_size = iova_ap_mapping[1];
+       cb->va_end = cb->va_start + cb->va_size;
+       IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
 
        if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
                    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
@@ -4762,26 +4817,33 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
                return -EOPNOTSUPP;
        }
 
+       IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
+
        cb->dev = dev;
        cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
-                       IPA_SMMU_UC_VA_START, IPA_SMMU_UC_VA_SIZE);
+                       cb->va_start, cb->va_size);
        if (IS_ERR(cb->mapping)) {
                IPADBG("Fail to create mapping\n");
                /* assume this failure is because iommu driver is not ready */
                return -EPROBE_DEFER;
        }
+       IPADBG("SMMU mapping created\n");
+       cb->valid = true;
 
-       if (smmu_disable_htw) {
+       IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
+       if (smmu_info.disable_htw) {
                if (iommu_domain_set_attr(cb->mapping->domain,
                                DOMAIN_ATTR_COHERENT_HTW_DISABLE,
                                 &disable_htw)) {
                        IPAERR("couldn't disable coherent HTW\n");
+                       arm_iommu_release_mapping(cb->mapping);
+                       cb->valid = false;
                        return -EIO;
                }
        }
 
        IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
-       if (smmu_s1_bypass) {
+       if (smmu_info.s1_bypass) {
                if (iommu_domain_set_attr(cb->mapping->domain,
                                DOMAIN_ATTR_S1_BYPASS,
                                &bypass)) {
@@ -4791,10 +4853,31 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
                        return -EIO;
                }
                IPADBG("SMMU S1 BYPASS\n");
-       }
-       IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
+       } else {
+               if (iommu_domain_set_attr(cb->mapping->domain,
+                               DOMAIN_ATTR_ATOMIC,
+                               &atomic_ctx)) {
+                       IPAERR("couldn't set domain as atomic\n");
+                       arm_iommu_release_mapping(cb->mapping);
+                       cb->valid = false;
+                       return -EIO;
+               }
+               IPADBG("SMMU atomic set\n");
 
+               if (smmu_info.fast_map) {
+                       if (iommu_domain_set_attr(cb->mapping->domain,
+                                       DOMAIN_ATTR_FAST,
+                                       &fast)) {
+                               IPAERR("couldn't set fast map\n");
+                               arm_iommu_release_mapping(cb->mapping);
+                               cb->valid = false;
+                               return -EIO;
+                       }
+                       IPADBG("SMMU fast map set\n");
+               }
+       }
 
+       IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
        ret = arm_iommu_attach_device(cb->dev, cb->mapping);
        if (ret) {
                IPAERR("could not attach device ret=%d\n", ret);
@@ -4803,8 +4886,7 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
                return ret;
        }
 
-       cb->valid = true;
-       cb->next_addr = IPA_SMMU_UC_VA_END;
+       cb->next_addr = cb->va_end;
        ipa3_ctx->uc_pdev = dev;
 
        return 0;
@@ -4812,13 +4894,26 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
 
 static int ipa_smmu_ap_cb_probe(struct device *dev)
 {
-       struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_AP];
+       struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
        int result;
        int disable_htw = 1;
        int atomic_ctx = 1;
+       int fast = 1;
        int bypass = 1;
+       u32 iova_ap_mapping[2];
 
-       IPADBG("sub pdev=%p\n", dev);
+       IPADBG("AP CB probe: sub pdev=%p\n", dev);
+
+       result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+               iova_ap_mapping, 2);
+       if (result) {
+               IPAERR("Fail to read AP start/size iova addresses\n");
+               return result;
+       }
+       cb->va_start = iova_ap_mapping[0];
+       cb->va_size = iova_ap_mapping[1];
+       cb->va_end = cb->va_start + cb->va_size;
+       IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
 
        if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
                    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
@@ -4828,23 +4923,27 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
 
        cb->dev = dev;
        cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
-                       IPA_SMMU_AP_VA_START, IPA_SMMU_AP_VA_SIZE);
+                                       cb->va_start, cb->va_size);
        if (IS_ERR(cb->mapping)) {
                IPADBG("Fail to create mapping\n");
                /* assume this failure is because iommu driver is not ready */
                return -EPROBE_DEFER;
        }
+       IPADBG("SMMU mapping created\n");
+       cb->valid = true;
 
-       if (smmu_disable_htw) {
+       if (smmu_info.disable_htw) {
                if (iommu_domain_set_attr(cb->mapping->domain,
                                DOMAIN_ATTR_COHERENT_HTW_DISABLE,
                                 &disable_htw)) {
                        IPAERR("couldn't disable coherent HTW\n");
-                       arm_iommu_detach_device(cb->dev);
+                       arm_iommu_release_mapping(cb->mapping);
+                       cb->valid = false;
                        return -EIO;
                }
+               IPADBG("SMMU disable HTW\n");
        }
-       if (smmu_s1_bypass) {
+       if (smmu_info.s1_bypass) {
                if (iommu_domain_set_attr(cb->mapping->domain,
                                DOMAIN_ATTR_S1_BYPASS,
                                &bypass)) {
@@ -4863,16 +4962,27 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
                        cb->valid = false;
                        return -EIO;
                }
+               IPADBG("SMMU atomic set\n");
+
+               if (iommu_domain_set_attr(cb->mapping->domain,
+                               DOMAIN_ATTR_FAST,
+                               &fast)) {
+                       IPAERR("couldn't set fast map\n");
+                       arm_iommu_release_mapping(cb->mapping);
+                       cb->valid = false;
+                       return -EIO;
+               }
+               IPADBG("SMMU fast map set\n");
        }
 
        result = arm_iommu_attach_device(cb->dev, cb->mapping);
        if (result) {
                IPAERR("couldn't attach to IOMMU ret=%d\n", result);
+               cb->valid = false;
                return result;
        }
 
-       cb->valid = true;
-       smmu_present = true;
+       smmu_info.present = true;
 
        if (!ipa3_bus_scale_table)
                ipa3_bus_scale_table = msm_bus_cl_get_pdata(ipa3_pdev);
@@ -4883,6 +4993,7 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
                IPAERR("ipa_init failed\n");
                arm_iommu_detach_device(cb->dev);
                arm_iommu_release_mapping(cb->mapping);
+               cb->valid = false;
                return result;
        }
 
@@ -5001,8 +5112,13 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p,
        if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
                if (of_property_read_bool(pdev_p->dev.of_node,
                    "qcom,smmu-s1-bypass"))
-                       smmu_s1_bypass = true;
-               arm_smmu = true;
+                       smmu_info.s1_bypass = true;
+               if (of_property_read_bool(pdev_p->dev.of_node,
+                       "qcom,smmu-fast-map"))
+                       smmu_info.fast_map = true;
+               smmu_info.arm_smmu = true;
+               pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
+                       smmu_info.s1_bypass, smmu_info.fast_map);
        } else if (of_property_read_bool(pdev_p->dev.of_node,
                                "qcom,msm-smmu")) {
                IPAERR("Legacy IOMMU not supported\n");
@@ -5233,5 +5349,38 @@ int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
        return 0;
 }
 
+int ipa3_iommu_map(struct iommu_domain *domain,
+       unsigned long iova, phys_addr_t paddr, size_t size, int prot)
+{
+       struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx();
+       struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_uc_smmu_ctx();
+
+       IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
+       IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
+
+       /* make sure no overlapping */
+       if (domain == ipa3_get_smmu_domain()) {
+               if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
+                       IPAERR("iommu AP overlap addr 0x%lx\n", iova);
+                       ipa_assert();
+                       return -EFAULT;
+               }
+       } else if (domain == ipa3_get_wlan_smmu_domain()) {
+               /* wlan is one time map */
+       } else if (domain == ipa3_get_uc_smmu_domain()) {
+               if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
+                       IPAERR("iommu uC overlap addr 0x%lx\n", iova);
+                       ipa_assert();
+                       return -EFAULT;
+               }
+       } else {
+               IPAERR("Unexpected domain 0x%p\n", domain);
+               ipa_assert();
+               return -EFAULT;
+       }
+
+       return iommu_map(domain, iova, paddr, size, prot);
+}
+
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("IPA HW device driver");
drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 2a48b2b..7b0376e 100644
@@ -123,6 +123,7 @@ static int ipa3_smmu_map_peer_bam(unsigned long dev)
        phys_addr_t base;
        u32 size;
        struct iommu_domain *smmu_domain;
+       struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
 
        if (!ipa3_ctx->smmu_s1_bypass) {
                if (ipa3_ctx->peer_bam_map_cnt == 0) {
@@ -132,19 +133,19 @@ static int ipa3_smmu_map_peer_bam(unsigned long dev)
                        }
                        smmu_domain = ipa3_get_smmu_domain();
                        if (smmu_domain != NULL) {
-                               if (iommu_map(smmu_domain,
-                                       IPA_SMMU_AP_VA_END,
+                               if (ipa3_iommu_map(smmu_domain,
+                                       cb->va_end,
                                        rounddown(base, PAGE_SIZE),
                                        roundup(size + base -
                                        rounddown(base, PAGE_SIZE), PAGE_SIZE),
                                        IOMMU_READ | IOMMU_WRITE |
                                        IOMMU_DEVICE)) {
-                                       IPAERR("Fail to iommu_map\n");
+                                       IPAERR("Fail to ipa3_iommu_map\n");
                                        return -EINVAL;
                                }
                        }
 
-                       ipa3_ctx->peer_bam_iova = IPA_SMMU_AP_VA_END;
+                       ipa3_ctx->peer_bam_iova = cb->va_end;
                        ipa3_ctx->peer_bam_pa = base;
                        ipa3_ctx->peer_bam_map_size = size;
                        ipa3_ctx->peer_bam_dev = dev;
@@ -393,26 +394,26 @@ int ipa3_connect(const struct ipa_connect_params *in,
                base = ep->connect.data.iova;
                smmu_domain = ipa_get_smmu_domain();
                if (smmu_domain != NULL) {
-                       if (iommu_map(smmu_domain,
+                       if (ipa3_iommu_map(smmu_domain,
                                rounddown(base, PAGE_SIZE),
                                rounddown(base, PAGE_SIZE),
                                roundup(ep->connect.data.size + base -
                                        rounddown(base, PAGE_SIZE), PAGE_SIZE),
                                IOMMU_READ | IOMMU_WRITE)) {
-                               IPAERR("Fail to iommu_map data FIFO\n");
+                               IPAERR("Fail to ipa3_iommu_map data FIFO\n");
                                goto iommu_map_data_fail;
                        }
                }
                ep->connect.desc.iova = ep->connect.desc.phys_base;
                base = ep->connect.desc.iova;
                if (smmu_domain != NULL) {
-                       if (iommu_map(smmu_domain,
+                       if (ipa3_iommu_map(smmu_domain,
                                rounddown(base, PAGE_SIZE),
                                rounddown(base, PAGE_SIZE),
                                roundup(ep->connect.desc.size + base -
                                        rounddown(base, PAGE_SIZE), PAGE_SIZE),
                                IOMMU_READ | IOMMU_WRITE)) {
-                               IPAERR("Fail to iommu_map desc FIFO\n");
+                               IPAERR("Fail to ipa3_iommu_map desc FIFO\n");
                                goto iommu_map_desc_fail;
                        }
                }
@@ -504,6 +505,7 @@ static int ipa3_smmu_unmap_peer_bam(unsigned long dev)
 {
        size_t len;
        struct iommu_domain *smmu_domain;
+       struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
 
        if (!ipa3_ctx->smmu_s1_bypass) {
                WARN_ON(dev != ipa3_ctx->peer_bam_dev);
@@ -516,7 +518,7 @@ static int ipa3_smmu_unmap_peer_bam(unsigned long dev)
                        smmu_domain = ipa3_get_smmu_domain();
                        if (smmu_domain != NULL) {
                                if (iommu_unmap(smmu_domain,
-                                       IPA_SMMU_AP_VA_END, len) != len) {
+                                       cb->va_end, len) != len) {
                                        IPAERR("Fail to iommu_unmap\n");
                                        return -EINVAL;
                                }
drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 47dfead..62f0c7f 100644
 #define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX)
 #define IPA_MEM_PART(x_) (ipa3_ctx->ctrl->mem_partition.x_)
 
-#define IPA_SMMU_AP_VA_START 0x1000
-#define IPA_SMMU_AP_VA_SIZE 0x40000000
-#define IPA_SMMU_AP_VA_END (IPA_SMMU_AP_VA_START +  IPA_SMMU_AP_VA_SIZE)
-#define IPA_SMMU_UC_VA_START 0x40000000
-#define IPA_SMMU_UC_VA_SIZE 0x20000000
-#define IPA_SMMU_UC_VA_END (IPA_SMMU_UC_VA_START +  IPA_SMMU_UC_VA_SIZE)
-
 #define IPA_GSI_CHANNEL_STOP_MAX_RETRY 10
 #define IPA_GSI_CHANNEL_STOP_PKT_SIZE 1
 
@@ -239,6 +232,9 @@ struct ipa_smmu_cb_ctx {
        struct dma_iommu_mapping *mapping;
        struct iommu_domain *iommu;
        unsigned long next_addr;
+       u32 va_start;
+       u32 va_size;
+       u32 va_end;
 };
 
 /**
@@ -1376,6 +1372,7 @@ struct ipa3_context {
        struct ipa3_flt_tbl flt_tbl[IPA3_MAX_NUM_PIPES][IPA_IP_MAX];
        void __iomem *mmio;
        u32 ipa_wrapper_base;
+       u32 ipa_wrapper_size;
        struct ipa3_hdr_tbl hdr_tbl;
        struct ipa3_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
        struct ipa3_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
@@ -2151,9 +2148,14 @@ struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx);
 void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val);
 
 u32 ipa3_get_num_pipes(void);
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void);
 struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void);
 struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void);
-struct iommu_domain *ipa_get_uc_smmu_domain(void);
+struct iommu_domain *ipa3_get_smmu_domain(void);
+struct iommu_domain *ipa3_get_uc_smmu_domain(void);
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void);
+int ipa3_iommu_map(struct iommu_domain *domain, unsigned long iova,
+       phys_addr_t paddr, size_t size, int prot);
 int ipa3_ap_suspend(struct device *dev);
 int ipa3_ap_resume(struct device *dev);
 int ipa3_init_interrupts(void);
drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 393ae2a..1caccdd 100644
@@ -509,7 +509,7 @@ static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
                return -EINVAL;
        }
 
-       ret = iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+       ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
                        true_len,
                        device ? (prot | IOMMU_DEVICE) : prot);
        if (ret) {
@@ -550,7 +550,7 @@ static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
                phys = page_to_phys(sg_page(sg));
                len = PAGE_ALIGN(sg->offset + sg->length);
 
-               ret = iommu_map(cb->mapping->domain, va, phys, len, prot);
+               ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot);
                if (ret) {
                        IPAERR("iommu map failed for pa=%pa len=%zu\n",
                                        &phys, len);
@@ -605,7 +605,7 @@ static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
        }
 
        if (ipa3_ctx->wdi_map_cnt == 0)
-               cb->next_addr = IPA_SMMU_UC_VA_END;
+               cb->next_addr = cb->va_end;
 
 }
 
@@ -1761,7 +1761,7 @@ int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
        for (i = 0; i < num_buffers; i++) {
                IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
                        &info[i].pa, info[i].iova, info[i].size);
-               info[i].result = iommu_map(cb->iommu,
+               info[i].result = ipa3_iommu_map(cb->iommu,
                        rounddown(info[i].iova, PAGE_SIZE),
                        rounddown(info[i].pa, PAGE_SIZE),
                        roundup(info[i].size + info[i].pa -