- qcom,arm-smmu: SMMU is present and ARM SMMU driver is used
- qcom,msm-smmu: SMMU is present and QSMMU driver is used
- qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass
+- qcom,smmu-fast-map: Boolean context flag to set SMMU to fastpath mode (see
+  the sketch below)
- ipa_smmu_ap: AP general purpose SMMU device
compatible "qcom,ipa-smmu-ap-cb"
- ipa_smmu_wlan: WDI SMMU device
- clock-names: This property shall contain the clock input names used by the
  driver, in the same order as the clocks property. This should be "iface_clk"
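
For illustration, a root IPA node selecting the ARM SMMU driver with fastpath
mapping could carry the flags as in the minimal sketch below (the node name,
unit address, and root compatible string are assumptions here, and required
properties such as reg are elided). The driver reads qcom,smmu-s1-bypass and
qcom,smmu-fast-map only when qcom,arm-smmu is present, and applies the
fast-map attribute only when S1 bypass is not requested.

	ipa_hw: qcom,ipa@680000 {
		compatible = "qcom,ipa";
		qcom,arm-smmu;
		qcom,smmu-fast-map;
	};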
+IPA SMMU sub nodes
+
+-compatible: "qcom,ipa-smmu-ap-cb" - represents the AP context bank.
+
+-compatible: "qcom,ipa-smmu-wlan-cb" - represents IPA WLAN context bank.
+
+-compatible: "qcom,ipa-smmu-uc-cb" - represents IPA uC context bank (for uC
+ offload scenarios).
+- iommus : the phandle and stream IDs for the SMMU used by this root
+
+- qcom,iova-mapping: specifies the start address and size of iova space.
+
IPA SMP2P sub nodes
-compatible: "qcom,smp2pgpio-map-ipa-1-out" - represents the out gpio from
compatible = "qcom,smp2pgpio-map-ipa-1-in";
gpios = <&smp2pgpio_ipa_1_in 0 0>;
};
+
+ ipa_smmu_ap: ipa_smmu_ap {
+ compatible = "qcom,ipa-smmu-ap-cb";
+ iommus = <&anoc2_smmu 0x30>;
+ qcom,iova-mapping = <0x10000000 0x40000000>;
+ };
+
+ ipa_smmu_wlan: ipa_smmu_wlan {
+ compatible = "qcom,ipa-smmu-wlan-cb";
+ iommus = <&anoc2_smmu 0x31>;
+ };
+
+ ipa_smmu_uc: ipa_smmu_uc {
+ compatible = "qcom,ipa-smmu-uc-cb";
+ iommus = <&anoc2_smmu 0x32>;
+ qcom,iova-mapping = <0x40000000 0x20000000>;
+ };
};
struct ipa_context *ipa_ctx;
static struct device *master_dev;
struct platform_device *ipa_pdev;
-static bool smmu_present;
-static bool arm_smmu;
-static bool smmu_disable_htw;
-static bool smmu_s1_bypass;
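+/* SMMU state shared across context banks: DT-selected flags plus the IPA
+ * register space used for the identity mapping */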
+static struct {
+ bool present;
+ bool arm_smmu;
+ bool disable_htw;
+ bool fast_map;
+ bool s1_bypass;
+ u32 ipa_base;
+ u32 ipa_size;
+} smmu_info;
static char *active_clients_table_buf;
return NULL;
}
-struct iommu_domain *ipa_get_uc_smmu_domain(void)
+struct iommu_domain *ipa2_get_uc_smmu_domain(void)
{
-	struct iommu_domain *domain = NULL;
-
 	if (smmu_cb[IPA_SMMU_CB_UC].valid)
-		domain = smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
-	else
-		IPAERR("CB not valid\n");
-
-	return domain;
+		return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
+}
+
+struct iommu_domain *ipa2_get_wlan_smmu_domain(void)
+{
+	if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
+		return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
 }
struct device *ipa2_get_dma_dev(void)
}
/**
+ * ipa2_get_smmu_ctx()- Return the smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void)
+{
+ return &smmu_cb[IPA_SMMU_CB_AP];
+}
+
+
+/**
* ipa2_get_wlan_smmu_ctx()- Return the wlan smmu context
*
* Return value: pointer to smmu context address
return PTR_ERR(ipa_clk);
}
- if (smmu_present && arm_smmu) {
+ if (smmu_info.present && smmu_info.arm_smmu) {
smmu_clk = clk_get(dev, "smmu_clk");
if (IS_ERR(smmu_clk)) {
if (smmu_clk != ERR_PTR(-EPROBE_DEFER))
ipa_ctx->pdev = ipa_dev;
ipa_ctx->uc_pdev = ipa_dev;
- ipa_ctx->smmu_present = smmu_present;
+ ipa_ctx->smmu_present = smmu_info.present;
if (!ipa_ctx->smmu_present)
ipa_ctx->smmu_s1_bypass = true;
else
- ipa_ctx->smmu_s1_bypass = smmu_s1_bypass;
+ ipa_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+ ipa_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
ipa_ctx->ipa_hw_type = resource_p->ipa_hw_type;
ipa_ctx->ipa_hw_mode = resource_p->ipa_hw_mode;
ipa_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
ipa_drv_res->modem_cfg_emb_pipe_flt = false;
ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
- smmu_disable_htw = of_property_read_bool(pdev->dev.of_node,
+ smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
"qcom,smmu-disable-htw");
/* Get IPA HW Version */
ipa_drv_res->ipa_mem_base,
ipa_drv_res->ipa_mem_size);
+ smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+ smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
/* Get IPA BAM address */
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"bam-base");
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
- struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_WLAN];
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
int disable_htw = 1;
int atomic_ctx = 1;
+ int fast = 1;
int bypass = 1;
int ret;
/* assume this failure is because iommu driver is not ready */
return -EPROBE_DEFER;
}
+ cb->valid = true;
- if (smmu_disable_htw) {
+ if (smmu_info.disable_htw) {
ret = iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_COHERENT_HTW_DISABLE,
&disable_htw);
if (ret) {
IPAERR("couldn't disable coherent HTW\n");
+ cb->valid = false;
return -EIO;
}
}
- if (smmu_s1_bypass) {
+ if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_S1_BYPASS,
&bypass)) {
return -EIO;
}
IPADBG("SMMU atomic set\n");
+ if (smmu_info.fast_map) {
+ if (iommu_domain_set_attr(cb->iommu,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU fast map set\n");
+ }
}
ret = iommu_attach_device(cb->iommu, dev);
return ret;
}
- if (!smmu_s1_bypass) {
+ if (!smmu_info.s1_bypass) {
IPAERR("map IPA region to WLAN_CB IOMMU\n");
- ret = iommu_map(cb->iommu, 0x680000, 0x680000,
- 0x64000,
+ ret = ipa_iommu_map(cb->iommu,
+ rounddown(smmu_info.ipa_base, PAGE_SIZE),
+ rounddown(smmu_info.ipa_base, PAGE_SIZE),
+ roundup(smmu_info.ipa_size, PAGE_SIZE),
IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
if (ret) {
IPAERR("map IPA to WLAN_CB IOMMU failed ret=%d\n",
ret);
+ arm_iommu_detach_device(cb->dev);
+ cb->valid = false;
return ret;
}
}
- cb->valid = true;
-
return 0;
}
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
- struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_UC];
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
int disable_htw = 1;
+ int atomic_ctx = 1;
int ret;
+ int fast = 1;
int bypass = 1;
+ u32 iova_ap_mapping[2];
- IPADBG("sub pdev=%p\n", dev);
+ IPADBG("UC CB PROBE sub pdev=%p\n", dev);
+
+ ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+ iova_ap_mapping, 2);
+ if (ret) {
+ IPAERR("Fail to read UC start/size iova addresses\n");
+ return ret;
+ }
+ cb->va_start = iova_ap_mapping[0];
+ cb->va_size = iova_ap_mapping[1];
+ cb->va_end = cb->va_start + cb->va_size;
+ IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
return -EOPNOTSUPP;
}
+ IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
+
cb->dev = dev;
cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
- IPA_SMMU_UC_VA_START, IPA_SMMU_UC_VA_SIZE);
+ cb->va_start, cb->va_size);
if (IS_ERR(cb->mapping)) {
IPADBG("Fail to create mapping\n");
/* assume this failure is because iommu driver is not ready */
return -EPROBE_DEFER;
}
+ IPADBG("SMMU mapping created\n");
+ cb->valid = true;
- if (smmu_disable_htw) {
+ IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
+ if (smmu_info.disable_htw) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_COHERENT_HTW_DISABLE,
&disable_htw)) {
IPAERR("couldn't disable coherent HTW\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
return -EIO;
}
}
IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
- if (smmu_s1_bypass) {
+ if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
&bypass)) {
return -EIO;
}
IPADBG("SMMU S1 BYPASS\n");
+ } else {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx)) {
+ IPAERR("couldn't set domain as atomic\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU atomic set\n");
+ if (smmu_info.fast_map) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU fast map set\n");
+ }
}
IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
return ret;
}
- cb->valid = true;
- cb->next_addr = IPA_SMMU_UC_VA_END;
+ cb->next_addr = cb->va_end;
ipa_ctx->uc_pdev = dev;
IPADBG("UC CB PROBE pdev=%p attached\n", dev);
static int ipa_smmu_ap_cb_probe(struct device *dev)
{
- struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_AP];
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
int result;
int disable_htw = 1;
int atomic_ctx = 1;
+ int fast = 1;
int bypass = 1;
+ u32 iova_ap_mapping[2];
- IPADBG("sub pdev=%p\n", dev);
+ IPADBG("AP CB probe: sub pdev=%p\n", dev);
+
+ result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+ iova_ap_mapping, 2);
+ if (result) {
+ IPAERR("Fail to read AP start/size iova addresses\n");
+ return result;
+ }
+ cb->va_start = iova_ap_mapping[0];
+ cb->va_size = iova_ap_mapping[1];
+ cb->va_end = cb->va_start + cb->va_size;
+ IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
cb->dev = dev;
cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
- IPA_SMMU_AP_VA_START, IPA_SMMU_AP_VA_SIZE);
+ cb->va_start,
+ cb->va_size);
if (IS_ERR(cb->mapping)) {
IPADBG("Fail to create mapping\n");
/* assume this failure is because iommu driver is not ready */
return -EPROBE_DEFER;
}
+ IPADBG("SMMU mapping created\n");
+ cb->valid = true;
-
- if (smmu_disable_htw) {
+ if (smmu_info.disable_htw) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_COHERENT_HTW_DISABLE,
&disable_htw)) {
IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_detach_device(cb->dev);
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
return -EIO;
}
+ IPADBG("SMMU disable HTW\n");
}
- if (smmu_s1_bypass) {
+ if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
&bypass)) {
}
IPADBG("SMMU atomic set\n");
- IPADBG("map IPA region to AP_CB IOMMU\n");
- result = iommu_map(cb->mapping->domain, 0x680000, 0x680000,
- 0x64000,
- IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
- if (result) {
- IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n",
- result);
- return result;
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
}
+ IPADBG("SMMU fast map set\n");
}
result = arm_iommu_attach_device(cb->dev, cb->mapping);
if (result) {
IPAERR("couldn't attach to IOMMU ret=%d\n", result);
+ cb->valid = false;
return result;
}
- cb->valid = true;
- smmu_present = true;
+ if (!smmu_info.s1_bypass) {
+ IPAERR("map IPA region to AP_CB IOMMU\n");
+ result = ipa_iommu_map(cb->mapping->domain,
+ rounddown(smmu_info.ipa_base, PAGE_SIZE),
+ rounddown(smmu_info.ipa_base, PAGE_SIZE),
+ roundup(smmu_info.ipa_size, PAGE_SIZE),
+ IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+ if (result) {
+ IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n",
+ result);
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return result;
+ }
+ }
+
+ smmu_info.present = true;
if (!bus_scale_table)
bus_scale_table = msm_bus_cl_get_pdata(ipa_pdev);
IPAERR("ipa_init failed\n");
arm_iommu_detach_device(cb->dev);
arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
return result;
}
if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
if (of_property_read_bool(pdev_p->dev.of_node,
"qcom,smmu-s1-bypass"))
- smmu_s1_bypass = true;
- arm_smmu = true;
+ smmu_info.s1_bypass = true;
+ if (of_property_read_bool(pdev_p->dev.of_node,
+ "qcom,smmu-fast-map"))
+ smmu_info.fast_map = true;
+ smmu_info.arm_smmu = true;
+ pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
+ smmu_info.s1_bypass, smmu_info.fast_map);
result = of_platform_populate(pdev_p->dev.of_node,
pdrv_match, NULL, &pdev_p->dev);
} else if (of_property_read_bool(pdev_p->dev.of_node,
return ipa_ctx;
}
+int ipa_iommu_map(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr, size_t size, int prot)
+{
+ struct ipa_smmu_cb_ctx *ap_cb = ipa2_get_smmu_ctx();
+ struct ipa_smmu_cb_ctx *uc_cb = ipa2_get_uc_smmu_ctx();
+
+ IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
+ IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
+
+ /* make sure no overlapping */
+ if (domain == ipa2_get_smmu_domain()) {
+ if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
+ IPAERR("iommu AP overlap addr 0x%lx\n", iova);
+ ipa_assert();
+ return -EFAULT;
+ }
+ } else if (domain == ipa2_get_wlan_smmu_domain()) {
+ /* wlan is one time map */
+ } else if (domain == ipa2_get_uc_smmu_domain()) {
+ if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
+ IPAERR("iommu uC overlap addr 0x%lx\n", iova);
+ ipa_assert();
+ return -EFAULT;
+ }
+ } else {
+ IPAERR("Unexpected domain 0x%p\n", domain);
+ ipa_assert();
+ return -EFAULT;
+ }
+
+ return iommu_map(domain, iova, paddr, size, prot);
+}
+
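A minimal caller-side sketch of the new wrapper (illustrative only; the helper
name below is hypothetical, not part of the patch): peer regions are
page-aligned with rounddown()/roundup() exactly as the converted call sites do,
and ipa_iommu_map() rejects any IOVA that overlaps the AP or uC IOVA pools
before falling through to iommu_map().

	/* Hypothetical caller: map a peer region into the AP domain. */
	static int example_map_peer_region(phys_addr_t base, size_t size)
	{
		struct iommu_domain *domain = ipa2_get_smmu_domain();
		unsigned long iova = rounddown(base, PAGE_SIZE);
		size_t len = roundup(size + base - rounddown(base, PAGE_SIZE),
				     PAGE_SIZE);

		if (!domain)
			return -ENODEV;

		/* returns -EFAULT if iova lands inside the AP IOVA pool */
		return ipa_iommu_map(domain, iova, iova, len,
				     IOMMU_READ | IOMMU_WRITE);
	}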
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA HW device driver");
phys_addr_t base;
u32 size;
struct iommu_domain *smmu_domain;
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
if (!ipa_ctx->smmu_s1_bypass) {
if (ipa_ctx->peer_bam_map_cnt == 0) {
}
smmu_domain = ipa2_get_smmu_domain();
if (smmu_domain != NULL) {
- if (iommu_map(smmu_domain,
- IPA_SMMU_AP_VA_END,
+ if (ipa_iommu_map(smmu_domain,
+ cb->va_end,
rounddown(base, PAGE_SIZE),
roundup(size + base -
rounddown(base, PAGE_SIZE), PAGE_SIZE),
IOMMU_READ | IOMMU_WRITE |
IOMMU_DEVICE)) {
- IPAERR("Fail to iommu_map\n");
+ IPAERR("Fail to ipa_iommu_map\n");
return -EINVAL;
}
}
- ipa_ctx->peer_bam_iova = IPA_SMMU_AP_VA_END;
+ ipa_ctx->peer_bam_iova = cb->va_end;
ipa_ctx->peer_bam_pa = base;
ipa_ctx->peer_bam_map_size = size;
ipa_ctx->peer_bam_dev = dev;
base = ep->connect.data.iova;
smmu_domain = ipa2_get_smmu_domain();
if (smmu_domain != NULL) {
- if (iommu_map(smmu_domain,
+ if (ipa_iommu_map(smmu_domain,
rounddown(base, PAGE_SIZE),
rounddown(base, PAGE_SIZE),
roundup(ep->connect.data.size + base -
rounddown(base, PAGE_SIZE), PAGE_SIZE),
IOMMU_READ | IOMMU_WRITE)) {
- IPAERR("Fail to iommu_map data FIFO\n");
+ IPAERR("Fail to ipa_iommu_map data FIFO\n");
goto iommu_map_data_fail;
}
}
ep->connect.desc.iova = ep->connect.desc.phys_base;
base = ep->connect.desc.iova;
if (smmu_domain != NULL) {
- if (iommu_map(smmu_domain,
+ if (ipa_iommu_map(smmu_domain,
rounddown(base, PAGE_SIZE),
rounddown(base, PAGE_SIZE),
roundup(ep->connect.desc.size + base -
rounddown(base, PAGE_SIZE), PAGE_SIZE),
IOMMU_READ | IOMMU_WRITE)) {
- IPAERR("Fail to iommu_map desc FIFO\n");
+ IPAERR("Fail to ipa_iommu_map desc FIFO\n");
goto iommu_map_desc_fail;
}
}
{
size_t len;
struct iommu_domain *smmu_domain;
+ struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
if (!ipa_ctx->smmu_s1_bypass) {
WARN_ON(dev != ipa_ctx->peer_bam_dev);
smmu_domain = ipa2_get_smmu_domain();
if (smmu_domain != NULL) {
if (iommu_unmap(smmu_domain,
- IPA_SMMU_AP_VA_END, len) != len) {
+ cb->va_end, len) != len) {
IPAERR("Fail to iommu_unmap\n");
return -EINVAL;
}
#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX)
#define IPA_MEM_PART(x_) (ipa_ctx->ctrl->mem_partition.x_)
-#define IPA_SMMU_AP_VA_START 0x1000
-#define IPA_SMMU_AP_VA_SIZE 0x40000000
-#define IPA_SMMU_AP_VA_END (IPA_SMMU_AP_VA_START + IPA_SMMU_AP_VA_SIZE)
-#define IPA_SMMU_UC_VA_START 0x40000000
-#define IPA_SMMU_UC_VA_SIZE 0x20000000
-#define IPA_SMMU_UC_VA_END (IPA_SMMU_UC_VA_START + IPA_SMMU_UC_VA_SIZE)
-
#define IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96
#define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
struct dma_iommu_mapping *mapping;
struct iommu_domain *iommu;
unsigned long next_addr;
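+	/* IOVA pool for this context bank, read from the qcom,iova-mapping
+	 * DT property at probe time; va_end = va_start + va_size */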
+ u32 va_start;
+ u32 va_size;
+ u32 va_end;
};
/**
struct ipa_flt_tbl flt_tbl[IPA_MAX_NUM_PIPES][IPA_IP_MAX];
void __iomem *mmio;
u32 ipa_wrapper_base;
+ u32 ipa_wrapper_size;
struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX];
struct ipa_hdr_tbl hdr_tbl;
struct ipa_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
u32 ipa_get_num_pipes(void);
u32 ipa_get_sys_yellow_wm(void);
+struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void);
struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void);
struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void);
-struct iommu_domain *ipa_get_uc_smmu_domain(void);
+struct iommu_domain *ipa2_get_uc_smmu_domain(void);
+struct iommu_domain *ipa2_get_wlan_smmu_domain(void);
int ipa2_ap_suspend(struct device *dev);
int ipa2_ap_resume(struct device *dev);
struct iommu_domain *ipa2_get_smmu_domain(void);
void ipa_sps_irq_control_all(bool enable);
void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client);
void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client);
+int ipa_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
#endif /* _IPA_I_H_ */
return -EINVAL;
}
- ret = iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+ ret = ipa_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
true_len,
device ? (prot | IOMMU_DEVICE) : prot);
if (ret) {
phys = page_to_phys(sg_page(sg));
len = PAGE_ALIGN(sg->offset + sg->length);
- ret = iommu_map(cb->mapping->domain, va, phys, len, prot);
+ ret = ipa_iommu_map(cb->mapping->domain, va, phys, len, prot);
if (ret) {
IPAERR("iommu map failed for pa=%pa len=%zu\n",
&phys, len);
}
if (ipa_ctx->wdi_map_cnt == 0)
- cb->next_addr = IPA_SMMU_UC_VA_END;
+ cb->next_addr = cb->va_end;
}
for (i = 0; i < num_buffers; i++) {
IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
&info[i].pa, info[i].iova, info[i].size);
- info[i].result = iommu_map(cb->iommu,
+ info[i].result = ipa_iommu_map(cb->iommu,
rounddown(info[i].iova, PAGE_SIZE),
rounddown(info[i].pa, PAGE_SIZE),
roundup(info[i].size + info[i].pa -
struct ipa3_context *ipa3_ctx;
static struct device *master_dev;
struct platform_device *ipa3_pdev;
-static bool smmu_present;
-static bool arm_smmu;
-static bool smmu_disable_htw;
-static bool smmu_s1_bypass;
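+/* SMMU state shared across context banks: DT-selected flags plus the IPA
+ * register space used for the identity mapping */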
+static struct {
+ bool present;
+ bool arm_smmu;
+ bool disable_htw;
+ bool fast_map;
+ bool s1_bypass;
+ u32 ipa_base;
+ u32 ipa_size;
+} smmu_info;
static char *active_clients_table_buf;
struct iommu_domain *ipa3_get_uc_smmu_domain(void)
{
-	struct iommu_domain *domain = NULL;
-
 	if (smmu_cb[IPA_SMMU_CB_UC].valid)
-		domain = smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
-	else
-		IPAERR("CB not valid\n");
-
-	return domain;
+		return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
+}
+
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
+{
+	if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
+		return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
 }
+
struct device *ipa3_get_dma_dev(void)
{
return ipa3_ctx->pdev;
}
/**
+ * ipa3_get_smmu_ctx()- Return the smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void)
+{
+ return &smmu_cb[IPA_SMMU_CB_AP];
+}
+
+/**
* ipa3_get_wlan_smmu_ctx()- Return the wlan smmu context
*
* Return value: pointer to smmu context address
return PTR_ERR(ipa3_clk);
}
- if (smmu_present && arm_smmu) {
+ if (smmu_info.present && smmu_info.arm_smmu) {
smmu_clk = clk_get(dev, "smmu_clk");
if (IS_ERR(smmu_clk)) {
if (smmu_clk != ERR_PTR(-EPROBE_DEFER))
ipa3_ctx->pdev = ipa_dev;
ipa3_ctx->uc_pdev = ipa_dev;
- ipa3_ctx->smmu_present = smmu_present;
+ ipa3_ctx->smmu_present = smmu_info.present;
if (!ipa3_ctx->smmu_present)
ipa3_ctx->smmu_s1_bypass = true;
else
- ipa3_ctx->smmu_s1_bypass = smmu_s1_bypass;
+ ipa3_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+ ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
ipa_drv_res->apply_rg10_wa = false;
ipa_drv_res->gsi_ch20_wa = false;
- smmu_disable_htw = of_property_read_bool(pdev->dev.of_node,
+ smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
"qcom,smmu-disable-htw");
/* Get IPA HW Version */
ipa_drv_res->ipa_mem_base,
ipa_drv_res->ipa_mem_size);
+ smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+ smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
if (ipa_drv_res->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
/* Get IPA BAM address */
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
- struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_WLAN];
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
int disable_htw = 1;
int atomic_ctx = 1;
+ int fast = 1;
int bypass = 1;
int ret;
/* assume this failure is because iommu driver is not ready */
return -EPROBE_DEFER;
}
+ cb->valid = true;
- if (smmu_disable_htw) {
+ if (smmu_info.disable_htw) {
ret = iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_COHERENT_HTW_DISABLE,
&disable_htw);
if (ret) {
IPAERR("couldn't disable coherent HTW\n");
+ cb->valid = false;
return -EIO;
}
}
- if (smmu_s1_bypass) {
+ if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_S1_BYPASS,
&bypass)) {
DOMAIN_ATTR_ATOMIC,
&atomic_ctx)) {
IPAERR("couldn't disable coherent HTW\n");
+ cb->valid = false;
return -EIO;
}
IPADBG("SMMU ATTR ATOMIC\n");
+
+ if (smmu_info.fast_map) {
+ if (iommu_domain_set_attr(cb->iommu,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU fast map set\n");
+ }
}
ret = iommu_attach_device(cb->iommu, dev);
return ret;
}
- cb->valid = true;
-
return 0;
}
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
- struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_UC];
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
int disable_htw = 1;
+ int atomic_ctx = 1;
int bypass = 1;
+ int fast = 1;
int ret;
+ u32 iova_ap_mapping[2];
- IPADBG("sub pdev=%p\n", dev);
+ IPADBG("UC CB PROBE sub pdev=%p\n", dev);
+
+ ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+ iova_ap_mapping, 2);
+ if (ret) {
+ IPAERR("Fail to read UC start/size iova addresses\n");
+ return ret;
+ }
+ cb->va_start = iova_ap_mapping[0];
+ cb->va_size = iova_ap_mapping[1];
+ cb->va_end = cb->va_start + cb->va_size;
+ IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
return -EOPNOTSUPP;
}
+ IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
+
cb->dev = dev;
cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
- IPA_SMMU_UC_VA_START, IPA_SMMU_UC_VA_SIZE);
+ cb->va_start, cb->va_size);
if (IS_ERR(cb->mapping)) {
IPADBG("Fail to create mapping\n");
/* assume this failure is because iommu driver is not ready */
return -EPROBE_DEFER;
}
+ IPADBG("SMMU mapping created\n");
+ cb->valid = true;
- if (smmu_disable_htw) {
+ IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
+ if (smmu_info.disable_htw) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_COHERENT_HTW_DISABLE,
&disable_htw)) {
IPAERR("couldn't disable coherent HTW\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
return -EIO;
}
}
IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
- if (smmu_s1_bypass) {
+ if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
&bypass)) {
return -EIO;
}
IPADBG("SMMU S1 BYPASS\n");
- }
- IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
+ } else {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx)) {
+ IPAERR("couldn't set domain as atomic\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU atomic set\n");
+ if (smmu_info.fast_map) {
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU fast map set\n");
+ }
+ }
+ IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
ret = arm_iommu_attach_device(cb->dev, cb->mapping);
if (ret) {
IPAERR("could not attach device ret=%d\n", ret);
return ret;
}
- cb->valid = true;
- cb->next_addr = IPA_SMMU_UC_VA_END;
+ cb->next_addr = cb->va_end;
ipa3_ctx->uc_pdev = dev;
return 0;
static int ipa_smmu_ap_cb_probe(struct device *dev)
{
- struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_AP];
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
int result;
int disable_htw = 1;
int atomic_ctx = 1;
+ int fast = 1;
int bypass = 1;
+ u32 iova_ap_mapping[2];
- IPADBG("sub pdev=%p\n", dev);
+ IPADBG("AP CB probe: sub pdev=%p\n", dev);
+
+ result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+ iova_ap_mapping, 2);
+ if (result) {
+ IPAERR("Fail to read AP start/size iova addresses\n");
+ return result;
+ }
+ cb->va_start = iova_ap_mapping[0];
+ cb->va_size = iova_ap_mapping[1];
+ cb->va_end = cb->va_start + cb->va_size;
+ IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
cb->dev = dev;
cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
- IPA_SMMU_AP_VA_START, IPA_SMMU_AP_VA_SIZE);
+ cb->va_start, cb->va_size);
if (IS_ERR(cb->mapping)) {
IPADBG("Fail to create mapping\n");
/* assume this failure is because iommu driver is not ready */
return -EPROBE_DEFER;
}
+ IPADBG("SMMU mapping created\n");
+ cb->valid = true;
- if (smmu_disable_htw) {
+ if (smmu_info.disable_htw) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_COHERENT_HTW_DISABLE,
&disable_htw)) {
IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_detach_device(cb->dev);
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
return -EIO;
}
+ IPADBG("SMMU disable HTW\n");
}
- if (smmu_s1_bypass) {
+ if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
&bypass)) {
cb->valid = false;
return -EIO;
}
+ IPADBG("SMMU atomic set\n");
+
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ IPAERR("couldn't set fast map\n");
+ arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
+ return -EIO;
+ }
+ IPADBG("SMMU fast map set\n");
}
result = arm_iommu_attach_device(cb->dev, cb->mapping);
if (result) {
IPAERR("couldn't attach to IOMMU ret=%d\n", result);
+ cb->valid = false;
return result;
}
- cb->valid = true;
- smmu_present = true;
+ smmu_info.present = true;
if (!ipa3_bus_scale_table)
ipa3_bus_scale_table = msm_bus_cl_get_pdata(ipa3_pdev);
IPAERR("ipa_init failed\n");
arm_iommu_detach_device(cb->dev);
arm_iommu_release_mapping(cb->mapping);
+ cb->valid = false;
return result;
}
if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
if (of_property_read_bool(pdev_p->dev.of_node,
"qcom,smmu-s1-bypass"))
- smmu_s1_bypass = true;
- arm_smmu = true;
+ smmu_info.s1_bypass = true;
+ if (of_property_read_bool(pdev_p->dev.of_node,
+ "qcom,smmu-fast-map"))
+ smmu_info.fast_map = true;
+ smmu_info.arm_smmu = true;
+ pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
+ smmu_info.s1_bypass, smmu_info.fast_map);
} else if (of_property_read_bool(pdev_p->dev.of_node,
"qcom,msm-smmu")) {
IPAERR("Legacy IOMMU not supported\n");
return 0;
}
+int ipa3_iommu_map(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr, size_t size, int prot)
+{
+ struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx();
+ struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_uc_smmu_ctx();
+
+ IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
+ IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
+
+ /* make sure no overlapping */
+ if (domain == ipa3_get_smmu_domain()) {
+ if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
+ IPAERR("iommu AP overlap addr 0x%lx\n", iova);
+ ipa_assert();
+ return -EFAULT;
+ }
+ } else if (domain == ipa3_get_wlan_smmu_domain()) {
+ /* wlan is one time map */
+ } else if (domain == ipa3_get_uc_smmu_domain()) {
+ if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
+ IPAERR("iommu uC overlap addr 0x%lx\n", iova);
+ ipa_assert();
+ return -EFAULT;
+ }
+ } else {
+ IPAERR("Unexpected domain 0x%p\n", domain);
+ ipa_assert();
+ return -EFAULT;
+ }
+
+ return iommu_map(domain, iova, paddr, size, prot);
+}
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA HW device driver");
phys_addr_t base;
u32 size;
struct iommu_domain *smmu_domain;
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
if (!ipa3_ctx->smmu_s1_bypass) {
if (ipa3_ctx->peer_bam_map_cnt == 0) {
}
smmu_domain = ipa3_get_smmu_domain();
if (smmu_domain != NULL) {
- if (iommu_map(smmu_domain,
- IPA_SMMU_AP_VA_END,
+ if (ipa3_iommu_map(smmu_domain,
+ cb->va_end,
rounddown(base, PAGE_SIZE),
roundup(size + base -
rounddown(base, PAGE_SIZE), PAGE_SIZE),
IOMMU_READ | IOMMU_WRITE |
IOMMU_DEVICE)) {
- IPAERR("Fail to iommu_map\n");
+ IPAERR("Fail to ipa3_iommu_map\n");
return -EINVAL;
}
}
- ipa3_ctx->peer_bam_iova = IPA_SMMU_AP_VA_END;
+ ipa3_ctx->peer_bam_iova = cb->va_end;
ipa3_ctx->peer_bam_pa = base;
ipa3_ctx->peer_bam_map_size = size;
ipa3_ctx->peer_bam_dev = dev;
base = ep->connect.data.iova;
	smmu_domain = ipa3_get_smmu_domain();
if (smmu_domain != NULL) {
- if (iommu_map(smmu_domain,
+ if (ipa3_iommu_map(smmu_domain,
rounddown(base, PAGE_SIZE),
rounddown(base, PAGE_SIZE),
roundup(ep->connect.data.size + base -
rounddown(base, PAGE_SIZE), PAGE_SIZE),
IOMMU_READ | IOMMU_WRITE)) {
- IPAERR("Fail to iommu_map data FIFO\n");
+ IPAERR("Fail to ipa3_iommu_map data FIFO\n");
goto iommu_map_data_fail;
}
}
ep->connect.desc.iova = ep->connect.desc.phys_base;
base = ep->connect.desc.iova;
if (smmu_domain != NULL) {
- if (iommu_map(smmu_domain,
+ if (ipa3_iommu_map(smmu_domain,
rounddown(base, PAGE_SIZE),
rounddown(base, PAGE_SIZE),
roundup(ep->connect.desc.size + base -
rounddown(base, PAGE_SIZE), PAGE_SIZE),
IOMMU_READ | IOMMU_WRITE)) {
- IPAERR("Fail to iommu_map desc FIFO\n");
+ IPAERR("Fail to ipa3_iommu_map desc FIFO\n");
goto iommu_map_desc_fail;
}
}
{
size_t len;
struct iommu_domain *smmu_domain;
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
if (!ipa3_ctx->smmu_s1_bypass) {
WARN_ON(dev != ipa3_ctx->peer_bam_dev);
smmu_domain = ipa3_get_smmu_domain();
if (smmu_domain != NULL) {
if (iommu_unmap(smmu_domain,
- IPA_SMMU_AP_VA_END, len) != len) {
+ cb->va_end, len) != len) {
IPAERR("Fail to iommu_unmap\n");
return -EINVAL;
}
#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX)
#define IPA_MEM_PART(x_) (ipa3_ctx->ctrl->mem_partition.x_)
-#define IPA_SMMU_AP_VA_START 0x1000
-#define IPA_SMMU_AP_VA_SIZE 0x40000000
-#define IPA_SMMU_AP_VA_END (IPA_SMMU_AP_VA_START + IPA_SMMU_AP_VA_SIZE)
-#define IPA_SMMU_UC_VA_START 0x40000000
-#define IPA_SMMU_UC_VA_SIZE 0x20000000
-#define IPA_SMMU_UC_VA_END (IPA_SMMU_UC_VA_START + IPA_SMMU_UC_VA_SIZE)
-
#define IPA_GSI_CHANNEL_STOP_MAX_RETRY 10
#define IPA_GSI_CHANNEL_STOP_PKT_SIZE 1
struct dma_iommu_mapping *mapping;
struct iommu_domain *iommu;
unsigned long next_addr;
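+	/* IOVA pool for this context bank, read from the qcom,iova-mapping
+	 * DT property at probe time; va_end = va_start + va_size */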
+ u32 va_start;
+ u32 va_size;
+ u32 va_end;
};
/**
struct ipa3_flt_tbl flt_tbl[IPA3_MAX_NUM_PIPES][IPA_IP_MAX];
void __iomem *mmio;
u32 ipa_wrapper_base;
+ u32 ipa_wrapper_size;
struct ipa3_hdr_tbl hdr_tbl;
struct ipa3_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
struct ipa3_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val);
u32 ipa3_get_num_pipes(void);
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void);
struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void);
struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void);
-struct iommu_domain *ipa_get_uc_smmu_domain(void);
+struct iommu_domain *ipa3_get_smmu_domain(void);
+struct iommu_domain *ipa3_get_uc_smmu_domain(void);
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void);
+int ipa3_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
int ipa3_ap_suspend(struct device *dev);
int ipa3_ap_resume(struct device *dev);
int ipa3_init_interrupts(void);
return -EINVAL;
}
- ret = iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+ ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
true_len,
device ? (prot | IOMMU_DEVICE) : prot);
if (ret) {
phys = page_to_phys(sg_page(sg));
len = PAGE_ALIGN(sg->offset + sg->length);
- ret = iommu_map(cb->mapping->domain, va, phys, len, prot);
+ ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot);
if (ret) {
IPAERR("iommu map failed for pa=%pa len=%zu\n",
&phys, len);
}
if (ipa3_ctx->wdi_map_cnt == 0)
- cb->next_addr = IPA_SMMU_UC_VA_END;
+ cb->next_addr = cb->va_end;
}
for (i = 0; i < num_buffers; i++) {
IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
&info[i].pa, info[i].iova, info[i].size);
- info[i].result = iommu_map(cb->iommu,
+ info[i].result = ipa3_iommu_map(cb->iommu,
rounddown(info[i].iova, PAGE_SIZE),
rounddown(info[i].pa, PAGE_SIZE),
roundup(info[i].size + info[i].pa -