1 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
14 #include <linux/genalloc.h> /* gen_pool_alloc() */
16 #include <linux/ratelimit.h>
17 #include <linux/msm-bus.h>
18 #include <linux/msm-bus-board.h>
19 #include <linux/msm_gsi.h>
20 #include <linux/elf.h>
22 #include "ipahal/ipahal.h"
23 #include "ipahal/ipahal_fltrt.h"
24 #include "../ipa_rm_i.h"
26 #define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
27 #define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
28 #define IPA_V3_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
29 #define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967296 - 1)
31 #define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000)
32 #define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600)
34 #define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000
35 #define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10
37 /* Max pipes + ICs for TAG process */
38 #define IPA_TAG_MAX_DESC (IPA3_MAX_NUM_PIPES + 6)
40 #define IPA_TAG_SLEEP_MIN_USEC (1000)
41 #define IPA_TAG_SLEEP_MAX_USEC (2000)
42 #define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
43 #define IPA_BCR_REG_VAL_v3_0 (0x00000001)
44 #define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
45 #define IPA_AGGR_GRAN_MIN (1)
46 #define IPA_AGGR_GRAN_MAX (32)
47 #define IPA_EOT_COAL_GRAN_MIN (1)
48 #define IPA_EOT_COAL_GRAN_MAX (16)
50 #define IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC (15)
52 #define IPA_AGGR_BYTE_LIMIT (\
53 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
54 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
55 #define IPA_AGGR_PKT_LIMIT (\
56 IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \
57 IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT)
59 /* In IPAv3 only endpoints 0-3 can be configured to deaggregation */
60 #define IPA_EP_SUPPORTS_DEAGGR(idx) ((idx) >= 0 && (idx) <= 3)
62 /* configure IPA spare register 1 in order to have correct IPA version
63 * set bits 0,2,3 and 4. see SpareBits documentation.xlsx
65 #define IPA_SPARE_REG_1_VAL (0x0000081D)
68 /* HPS, DPS sequencers Types*/
69 #define IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY 0x00000000
70 /* DMA + DECIPHER/CIPHER */
71 #define IPA_DPS_HPS_SEQ_TYPE_DMA_DEC 0x00000011
72 /* Packet Processing + no decipher + uCP (for Ethernet Bridging) */
73 #define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP 0x00000002
74 /* Packet Processing + decipher + uCP */
75 #define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_UCP 0x00000013
76 /* 2 Packet Processing pass + no decipher + uCP */
77 #define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP 0x00000004
78 /* 2 Packet Processing pass + decipher + uCP */
79 #define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP 0x00000015
80 /* Packet Processing + no decipher + no uCP */
81 #define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP 0x00000006
82 /* Packet Processing + no decipher + no uCP */
83 #define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_NO_UCP 0x00000017
85 #define IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP 0x00000020
86 /* Invalid sequencer type */
87 #define IPA_DPS_HPS_SEQ_TYPE_INVALID 0xFFFFFFFF
89 #define IPA_DPS_HPS_SEQ_TYPE_IS_DMA(seq_type) \
90 (seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY || \
91 seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_DEC || \
92 seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP)
94 #define QMB_MASTER_SELECT_DDR (0)
95 #define QMB_MASTER_SELECT_PCIE (1)
97 #define IPA_CLIENT_NOT_USED \
98 {IPA_EP_NOT_ALLOCATED, IPA_EP_NOT_ALLOCATED, false, \
99 IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR}
101 /* Resource Group index*/
102 #define IPA_GROUP_UL (0)
103 #define IPA_GROUP_DL (1)
104 #define IPA_GROUP_DPL IPA_GROUP_DL
105 #define IPA_GROUP_DIAG (2)
106 #define IPA_GROUP_DMA (3)
107 #define IPA_GROUP_IMM_CMD IPA_GROUP_UL
108 #define IPA_GROUP_Q6ZIP (4)
109 #define IPA_GROUP_Q6ZIP_GENERAL IPA_GROUP_Q6ZIP
110 #define IPA_GROUP_UC_RX_Q (5)
111 #define IPA_GROUP_Q6ZIP_ENGINE IPA_GROUP_UC_RX_Q
112 #define IPA_GROUP_MAX (6)
/* Source-pipe resource types tracked per resource group.
 * NOTE: IPA_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS keeps its historical
 * "SRS" spelling; it is referenced elsewhere with the same name.
 */
enum ipa_rsrc_grp_type_src {
	IPA_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS,
	IPA_RSRC_GRP_TYPE_SRC_HDR_SECTORS,
	IPA_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER,
	IPA_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
	IPA_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
	IPA_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS,
	IPA_RSRC_GRP_TYPE_SRC_HPS_DMARS,
	IPA_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
	IPA_RSRC_GRP_TYPE_SRC_MAX,
};
/* Destination-pipe resource types tracked per resource group. */
enum ipa_rsrc_grp_type_dst {
	IPA_RSRC_GRP_TYPE_DST_DATA_SECTORS,
	IPA_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS,
	IPA_RSRC_GRP_TYPE_DST_DPS_DMARS,
	IPA_RSRC_GRP_TYPE_DST_MAX,
};
/* RX-path resource types tracked per resource group. */
enum ipa_rsrc_grp_type_rx {
	IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ,
	IPA_RSRC_GRP_TYPE_RX_MAX
};
135 struct rsrc_min_max {
140 static const struct rsrc_min_max ipa3_rsrc_src_grp_config
141 [IPA_RSRC_GRP_TYPE_SRC_MAX][IPA_GROUP_MAX] = {
142 /*UL DL DIAG DMA Not Used uC Rx*/
143 [IPA_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
144 {3, 255}, {3, 255}, {1, 255}, {1, 255}, {1, 255}, {2, 255} },
145 [IPA_RSRC_GRP_TYPE_SRC_HDR_SECTORS] = {
146 {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
147 [IPA_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER] = {
148 {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
149 [IPA_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
150 {14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} },
151 [IPA_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
152 {19, 19}, {26, 26}, {3, 3}, {7, 7}, {0, 0}, {8, 8} },
153 [IPA_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS] = {
154 {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
155 [IPA_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
156 {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
157 [IPA_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
158 {14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} },
160 static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
161 [IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = {
162 /*UL DL/DPL DIAG DMA Q6zip_gen Q6zip_eng*/
163 [IPA_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
164 {2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} },
165 [IPA_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = {
166 {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
167 [IPA_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
168 {1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} },
170 static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
171 [IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = {
172 /*UL DL DIAG DMA Not Used uC Rx*/
173 [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
174 {16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} },
182 struct ipa_ep_configuration {
190 static const struct ipa_ep_configuration ipa3_ep_mapping
191 [IPA_VER_MAX][IPA_CLIENT_MAX] = {
192 [IPA_3_0][IPA_CLIENT_HSIC1_PROD] = IPA_CLIENT_NOT_USED,
193 [IPA_3_0][IPA_CLIENT_WLAN1_PROD] = {10, IPA_GROUP_UL, true,
194 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
195 QMB_MASTER_SELECT_DDR},
196 [IPA_3_0][IPA_CLIENT_HSIC2_PROD] = IPA_CLIENT_NOT_USED,
197 [IPA_3_0][IPA_CLIENT_USB2_PROD] = IPA_CLIENT_NOT_USED,
198 [IPA_3_0][IPA_CLIENT_HSIC3_PROD] = IPA_CLIENT_NOT_USED,
199 [IPA_3_0][IPA_CLIENT_USB3_PROD] = IPA_CLIENT_NOT_USED,
200 [IPA_3_0][IPA_CLIENT_HSIC4_PROD] = IPA_CLIENT_NOT_USED,
201 [IPA_3_0][IPA_CLIENT_USB4_PROD] = IPA_CLIENT_NOT_USED,
202 [IPA_3_0][IPA_CLIENT_HSIC5_PROD] = IPA_CLIENT_NOT_USED,
203 [IPA_3_0][IPA_CLIENT_USB_PROD] = {1, IPA_GROUP_UL, true,
204 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
205 QMB_MASTER_SELECT_DDR},
206 [IPA_3_0][IPA_CLIENT_UC_USB_PROD] = {2, IPA_GROUP_UL, true,
207 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
208 QMB_MASTER_SELECT_DDR},
209 [IPA_3_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = IPA_CLIENT_NOT_USED,
210 [IPA_3_0][IPA_CLIENT_A2_EMBEDDED_PROD] = IPA_CLIENT_NOT_USED,
211 [IPA_3_0][IPA_CLIENT_A2_TETHERED_PROD] = IPA_CLIENT_NOT_USED,
212 [IPA_3_0][IPA_CLIENT_APPS_LAN_PROD]
213 = {14, IPA_GROUP_DL, false,
214 IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
215 QMB_MASTER_SELECT_DDR},
216 [IPA_3_0][IPA_CLIENT_APPS_WAN_PROD]
217 = {3, IPA_GROUP_UL, true,
218 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
219 QMB_MASTER_SELECT_DDR},
220 [IPA_3_0][IPA_CLIENT_APPS_CMD_PROD]
221 = {22, IPA_GROUP_IMM_CMD, false,
222 IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
223 QMB_MASTER_SELECT_DDR},
224 [IPA_3_0][IPA_CLIENT_ODU_PROD] = {12, IPA_GROUP_UL, true,
225 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
226 QMB_MASTER_SELECT_DDR},
227 [IPA_3_0][IPA_CLIENT_MHI_PROD] = {0, IPA_GROUP_UL, true,
228 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
229 QMB_MASTER_SELECT_PCIE},
230 [IPA_3_0][IPA_CLIENT_Q6_LAN_PROD] = {9, IPA_GROUP_UL, false,
231 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
232 QMB_MASTER_SELECT_DDR},
233 [IPA_3_0][IPA_CLIENT_Q6_WAN_PROD] = {5, IPA_GROUP_DL,
234 true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
235 QMB_MASTER_SELECT_DDR},
236 [IPA_3_0][IPA_CLIENT_Q6_CMD_PROD]
237 = {6, IPA_GROUP_IMM_CMD, false,
238 IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
239 QMB_MASTER_SELECT_DDR},
240 [IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD] = {7, IPA_GROUP_Q6ZIP,
241 false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
242 QMB_MASTER_SELECT_DDR},
243 [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD] = {8, IPA_GROUP_Q6ZIP,
244 false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
245 QMB_MASTER_SELECT_DDR},
246 [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
247 = {12, IPA_GROUP_DMA, false,
248 IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
249 QMB_MASTER_SELECT_PCIE},
250 [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
251 = {13, IPA_GROUP_DMA, false,
252 IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
253 QMB_MASTER_SELECT_PCIE},
254 /* Only for test purpose */
255 [IPA_3_0][IPA_CLIENT_TEST_PROD] = {1, IPA_GROUP_UL, true,
256 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
257 QMB_MASTER_SELECT_DDR},
258 [IPA_3_0][IPA_CLIENT_TEST1_PROD] = {1, IPA_GROUP_UL, true,
259 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
260 QMB_MASTER_SELECT_DDR},
261 [IPA_3_0][IPA_CLIENT_TEST2_PROD] = {3, IPA_GROUP_UL, true,
262 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
263 QMB_MASTER_SELECT_DDR},
264 [IPA_3_0][IPA_CLIENT_TEST3_PROD] = {12, IPA_GROUP_UL, true,
265 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
266 QMB_MASTER_SELECT_DDR},
267 [IPA_3_0][IPA_CLIENT_TEST4_PROD] = {13, IPA_GROUP_UL, true,
268 IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
269 QMB_MASTER_SELECT_DDR},
271 [IPA_3_0][IPA_CLIENT_HSIC1_CONS] = IPA_CLIENT_NOT_USED,
272 [IPA_3_0][IPA_CLIENT_WLAN1_CONS] = {25, IPA_GROUP_DL, false,
273 IPA_DPS_HPS_SEQ_TYPE_INVALID,
274 QMB_MASTER_SELECT_DDR},
275 [IPA_3_0][IPA_CLIENT_HSIC2_CONS] = IPA_CLIENT_NOT_USED,
276 [IPA_3_0][IPA_CLIENT_USB2_CONS] = IPA_CLIENT_NOT_USED,
277 [IPA_3_0][IPA_CLIENT_WLAN2_CONS] = {27, IPA_GROUP_DL, false,
278 IPA_DPS_HPS_SEQ_TYPE_INVALID,
279 QMB_MASTER_SELECT_DDR},
280 [IPA_3_0][IPA_CLIENT_HSIC3_CONS] = IPA_CLIENT_NOT_USED,
281 [IPA_3_0][IPA_CLIENT_USB3_CONS] = IPA_CLIENT_NOT_USED,
282 [IPA_3_0][IPA_CLIENT_WLAN3_CONS] = {28, IPA_GROUP_DL, false,
283 IPA_DPS_HPS_SEQ_TYPE_INVALID,
284 QMB_MASTER_SELECT_DDR},
285 [IPA_3_0][IPA_CLIENT_HSIC4_CONS] = IPA_CLIENT_NOT_USED,
286 [IPA_3_0][IPA_CLIENT_USB4_CONS] = IPA_CLIENT_NOT_USED,
287 [IPA_3_0][IPA_CLIENT_WLAN4_CONS] = {29, IPA_GROUP_DL, false,
288 IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
289 QMB_MASTER_SELECT_DDR},
290 [IPA_3_0][IPA_CLIENT_HSIC5_CONS] = IPA_CLIENT_NOT_USED,
291 [IPA_3_0][IPA_CLIENT_USB_CONS] = {26, IPA_GROUP_DL, false,
292 IPA_DPS_HPS_SEQ_TYPE_INVALID,
293 QMB_MASTER_SELECT_DDR},
294 [IPA_3_0][IPA_CLIENT_USB_DPL_CONS] = {17, IPA_GROUP_DPL, false,
295 IPA_DPS_HPS_SEQ_TYPE_INVALID,
296 QMB_MASTER_SELECT_DDR},
297 [IPA_3_0][IPA_CLIENT_A2_EMBEDDED_CONS] = IPA_CLIENT_NOT_USED,
298 [IPA_3_0][IPA_CLIENT_A2_TETHERED_CONS] = IPA_CLIENT_NOT_USED,
299 [IPA_3_0][IPA_CLIENT_A5_LAN_WAN_CONS] = IPA_CLIENT_NOT_USED,
300 [IPA_3_0][IPA_CLIENT_APPS_LAN_CONS] = {15, IPA_GROUP_UL, false,
301 IPA_DPS_HPS_SEQ_TYPE_INVALID,
302 QMB_MASTER_SELECT_DDR},
303 [IPA_3_0][IPA_CLIENT_APPS_WAN_CONS] = {16, IPA_GROUP_DL, false,
304 IPA_DPS_HPS_SEQ_TYPE_INVALID,
305 QMB_MASTER_SELECT_DDR},
306 [IPA_3_0][IPA_CLIENT_ODU_EMB_CONS] = {23, IPA_GROUP_DL, false,
307 IPA_DPS_HPS_SEQ_TYPE_INVALID,
308 QMB_MASTER_SELECT_DDR},
309 [IPA_3_0][IPA_CLIENT_ODU_TETH_CONS] = IPA_CLIENT_NOT_USED,
310 [IPA_3_0][IPA_CLIENT_MHI_CONS] = {23, IPA_GROUP_DL, false,
311 IPA_DPS_HPS_SEQ_TYPE_INVALID,
312 QMB_MASTER_SELECT_PCIE},
313 [IPA_3_0][IPA_CLIENT_Q6_LAN_CONS] = {19, IPA_GROUP_DL, false,
314 IPA_DPS_HPS_SEQ_TYPE_INVALID,
315 QMB_MASTER_SELECT_DDR},
316 [IPA_3_0][IPA_CLIENT_Q6_WAN_CONS] = {18, IPA_GROUP_UL, false,
317 IPA_DPS_HPS_SEQ_TYPE_INVALID,
318 QMB_MASTER_SELECT_DDR},
319 [IPA_3_0][IPA_CLIENT_Q6_DUN_CONS] = {30, IPA_GROUP_DIAG,
320 false, IPA_DPS_HPS_SEQ_TYPE_INVALID,
321 QMB_MASTER_SELECT_DDR},
322 [IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS]
323 = {21, IPA_GROUP_Q6ZIP, false,
324 IPA_DPS_HPS_SEQ_TYPE_INVALID,
325 QMB_MASTER_SELECT_DDR},
326 [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS]
327 = {4, IPA_GROUP_Q6ZIP, false,
328 IPA_DPS_HPS_SEQ_TYPE_INVALID,
329 QMB_MASTER_SELECT_DDR},
330 [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
331 = {28, IPA_GROUP_DMA, false,
332 IPA_DPS_HPS_SEQ_TYPE_INVALID,
333 QMB_MASTER_SELECT_PCIE},
334 [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
335 = {29, IPA_GROUP_DMA, false,
336 IPA_DPS_HPS_SEQ_TYPE_INVALID,
337 QMB_MASTER_SELECT_PCIE},
338 [IPA_3_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
339 /* Only for test purpose */
340 [IPA_3_0][IPA_CLIENT_TEST_CONS] = {26, IPA_GROUP_DL, false,
341 IPA_DPS_HPS_SEQ_TYPE_INVALID,
342 QMB_MASTER_SELECT_DDR},
343 [IPA_3_0][IPA_CLIENT_TEST1_CONS] = {26, IPA_GROUP_DL, false,
344 IPA_DPS_HPS_SEQ_TYPE_INVALID,
345 QMB_MASTER_SELECT_DDR},
346 [IPA_3_0][IPA_CLIENT_TEST2_CONS] = {27, IPA_GROUP_DL, false,
347 IPA_DPS_HPS_SEQ_TYPE_INVALID,
348 QMB_MASTER_SELECT_DDR},
349 [IPA_3_0][IPA_CLIENT_TEST3_CONS] = {28, IPA_GROUP_DL, false,
350 IPA_DPS_HPS_SEQ_TYPE_INVALID,
351 QMB_MASTER_SELECT_DDR},
352 [IPA_3_0][IPA_CLIENT_TEST4_CONS] = {29, IPA_GROUP_DL, false,
353 IPA_DPS_HPS_SEQ_TYPE_INVALID,
354 QMB_MASTER_SELECT_DDR},
357 /* this array include information tuple:
358 {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee} */
359 static struct ipa_gsi_ep_config ipa_gsi_ep_info[] = {
390 static struct msm_bus_vectors ipa_init_vectors_v3_0[] = {
392 .src = MSM_BUS_MASTER_IPA,
393 .dst = MSM_BUS_SLAVE_EBI_CH0,
398 .src = MSM_BUS_MASTER_IPA,
399 .dst = MSM_BUS_SLAVE_OCIMEM,
405 static struct msm_bus_vectors ipa_nominal_perf_vectors_v3_0[] = {
407 .src = MSM_BUS_MASTER_IPA,
408 .dst = MSM_BUS_SLAVE_EBI_CH0,
413 .src = MSM_BUS_MASTER_IPA,
414 .dst = MSM_BUS_SLAVE_OCIMEM,
420 static struct msm_bus_paths ipa_usecases_v3_0[] = {
422 ARRAY_SIZE(ipa_init_vectors_v3_0),
423 ipa_init_vectors_v3_0,
426 ARRAY_SIZE(ipa_nominal_perf_vectors_v3_0),
427 ipa_nominal_perf_vectors_v3_0,
431 static struct msm_bus_scale_pdata ipa_bus_client_pdata_v3_0 = {
433 ARRAY_SIZE(ipa_usecases_v3_0),
437 void ipa3_active_clients_lock(void)
441 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
442 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
443 ipa3_ctx->ipa3_active_clients.mutex_locked = true;
444 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
447 int ipa3_active_clients_trylock(unsigned long *flags)
449 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
450 if (ipa3_ctx->ipa3_active_clients.mutex_locked) {
451 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock,
459 void ipa3_active_clients_trylock_unlock(unsigned long *flags)
461 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
464 void ipa3_active_clients_unlock(void)
468 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
469 ipa3_ctx->ipa3_active_clients.mutex_locked = false;
470 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
471 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
475 * ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an
478 * @resource: [IN] IPA Resource Manager resource
479 * @clients: [OUT] Empty array which will contain the list of clients. The
480 * caller must initialize this array.
482 * Return codes: 0 on success, negative on failure.
484 int ipa3_get_clients_from_rm_resource(
485 enum ipa_rm_resource_name resource,
486 struct ipa3_client_names *clients)
491 resource >= IPA_RM_RESOURCE_MAX ||
493 IPAERR("Bad parameters\n");
498 case IPA_RM_RESOURCE_USB_CONS:
499 clients->names[i++] = IPA_CLIENT_USB_CONS;
501 case IPA_RM_RESOURCE_USB_DPL_CONS:
502 clients->names[i++] = IPA_CLIENT_USB_DPL_CONS;
504 case IPA_RM_RESOURCE_HSIC_CONS:
505 clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
507 case IPA_RM_RESOURCE_WLAN_CONS:
508 clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
509 clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
510 clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
511 clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
513 case IPA_RM_RESOURCE_MHI_CONS:
514 clients->names[i++] = IPA_CLIENT_MHI_CONS;
516 case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
517 clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
518 clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
520 case IPA_RM_RESOURCE_USB_PROD:
521 clients->names[i++] = IPA_CLIENT_USB_PROD;
523 case IPA_RM_RESOURCE_HSIC_PROD:
524 clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
526 case IPA_RM_RESOURCE_MHI_PROD:
527 clients->names[i++] = IPA_CLIENT_MHI_PROD;
529 case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
530 clients->names[i++] = IPA_CLIENT_ODU_PROD;
540 * ipa3_should_pipe_be_suspended() - returns true when the client's pipe should
541 * be suspended during a power save scenario. False otherwise.
543 * @client: [IN] IPA client
545 bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
547 struct ipa3_ep_context *ep;
550 ipa_ep_idx = ipa3_get_ep_mapping(client);
551 if (ipa_ep_idx == -1) {
552 IPAERR("Invalid client.\n");
557 ep = &ipa3_ctx->ep[ipa_ep_idx];
559 if (ep->keep_ipa_awake)
562 if (client == IPA_CLIENT_USB_CONS ||
563 client == IPA_CLIENT_USB_DPL_CONS ||
564 client == IPA_CLIENT_MHI_CONS ||
565 client == IPA_CLIENT_HSIC1_CONS ||
566 client == IPA_CLIENT_WLAN1_CONS ||
567 client == IPA_CLIENT_WLAN2_CONS ||
568 client == IPA_CLIENT_WLAN3_CONS ||
569 client == IPA_CLIENT_WLAN4_CONS ||
570 client == IPA_CLIENT_ODU_EMB_CONS ||
571 client == IPA_CLIENT_ODU_TETH_CONS)
578 * ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
579 * resource and decrement active clients counter, which may result in clock
580 * gating of IPA clocks.
582 * @resource: [IN] IPA Resource Manager resource
584 * Return codes: 0 on success, negative on failure.
586 int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
588 struct ipa3_client_names clients;
591 struct ipa_ep_cfg_ctrl suspend;
592 enum ipa_client_type client;
594 bool pipe_suspended = false;
596 memset(&clients, 0, sizeof(clients));
597 res = ipa3_get_clients_from_rm_resource(resource, &clients);
599 IPAERR("Bad params.\n");
603 for (index = 0; index < clients.length; index++) {
604 client = clients.names[index];
605 ipa_ep_idx = ipa3_get_ep_mapping(client);
606 if (ipa_ep_idx == -1) {
607 IPAERR("Invalid client.\n");
611 ipa3_ctx->resume_on_connect[client] = false;
612 if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
613 ipa3_should_pipe_be_suspended(client)) {
614 if (ipa3_ctx->ep[ipa_ep_idx].valid) {
615 /* suspend endpoint */
616 memset(&suspend, 0, sizeof(suspend));
617 suspend.ipa_ep_suspend = true;
618 ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
619 pipe_suspended = true;
625 usleep_range(1000, 2000);
627 /* before gating IPA clocks do TAG process */
628 ipa3_ctx->tag_process_before_gating = true;
629 IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));
635 * ipa3_suspend_resource_no_block() - suspend client endpoints related to the
636 * IPA_RM resource and decrement active clients counter. This function is
637 * guaranteed to avoid sleeping.
639 * @resource: [IN] IPA Resource Manager resource
641 * Return codes: 0 on success, negative on failure.
643 int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
646 struct ipa3_client_names clients;
648 enum ipa_client_type client;
649 struct ipa_ep_cfg_ctrl suspend;
652 struct ipa_active_client_logging_info log_info;
654 if (ipa3_active_clients_trylock(&flags) == 0)
656 if (ipa3_ctx->ipa3_active_clients.cnt == 1) {
661 memset(&clients, 0, sizeof(clients));
662 res = ipa3_get_clients_from_rm_resource(resource, &clients);
665 "ipa3_get_clients_from_rm_resource() failed, name = %d.\n",
670 for (index = 0; index < clients.length; index++) {
671 client = clients.names[index];
672 ipa_ep_idx = ipa3_get_ep_mapping(client);
673 if (ipa_ep_idx == -1) {
674 IPAERR("Invalid client.\n");
678 ipa3_ctx->resume_on_connect[client] = false;
679 if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
680 ipa3_should_pipe_be_suspended(client)) {
681 if (ipa3_ctx->ep[ipa_ep_idx].valid) {
682 /* suspend endpoint */
683 memset(&suspend, 0, sizeof(suspend));
684 suspend.ipa_ep_suspend = true;
685 ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
691 IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
692 ipa_rm_resource_str(resource));
693 ipa3_active_clients_log_dec(&log_info, true);
694 ipa3_ctx->ipa3_active_clients.cnt--;
695 IPADBG("active clients = %d\n",
696 ipa3_ctx->ipa3_active_clients.cnt);
699 ipa3_active_clients_trylock_unlock(&flags);
705 * ipa3_resume_resource() - resume client endpoints related to the IPA_RM
708 * @resource: [IN] IPA Resource Manager resource
710 * Return codes: 0 on success, negative on failure.
712 int ipa3_resume_resource(enum ipa_rm_resource_name resource)
715 struct ipa3_client_names clients;
718 struct ipa_ep_cfg_ctrl suspend;
719 enum ipa_client_type client;
722 memset(&clients, 0, sizeof(clients));
723 res = ipa3_get_clients_from_rm_resource(resource, &clients);
725 IPAERR("ipa3_get_clients_from_rm_resource() failed.\n");
729 for (index = 0; index < clients.length; index++) {
730 client = clients.names[index];
731 ipa_ep_idx = ipa3_get_ep_mapping(client);
732 if (ipa_ep_idx == -1) {
733 IPAERR("Invalid client.\n");
738 * The related ep, will be resumed on connect
739 * while its resource is granted
741 ipa3_ctx->resume_on_connect[client] = true;
742 IPADBG("%d will be resumed on connect.\n", client);
743 if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
744 ipa3_should_pipe_be_suspended(client)) {
745 if (ipa3_ctx->ep[ipa_ep_idx].valid) {
746 memset(&suspend, 0, sizeof(suspend));
747 suspend.ipa_ep_suspend = false;
748 ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
757 * _ipa_sram_settings_read_v3_0() - Read SRAM settings from HW
761 void _ipa_sram_settings_read_v3_0(void)
763 struct ipahal_reg_shared_mem_size smem_sz;
765 memset(&smem_sz, 0, sizeof(smem_sz));
767 ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
769 ipa3_ctx->smem_restricted_bytes = smem_sz.shared_mem_baddr;
770 ipa3_ctx->smem_sz = smem_sz.shared_mem_sz;
772 /* reg fields are in 8B units */
773 ipa3_ctx->smem_restricted_bytes *= 8;
774 ipa3_ctx->smem_sz *= 8;
775 ipa3_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
776 ipa3_ctx->hdr_tbl_lcl = 0;
777 ipa3_ctx->hdr_proc_ctx_tbl_lcl = 1;
780 * when proc ctx table is located in internal memory,
781 * modem entries resides first.
783 if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
784 ipa3_ctx->hdr_proc_ctx_tbl.start_offset =
785 IPA_MEM_PART(modem_hdr_proc_ctx_size);
787 ipa3_ctx->ip4_rt_tbl_hash_lcl = 0;
788 ipa3_ctx->ip4_rt_tbl_nhash_lcl = 0;
789 ipa3_ctx->ip6_rt_tbl_hash_lcl = 0;
790 ipa3_ctx->ip6_rt_tbl_nhash_lcl = 0;
791 ipa3_ctx->ip4_flt_tbl_hash_lcl = 0;
792 ipa3_ctx->ip4_flt_tbl_nhash_lcl = 0;
793 ipa3_ctx->ip6_flt_tbl_hash_lcl = 0;
794 ipa3_ctx->ip6_flt_tbl_nhash_lcl = 0;
798 * ipa3_cfg_clkon_cfg() - configure IPA clkon_cfg
799 * @clkon_cfg: IPA clkon_cfg
804 int ipa3_cfg_clkon_cfg(struct ipahal_reg_clkon_cfg *clkon_cfg)
807 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
809 IPADBG("cgc_open_misc = %d\n",
810 clkon_cfg->cgc_open_misc);
812 ipahal_write_reg_fields(IPA_CLKON_CFG, clkon_cfg);
814 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
820 * ipa3_cfg_route() - configure IPA route
826 int ipa3_cfg_route(struct ipahal_reg_route *route)
829 IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
831 route->route_def_pipe,
832 route->route_def_hdr_table);
833 IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n",
834 route->route_def_hdr_ofst,
835 route->route_frag_def_pipe);
837 IPADBG("default_retain_hdr=%d\n",
838 route->route_def_retain_hdr);
840 if (route->route_dis) {
841 IPAERR("Route disable is not supported!\n");
845 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
847 ipahal_write_reg_fields(IPA_ROUTE, route);
849 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
855 * ipa3_cfg_filter() - configure filter
856 * @disable: disable value
861 int ipa3_cfg_filter(u32 disable)
863 IPAERR_RL("Filter disable is not supported!\n");
868 * ipa3_cfg_qsb() - Configure IPA QSB maximal reads and writes
872 void ipa3_cfg_qsb(void)
874 int qsb_max_writes[2] = { 8, 2 };
875 int qsb_max_reads[2] = { 8, 8 };
877 ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, qsb_max_writes);
878 ipahal_write_reg_fields(IPA_QSB_MAX_READS, qsb_max_reads);
882 * ipa3_init_hw() - initialize HW
887 int ipa3_init_hw(void)
892 /* Read IPA version and make sure we have access to the registers */
893 ipa_version = ipahal_read_reg(IPA_VERSION);
894 if (ipa_version == 0)
897 switch (ipa3_ctx->ipa_hw_type) {
900 val = IPA_BCR_REG_VAL_v3_0;
904 val = IPA_BCR_REG_VAL_v3_5;
907 IPAERR("unknown HW type in dts\n");
911 ipahal_write_reg(IPA_BCR, val);
919 * ipa3_get_hw_type_index() - Get HW type index which is used as the entry index
920 * into ipa3_ep_mapping[] array.
922 * Return value: HW type index
924 u8 ipa3_get_hw_type_index(void)
928 switch (ipa3_ctx->ipa_hw_type) {
931 hw_type_index = IPA_3_0;
934 IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type);
935 hw_type_index = IPA_3_0;
939 return hw_type_index;
943 * ipa3_get_ep_mapping() - provide endpoint mapping
944 * @client: client type
946 * Return value: endpoint mapping
948 int ipa3_get_ep_mapping(enum ipa_client_type client)
952 if (client >= IPA_CLIENT_MAX || client < 0) {
953 IPAERR_RL("Bad client number! client =%d\n", client);
954 return IPA_EP_NOT_ALLOCATED;
957 ipa_ep_idx = ipa3_ep_mapping[ipa3_get_hw_type_index()][client].pipe_num;
958 if (ipa_ep_idx < 0 || ipa_ep_idx >= IPA3_MAX_NUM_PIPES)
959 return IPA_EP_NOT_ALLOCATED;
965 * ipa3_get_gsi_ep_info() - provide gsi ep information
966 * @ipa_ep_idx: IPA endpoint index
968 * Return value: pointer to ipa_gsi_ep_info
970 struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx)
975 if (ipa_gsi_ep_info[i].ipa_ep_num < 0)
978 if (ipa_gsi_ep_info[i].ipa_ep_num ==
980 return &(ipa_gsi_ep_info[i]);
987 * ipa_get_ep_group() - provide endpoint group by client
988 * @client: client type
990 * Return value: endpoint group
992 int ipa_get_ep_group(enum ipa_client_type client)
994 if (client >= IPA_CLIENT_MAX || client < 0) {
995 IPAERR("Bad client number! client =%d\n", client);
999 return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].group_num;
1003 * ipa3_get_qmb_master_sel() - provide QMB master selection for the client
1004 * @client: client type
1006 * Return value: QMB master index
1008 u8 ipa3_get_qmb_master_sel(enum ipa_client_type client)
1010 if (client >= IPA_CLIENT_MAX || client < 0) {
1011 IPAERR("Bad client number! client =%d\n", client);
1015 return ipa3_ep_mapping[ipa3_get_hw_type_index()]
1016 [client].qmb_master_sel;
1019 /* ipa3_set_client() - provide client mapping
1020 * @client: client type
1022 * Return value: none
1025 void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
1027 if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
1028 IPAERR("Bad client number! client =%d\n", client);
1029 } else if (index >= IPA3_MAX_NUM_PIPES || index < 0) {
1030 IPAERR("Bad pipe index! index =%d\n", index);
1032 ipa3_ctx->ipacm_client[index].client_enum = client;
1033 ipa3_ctx->ipacm_client[index].uplink = uplink;
1037 /* ipa3_get_wlan_stats() - get ipa wifi stats
1039 * Return value: success or failure
1041 int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
1043 if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
1044 ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
1047 IPAERR("uc_wdi_ctx.stats_notify NULL\n");
1053 int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
1055 if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
1056 ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA,
1059 IPAERR("uc_wdi_ctx.stats_notify NULL\n");
1066 * ipa3_get_client() - provide client mapping
1067 * @client: client type
1069 * Return value: client mapping enum
1071 enum ipacm_client_enum ipa3_get_client(int pipe_idx)
1073 if (pipe_idx >= IPA3_MAX_NUM_PIPES || pipe_idx < 0) {
1074 IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
1075 return IPACM_CLIENT_MAX;
1077 return ipa3_ctx->ipacm_client[pipe_idx].client_enum;
1082 * ipa2_get_client_uplink() - provide client mapping
1083 * @client: client type
1085 * Return value: none
1087 bool ipa3_get_client_uplink(int pipe_idx)
1089 if (pipe_idx < 0 || pipe_idx >= IPA3_MAX_NUM_PIPES) {
1090 IPAERR("invalid pipe idx %d\n", pipe_idx);
1094 return ipa3_ctx->ipacm_client[pipe_idx].uplink;
1098 * ipa3_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
1099 * the supplied pipe index.
1103 * Return value: IPA_RM resource related to the pipe, -1 if a resource was not
1106 enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx)
1110 enum ipa_client_type client;
1111 struct ipa3_client_names clients;
1114 if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
1115 IPAERR("Bad pipe index!\n");
1119 client = ipa3_ctx->ep[pipe_idx].client;
1121 for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
1122 memset(&clients, 0, sizeof(clients));
1123 ipa3_get_clients_from_rm_resource(i, &clients);
1124 for (j = 0; j < clients.length; j++) {
1125 if (clients.names[j] == client) {
1141 * ipa3_get_client_mapping() - provide client mapping
1142 * @pipe_idx: IPA end-point number
1144 * Return value: client mapping
1146 enum ipa_client_type ipa3_get_client_mapping(int pipe_idx)
1148 if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
1149 IPAERR("Bad pipe index!\n");
1153 return ipa3_ctx->ep[pipe_idx].client;
1157 * ipa_init_ep_flt_bitmap() - Initialize the bitmap
1158 * that represents the End-points that supports filtering
1160 void ipa_init_ep_flt_bitmap(void)
1162 enum ipa_client_type cl;
1163 u8 hw_type_idx = ipa3_get_hw_type_index();
1168 BUG_ON(ipa3_ctx->ep_flt_bitmap);
1170 for (cl = 0; cl < IPA_CLIENT_MAX ; cl++) {
1171 if (ipa3_ep_mapping[hw_type_idx][cl].support_flt) {
1173 (1U<<ipa3_ep_mapping[hw_type_idx][cl].pipe_num);
1174 if (bitmap != ipa3_ctx->ep_flt_bitmap) {
1175 ipa3_ctx->ep_flt_bitmap = bitmap;
1176 ipa3_ctx->ep_flt_num++;
1183 * ipa_is_ep_support_flt() - Given an End-point check
1184 * whether it supports filtering or not.
1189 * true if supports and false if not
1191 bool ipa_is_ep_support_flt(int pipe_idx)
1193 if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
1194 IPAERR("Bad pipe index!\n");
1198 return ipa3_ctx->ep_flt_bitmap & (1U<<pipe_idx);
/**
 * ipa3_cfg_ep_seq() - IPA end-point HPS/DPS sequencer type configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @seq_cfg: [in] sequencer configuration (dynamic override or default)
 *
 * Applies either the caller-supplied sequencer type (set_dynamic) or the
 * default from the per-HW-type endpoint mapping table. Rejects consumer
 * endpoints and skips test clients (configured in ipa3_cfg_ep_mode).
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1209 int ipa3_cfg_ep_seq(u32 clnt_hdl, const struct ipa_ep_cfg_seq *seq_cfg)
1213 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1214 ipa3_ctx->ep[clnt_hdl].valid == 0) {
1215 IPAERR("bad param, clnt_hdl = %d", clnt_hdl);
/* SEQ registers are meaningful only for producer (input) pipes */
1219 if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
1220 IPAERR("SEQ does not apply to IPA consumer EP %d\n", clnt_hdl);
/*
 * Skip configuring sequencer type for test clients.
 * These are configured dynamically in ipa3_cfg_ep_mode.
 */
1228 if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
1229 IPADBG("Skip sequencers configuration for test clients\n");
1233 if (seq_cfg->set_dynamic)
1234 type = seq_cfg->seq_type;
1236 type = ipa3_ep_mapping[ipa3_get_hw_type_index()]
1237 [ipa3_ctx->ep[clnt_hdl].client].sequencer_type;
1239 if (type != IPA_DPS_HPS_SEQ_TYPE_INVALID) {
/* a DMA-mode pipe must only get a DMA sequencer type */
1240 if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA &&
1241 !IPA_DPS_HPS_SEQ_TYPE_IS_DMA(type)) {
1242 IPAERR("Configuring non-DMA SEQ type to DMA pipe\n");
1245 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1246 /* Configure sequencers type*/
1248 IPADBG("set sequencers to sequence 0x%x, ep = %d\n", type,
1250 ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
1252 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1254 IPADBG("should not set sequencer type of ep = %d\n", clnt_hdl);
/**
 * ipa3_cfg_ep - IPA end-point configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * This includes nat, header, mode, aggregation and route settings and is
 * a one-shot API to configure the IPA end-point fully. Sub-configuration
 * helpers are invoked in sequence; NAT/mode/seq/route/deaggr/metadata-mask
 * apply only to producer endpoints.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1272 int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
1274 int result = -EINVAL;
1276 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1277 ipa3_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
1278 IPAERR("bad parm.\n");
1282 result = ipa3_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
1286 result = ipa3_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
1290 result = ipa3_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
1294 result = ipa3_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
/* the remaining settings only exist on producer (input) pipes */
1298 if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
1299 result = ipa3_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
1303 result = ipa3_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
1307 result = ipa3_cfg_ep_seq(clnt_hdl, &ipa_ep_cfg->seq);
1311 result = ipa3_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
1315 result = ipa3_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
1319 result = ipa3_cfg_ep_metadata_mask(clnt_hdl,
1320 &ipa_ep_cfg->metadata_mask);
/* Map an ipa_nat_en_type value to a human-readable string (debug logs).
 * NOTE(review): only two switch cases are visible in this excerpt; the
 * DST_NAT and default cases are presumably handled further down — confirm.
 */
1328 const char *ipa3_get_nat_en_str(enum ipa_nat_en_type nat_en)
1331 case (IPA_BYPASS_NAT):
1332 return "NAT disabled";
1334 return "Source NAT";
/**
 * ipa3_cfg_ep_nat() - IPA end-point NAT configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_nat: [in] NAT configuration params
 *
 * Caches the NAT config in the endpoint context, then writes it to the
 * ENDP_INIT_NAT_n register under an active-clients vote. Rejects
 * consumer endpoints (NAT applies only to producer pipes).
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1351 int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
1353 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1354 ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) {
1355 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1357 ipa3_ctx->ep[clnt_hdl].valid);
1361 if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
1362 IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
1366 IPADBG("pipe=%d, nat_en=%d(%s)\n",
1369 ipa3_get_nat_en_str(ep_nat->nat_en));
1371 /* copy over EP cfg */
1372 ipa3_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
/* vote for clocks around the register write */
1374 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1376 ipahal_write_reg_n_fields(IPA_ENDP_INIT_NAT_n, clnt_hdl, ep_nat);
1378 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
/**
 * ipa3_cfg_ep_status() - IPA end-point status configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_status: [in] endpoint status configuration params
 *
 * Caches the status config in the endpoint context and writes it to the
 * ENDP_STATUS_n register under an active-clients vote.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1393 int ipa3_cfg_ep_status(u32 clnt_hdl,
1394 const struct ipahal_reg_ep_cfg_status *ep_status)
1396 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1397 ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
1398 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1400 ipa3_ctx->ep[clnt_hdl].valid);
1404 IPADBG("pipe=%d, status_en=%d status_ep=%d status_location=%d\n",
1406 ep_status->status_en,
1407 ep_status->status_ep,
1408 ep_status->status_location);
1410 /* copy over EP cfg */
1411 ipa3_ctx->ep[clnt_hdl].status = *ep_status;
1413 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1415 ipahal_write_reg_n_fields(IPA_ENDP_STATUS_n, clnt_hdl, ep_status);
1417 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
/**
 * ipa3_cfg_ep_cfg() - IPA end-point cfg configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @cfg: [in] IPA end-point configuration params
 *
 * Caches the cfg in the endpoint context, overrides the QMB master
 * selection based on the client type, and writes the result to the
 * ENDP_INIT_CFG_n register under an active-clients vote.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1431 int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
1435 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1436 ipa3_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) {
1437 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1439 ipa3_ctx->ep[clnt_hdl].valid);
1443 /* copy over EP cfg */
1444 ipa3_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
1446 /* Override QMB master selection */
1447 qmb_master_sel = ipa3_get_qmb_master_sel(ipa3_ctx->ep[clnt_hdl].client);
1448 ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel = qmb_master_sel;
1450 "pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d gen_qmb_master_sel=%d\n",
1452 ipa3_ctx->ep[clnt_hdl].cfg.cfg.frag_offload_en,
1453 ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_offload_en,
1454 ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_metadata_hdr_offset,
1455 ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel);
1457 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
/* write the cached (possibly QMB-overridden) copy, not the caller's */
1459 ipahal_write_reg_n_fields(IPA_ENDP_INIT_CFG_n, clnt_hdl,
1460 &ipa3_ctx->ep[clnt_hdl].cfg.cfg);
1462 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
/**
 * ipa3_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @metadata_mask: [in] metadata mask configuration params
 *
 * Caches the mask in the endpoint context and writes it to the
 * ENDP_INIT_HDR_METADATA_MASK_n register under an active-clients vote.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1476 int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
1477 const struct ipa_ep_cfg_metadata_mask
1480 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1481 ipa3_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) {
1482 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1484 ipa3_ctx->ep[clnt_hdl].valid);
1488 IPADBG("pipe=%d, metadata_mask=0x%x\n",
1490 metadata_mask->metadata_mask);
1492 /* copy over EP cfg */
1493 ipa3_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
1495 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1497 ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
1498 clnt_hdl, metadata_mask);
1500 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
/**
 * ipa3_cfg_ep_hdr() - IPA end-point header configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_hdr: [in] header configuration params
 *
 * Caches the header config in the endpoint context and writes it to the
 * ENDP_INIT_HDR_n register under an active-clients vote.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1514 int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
1516 struct ipa3_ep_context *ep;
1518 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1519 ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) {
1520 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1521 clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
1524 IPADBG("pipe=%d metadata_reg_valid=%d\n",
1526 ep_hdr->hdr_metadata_reg_valid);
1528 IPADBG("remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
1529 ep_hdr->hdr_remove_additional,
1531 ep_hdr->hdr_ofst_pkt_size);
1533 IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n",
1534 ep_hdr->hdr_ofst_pkt_size_valid,
1535 ep_hdr->hdr_additional_const_len);
1537 IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x",
1538 ep_hdr->hdr_ofst_metadata,
1539 ep_hdr->hdr_ofst_metadata_valid,
1542 ep = &ipa3_ctx->ep[clnt_hdl];
1544 /* copy over EP cfg */
1545 ep->cfg.hdr = *ep_hdr;
1547 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1549 ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, &ep->cfg.hdr);
1551 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
/**
 * ipa3_cfg_ep_hdr_ext() - IPA end-point extended header configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_hdr_ext: [in] extended header configuration params
 *
 * Caches the extended header config in the endpoint context and writes
 * it to the ENDP_INIT_HDR_EXT_n register under an active-clients vote.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1565 int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
1566 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
1568 struct ipa3_ep_context *ep;
1570 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1571 ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) {
1572 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1573 clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
1577 IPADBG("pipe=%d hdr_pad_to_alignment=%d\n",
1579 ep_hdr_ext->hdr_pad_to_alignment);
1581 IPADBG("hdr_total_len_or_pad_offset=%d\n",
1582 ep_hdr_ext->hdr_total_len_or_pad_offset);
1584 IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n",
1585 ep_hdr_ext->hdr_payload_len_inc_padding,
1586 ep_hdr_ext->hdr_total_len_or_pad);
1588 IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n",
1589 ep_hdr_ext->hdr_total_len_or_pad_valid,
1590 ep_hdr_ext->hdr_little_endian);
1592 ep = &ipa3_ctx->ep[clnt_hdl];
1594 /* copy over EP cfg */
1595 ep->cfg.hdr_ext = *ep_hdr_ext;
1597 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1599 ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_EXT_n, clnt_hdl,
1602 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
/**
 * ipa3_cfg_ep_ctrl() - IPA end-point control (suspend/delay) configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_ctrl: [in] IPA end-point control params
 *
 * Writes suspend/delay bits to ENDP_INIT_CTRL_n. When suspending a
 * consumer pipe, applies the active-aggregation workaround.
 * NOTE(review): unlike its siblings, this path takes no active-clients
 * vote in the visible code — presumably callers hold one; confirm.
 *
 * Returns: 0 on success, negative on failure
 */
1614 int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
1616 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ep_ctrl == NULL) {
1617 IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
1621 IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
1623 ep_ctrl->ipa_ep_suspend,
1624 ep_ctrl->ipa_ep_delay);
1626 ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, ep_ctrl);
/* suspending a consumer with open aggregation frames needs a HW WA */
1628 if (ep_ctrl->ipa_ep_suspend == true &&
1629 IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client))
1630 ipa3_suspend_active_aggr_wa(clnt_hdl);
/* Map an ipa_mode_type value to a human-readable string (debug logs).
 * NOTE(review): only the HDLC cases are visible in this excerpt; the
 * BASIC/DMA and default cases are presumably handled below — confirm.
 */
1635 const char *ipa3_get_mode_type_str(enum ipa_mode_type mode)
1640 case (IPA_ENABLE_FRAMING_HDLC):
1641 return "HDLC framing";
1642 case (IPA_ENABLE_DEFRAMING_HDLC):
1643 return "HDLC de-framing";
/**
 * ipa3_cfg_ep_mode() - IPA end-point mode configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_mode: [in] mode configuration params (mode + DMA destination)
 *
 * Resolves the destination pipe for DMA mode, caches the mode in the
 * endpoint context, and writes ENDP_INIT_MODE_n. For test clients it
 * also programs the sequencer type here (DMA vs. packet processing).
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1660 int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
1664 struct ipahal_reg_endp_init_mode init_mode;
1666 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1667 ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
1668 IPAERR("bad params clnt_hdl=%d , ep_valid=%d ep_mode=%p\n",
1669 clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid,
1674 if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
1675 IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
1679 ep = ipa3_get_ep_mapping(ep_mode->dst);
/* DMA mode requires a resolvable destination pipe */
1680 if (ep == -1 && ep_mode->mode == IPA_DMA) {
1681 IPAERR("dst %d does not exist in DMA mode\n", ep_mode->dst);
1685 WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
/* non-consumer destinations fall back to the APPS LAN consumer pipe */
1687 if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
1688 ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
1690 IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d",
1693 ipa3_get_mode_type_str(ep_mode->mode),
1696 /* copy over EP cfg */
1697 ipa3_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
1698 ipa3_ctx->ep[clnt_hdl].dst_pipe_index = ep;
1700 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1702 init_mode.dst_pipe_number = ipa3_ctx->ep[clnt_hdl].dst_pipe_index;
1703 init_mode.ep_mode = *ep_mode;
1704 ipahal_write_reg_n_fields(IPA_ENDP_INIT_MODE_n, clnt_hdl, &init_mode);
1706 /* Configure sequencers type for test clients*/
1707 if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
1708 if (ep_mode->mode == IPA_DMA)
1709 type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY;
1711 type = IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP;
1713 IPADBG(" set sequencers to sequance 0x%x, ep = %d\n", type,
1715 ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
1717 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
/* Map an ipa_aggr_en_type value to a human-readable string (debug logs).
 * NOTE(review): the default case is not visible in this excerpt.
 */
1722 const char *ipa3_get_aggr_enable_str(enum ipa_aggr_en_type aggr_en)
1725 case (IPA_BYPASS_AGGR):
1726 return "no aggregation";
1727 case (IPA_ENABLE_AGGR):
1728 return "aggregation enabled";
1729 case (IPA_ENABLE_DEAGGR):
1730 return "de-aggregation enabled";
/* Map an ipa_aggr_type value to a human-readable string (debug logs).
 * NOTE(review): the switch cases are not visible in this excerpt.
 */
1736 const char *ipa3_get_aggr_type_str(enum ipa_aggr_type aggr_type)
1738 switch (aggr_type) {
/**
 * ipa3_cfg_ep_aggr() - IPA end-point aggregation configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_aggr: [in] aggregation configuration params
 *
 * Caches the aggregation config in the endpoint context and writes it
 * to the ENDP_INIT_AGGR_n register under an active-clients vote.
 * Deaggregation is rejected on pipes that do not support it.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1764 int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
1766 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1767 ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) {
1768 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1769 clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
/* DEAGGR is only valid on pipes whose HW sequencer supports it */
1773 if (ep_aggr->aggr_en == IPA_ENABLE_DEAGGR &&
1774 !IPA_EP_SUPPORTS_DEAGGR(clnt_hdl)) {
1775 IPAERR("pipe=%d cannot be configured to DEAGGR\n", clnt_hdl);
1780 IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n",
1783 ipa3_get_aggr_enable_str(ep_aggr->aggr_en),
1785 ipa3_get_aggr_type_str(ep_aggr->aggr),
1786 ep_aggr->aggr_byte_limit,
1787 ep_aggr->aggr_time_limit);
1788 IPADBG("hard_byte_limit_en=%d aggr_sw_eof_active=%d\n",
1789 ep_aggr->aggr_hard_byte_limit_en,
1790 ep_aggr->aggr_sw_eof_active);
1792 /* copy over EP cfg */
1793 ipa3_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
1795 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1797 ipahal_write_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, ep_aggr);
1799 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
/**
 * ipa3_cfg_ep_route() - IPA end-point routing configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_route: [in] route configuration params
 *
 * Always programs the "default" apps routing table index into
 * ENDP_INIT_ROUTE_n (a caller-supplied rt_tbl_hdl is logged and
 * ignored). Skipped entirely when the pipe is in DMA mode, and rejected
 * for consumer endpoints.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1813 int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
1815 struct ipahal_reg_endp_init_route init_rt;
1817 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1818 ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
1819 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1820 clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
1824 if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
1825 IPAERR("ROUTE does not apply to IPA out EP %d\n",
/*
 * If DMA mode was configured previously for this EP, return without
 * programming the route — the destination pipe is fixed by DMA.
 */
1834 if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
1835 IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n",
1840 if (ep_route->rt_tbl_hdl)
1841 IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
1843 IPADBG("pipe=%d, rt_tbl_hdl=%d\n",
1845 ep_route->rt_tbl_hdl);
1847 /* always use "default" routing table when programming EP ROUTE reg */
1848 ipa3_ctx->ep[clnt_hdl].rt_tbl_idx =
1849 IPA_MEM_PART(v4_apps_rt_index_lo);
1851 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1853 init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx;
1854 ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n, clnt_hdl, &init_rt);
1856 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
/**
 * ipa3_cfg_ep_holb() - IPA end-point head-of-line-blocking configuration
 *
 * If an IPA producer pipe is full, IPA HW by default will block
 * indefinitely till space opens up. During this time no packets,
 * including those from unrelated pipes, will be processed. Enabling
 * HOLB means IPA HW will be allowed to drop packets as/when needed
 * and indefinite blocking is avoided.
 *
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_holb: [in] HOLB configuration (enable + timer value, bounded by
 *           ctrl->max_holb_tmr_val)
 *
 * Returns: 0 on success, negative on failure
 */
1875 int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
1877 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1878 ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL ||
1879 ep_holb->tmr_val > ipa3_ctx->ctrl->max_holb_tmr_val ||
1881 IPAERR("bad parm.\n");
/* HOLB applies to consumer (output) pipes only */
1885 if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
1886 IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
1890 ipa3_ctx->ep[clnt_hdl].holb = *ep_holb;
1892 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1894 ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl,
1897 ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, clnt_hdl,
1900 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1902 IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
/**
 * ipa3_cfg_ep_holb_by_client() - IPA end-point HOLB configuration
 *
 * Wrapper for ipa3_cfg_ep_holb() taking a client name instead of a
 * client handle. Intended for clients that do not have a handle.
 *
 * @client: [in] client name
 * @ep_holb: [in] HOLB configuration params
 *
 * Returns: 0 on success, negative on failure
 */
1920 int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
1921 const struct ipa_ep_cfg_holb *ep_holb)
1923 return ipa3_cfg_ep_holb(ipa3_get_ep_mapping(client), ep_holb);
/**
 * ipa3_cfg_ep_deaggr() - IPA end-point deaggregation configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_deaggr: [in] deaggregation configuration params
 *
 * Caches the deaggregation config in the endpoint context and writes it
 * to the ENDP_INIT_DEAGGR_n register under an active-clients vote.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1935 int ipa3_cfg_ep_deaggr(u32 clnt_hdl,
1936 const struct ipa_ep_cfg_deaggr *ep_deaggr)
1938 struct ipa3_ep_context *ep;
1940 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1941 ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
1942 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1943 clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
1947 IPADBG("pipe=%d deaggr_hdr_len=%d\n",
1949 ep_deaggr->deaggr_hdr_len);
1951 IPADBG("packet_offset_valid=%d\n",
1952 ep_deaggr->packet_offset_valid);
1954 IPADBG("packet_offset_location=%d max_packet_len=%d\n",
1955 ep_deaggr->packet_offset_location,
1956 ep_deaggr->max_packet_len);
1958 ep = &ipa3_ctx->ep[clnt_hdl];
1960 /* copy over EP cfg */
1961 ep->cfg.deaggr = *ep_deaggr;
1963 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1965 ipahal_write_reg_n_fields(IPA_ENDP_INIT_DEAGGR_n, clnt_hdl,
1968 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
/**
 * ipa3_cfg_ep_metadata() - IPA end-point metadata (mux-id) configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_md: [in] metadata params (qmap_id is shifted into the HW MUX_ID
 *         field before being written)
 *
 * Caches the metadata config, writes a register-formatted copy to
 * ENDP_INIT_HDR_METADATA_n, then marks the header metadata register as
 * valid and rewrites ENDP_INIT_HDR_n so the HW picks up the mux-id.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
1982 int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
1985 struct ipa_ep_cfg_metadata ep_md_reg_wrt;
1987 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1988 ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
1989 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1990 clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
1994 IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);
1996 /* copy over EP cfg */
1997 ipa3_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
1999 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
/* position qmap_id in the MUX_ID bit-field of the metadata register */
2001 ep_md_reg_wrt = *ep_md;
2002 qmap_id = (ep_md->qmap_id <<
2003 IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT) &
2004 IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK;
2006 ep_md_reg_wrt.qmap_id = qmap_id;
2007 ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_n, clnt_hdl,
2009 ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
2010 ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl,
2011 &ipa3_ctx->ep[clnt_hdl].cfg.hdr);
2013 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
/*
 * ipa3_write_qmap_id() - apply a QMAP mux-id to the pipe of the given
 * client (ioctl backend).
 *
 * For USB/HSIC1/ODU producers the id is programmed via
 * ipa3_cfg_ep_metadata(); for the WLAN1 producer it is written through
 * the WDI pipe path instead. Other clients are rejected.
 *
 * Returns: 0 on success, negative on failure.
 */
2018 int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
2020 struct ipa_ep_cfg_metadata meta;
2021 struct ipa3_ep_context *ep;
2023 int result = -EINVAL;
2025 if (param_in->client >= IPA_CLIENT_MAX) {
2026 IPAERR_RL("bad parm client:%d\n", param_in->client);
2030 ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
2031 if (ipa_ep_idx == -1) {
2032 IPAERR_RL("Invalid client.\n");
2036 ep = &ipa3_ctx->ep[ipa_ep_idx];
2038 IPAERR_RL("EP not allocated.\n");
2042 meta.qmap_id = param_in->qmap_id;
2043 if (param_in->client == IPA_CLIENT_USB_PROD ||
2044 param_in->client == IPA_CLIENT_HSIC1_PROD ||
2045 param_in->client == IPA_CLIENT_ODU_PROD) {
2046 result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
2047 } else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
/* WLAN uses the WDI pipe interface, not the metadata register */
2048 ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
2049 result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
2051 IPAERR_RL("qmap_id %d write failed on ep=%d\n",
2052 meta.qmap_id, ipa_ep_idx);
/**
 * ipa3_dump_buff_internal() - dumps buffer for debug purposes
 * @base: buffer base (virtual) address
 * @phy_base: buffer physical base address
 * @size: size of the buffer in bytes
 *
 * Logs the buffer word by word (4 bytes per line), showing each 32-bit
 * value plus its individual bytes. Any trailing bytes beyond a multiple
 * of 4 are not printed.
 */
2066 void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
2069 u32 *cur = (u32 *)base;
2072 IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
2073 for (i = 0; i < size / 4; i++) {
2074 byt = (u8 *)(cur + i);
2075 IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i),
2076 byt[0], byt[1], byt[2], byt[3]);
/**
 * ipa3_pipe_mem_init() - initialize the pipe memory gen_pool allocator
 * @start_ofst: start offset of the pipe memory region
 * @size: size of the region in bytes
 *
 * Aligns the start offset, creates a gen_pool with 128-byte (order-8)
 * allocation granularity, adds the aligned region to it, and publishes
 * the pool in ipa3_ctx->pipe_mem_pool. Destroys the pool on failure.
 *
 * Returns: 0 on success
 * -ENOMEM: no memory
 */
2090 int ipa3_pipe_mem_init(u32 start_ofst, u32 size)
2093 u32 aligned_start_ofst;
2095 struct gen_pool *pool;
2098 IPAERR("no IPA pipe memory allocated\n");
/* aligning the start shrinks the usable size correspondingly */
2102 aligned_start_ofst = IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst);
2103 aligned_size = size - (aligned_start_ofst - start_ofst);
2105 IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
2106 start_ofst, aligned_start_ofst, size, aligned_size);
2108 /* allocation order of 8 i.e. 128 bytes, global pool (NUMA node -1) */
2109 pool = gen_pool_create(8, -1);
2111 IPAERR("Failed to create a new memory pool.\n");
2115 res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
2117 IPAERR("Failed to add memory to IPA pipe pool\n");
2121 ipa3_ctx->pipe_mem_pool = pool;
2125 gen_pool_destroy(pool);
/**
 * ipa3_pipe_mem_alloc() - allocate pipe memory from the gen_pool
 * @ofst: [out] offset of the allocated chunk
 * @size: [in] requested size in bytes
 *
 * Returns: 0 on success, negative on failure (pool not initialized,
 * zero size, or pool exhausted).
 */
2138 int ipa3_pipe_mem_alloc(u32 *ofst, u32 size)
2143 if (!ipa3_ctx->pipe_mem_pool || !size) {
2144 IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
2145 ipa3_ctx->pipe_mem_pool);
2149 vaddr = gen_pool_alloc(ipa3_ctx->pipe_mem_pool, size);
2154 IPADBG("size=%u ofst=%u\n", size, vaddr);
2156 IPAERR("size=%u failed\n", size);
/**
 * ipa3_pipe_mem_free() - return pipe memory to the gen_pool
 * @ofst: offset previously returned by ipa3_pipe_mem_alloc()
 * @size: size of the chunk in bytes
 *
 * Silently ignores the call if the pool was never created or size is 0.
 */
2170 int ipa3_pipe_mem_free(u32 ofst, u32 size)
2172 IPADBG("size=%u ofst=%u\n", size, ofst);
2173 if (ipa3_ctx->pipe_mem_pool && size)
2174 gen_pool_free(ipa3_ctx->pipe_mem_pool, ofst, size);
/**
 * ipa3_set_aggr_mode() - Set the aggregation mode, a global setting
 * @mode: [in] the desired aggregation mode, e.g. straight MBIM, QCNCM
 *
 * Read-modify-writes the mode_en field of the QCNCM register under an
 * active-clients vote.
 *
 * Returns: 0 on success
 */
2185 int ipa3_set_aggr_mode(enum ipa_aggr_mode mode)
2187 struct ipahal_reg_qcncm qcncm;
2189 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2190 ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
2191 qcncm.mode_en = mode;
2192 ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
2193 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
/**
 * ipa3_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM
 * aggregation mode
 * @sig: [in] the first 3 bytes of the QCNCM NDP signature (expected to
 * be "QND")
 *
 * Packs the 3 bytes into the mode_val field of the QCNCM register. The
 * fourth byte (expected to be 'P') must be set via the header addition
 * mechanism.
 *
 * Returns: 0 on success, negative on failure
 */
2209 int ipa3_set_qcncm_ndp_sig(char sig[3])
2211 struct ipahal_reg_qcncm qcncm;
2214 IPAERR("bad argument for ipa3_set_qcncm_ndp_sig/n");
2217 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2218 ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
/* big-endian pack: sig[0] is the most significant of the 3 bytes */
2219 qcncm.mode_val = ((sig[0] << 16) | (sig[1] << 8) | sig[2]);
2220 ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
2221 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
/**
 * ipa3_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM
 * frame
 * @enable: [in] true for single NDP/MBIM; false otherwise
 *
 * Read-modify-writes the single_ndp_en bit of the SINGLE_NDP_MODE
 * register under an active-clients vote.
 *
 * Returns: 0 on success
 */
2233 int ipa3_set_single_ndp_per_mbim(bool enable)
2235 struct ipahal_reg_single_ndp_mode mode;
2237 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2238 ipahal_read_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
2239 mode.single_ndp_en = enable;
2240 ipahal_write_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
2241 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
/**
 * ipa3_straddle_boundary() - Checks whether a memory buffer straddles a
 * boundary
 * @start: start address of the memory buffer
 * @end: end address of the memory buffer
 * @boundary: boundary size (assumed a power of two; the rounding math
 *            below relies on it)
 *
 * Return value:
 * 1: if the interval [start, end] straddles the boundary
 * 0: otherwise (return path not visible in this excerpt)
 */
2256 int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary)
2261 IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
/* next_start: start rounded up; prev_end: last boundary before end */
2263 next_start = (start + (boundary - 1)) & ~(boundary - 1);
2264 prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
2266 while (next_start < prev_end)
2267 next_start += boundary;
2269 if (next_start == prev_end)
/**
 * ipa3_bam_reg_dump() - Dump selected BAM registers for IPA.
 * The API is right now used only to dump IPA registers towards USB.
 *
 * Rate limited (one dump per 500*HZ interval) to avoid flooding the
 * kernel log buffer. Takes an active-clients vote around the dump.
 */
2281 void ipa3_bam_reg_dump(void)
2283 static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
2285 if (__ratelimit(&_rs)) {
2286 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2287 pr_err("IPA BAM START\n");
/* 93 selects the debug-info level; dump both USB CONS and PROD pipes */
2288 sps_get_bam_debug_info(ipa3_ctx->bam_handle, 93,
2289 (SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS))
2291 SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD))),
2293 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
/**
 * ipa3_init_mem_partition() - Reads the IPA memory map from DTS,
 * performs alignment checks, and logs the fetched values.
 * @node: device-tree node carrying the "qcom,ipa-ram-mmap" u32 array
 *
 * The whole mem_partition struct is filled directly from the DT array;
 * filter/route table offsets must be 8-byte aligned and the uC info
 * offset 4-byte aligned. The remainder of the function is logging.
 *
 * Returns: 0 on success
 */
2303 int ipa3_init_mem_partition(struct device_node *node)
2307 IPADBG("Reading from DTS as u32 array\n");
2308 result = of_property_read_u32_array(node,
2309 "qcom,ipa-ram-mmap", (u32 *)&ipa3_ctx->ctrl->mem_partition,
2310 sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32));
2313 IPAERR("Read operation failed\n");
2317 IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
2318 IPA_MEM_PART(nat_size));
/* uC info must be word (4-byte) aligned */
2320 if (IPA_MEM_PART(uc_info_ofst) & 3) {
2321 IPAERR("UC INFO OFST 0x%x is unaligned\n",
2322 IPA_MEM_PART(uc_info_ofst));
2326 IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n",
2327 IPA_MEM_PART(uc_info_ofst), IPA_MEM_PART(uc_info_size));
2329 IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
/* filter tables must be 8-byte aligned */
2331 if (IPA_MEM_PART(v4_flt_hash_ofst) & 7) {
2332 IPAERR("V4 FLT HASHABLE OFST 0x%x is unaligned\n",
2333 IPA_MEM_PART(v4_flt_hash_ofst));
2337 IPADBG("V4 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2338 IPA_MEM_PART(v4_flt_hash_ofst),
2339 IPA_MEM_PART(v4_flt_hash_size),
2340 IPA_MEM_PART(v4_flt_hash_size_ddr));
2342 if (IPA_MEM_PART(v4_flt_nhash_ofst) & 7) {
2343 IPAERR("V4 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
2344 IPA_MEM_PART(v4_flt_nhash_ofst));
2348 IPADBG("V4 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2349 IPA_MEM_PART(v4_flt_nhash_ofst),
2350 IPA_MEM_PART(v4_flt_nhash_size),
2351 IPA_MEM_PART(v4_flt_nhash_size_ddr));
2353 if (IPA_MEM_PART(v6_flt_hash_ofst) & 7) {
2354 IPAERR("V6 FLT HASHABLE OFST 0x%x is unaligned\n",
2355 IPA_MEM_PART(v6_flt_hash_ofst));
2359 IPADBG("V6 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2360 IPA_MEM_PART(v6_flt_hash_ofst), IPA_MEM_PART(v6_flt_hash_size),
2361 IPA_MEM_PART(v6_flt_hash_size_ddr));
2363 if (IPA_MEM_PART(v6_flt_nhash_ofst) & 7) {
2364 IPAERR("V6 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
2365 IPA_MEM_PART(v6_flt_nhash_ofst));
2369 IPADBG("V6 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2370 IPA_MEM_PART(v6_flt_nhash_ofst),
2371 IPA_MEM_PART(v6_flt_nhash_size),
2372 IPA_MEM_PART(v6_flt_nhash_size_ddr));
2374 IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_rt_num_index));
2376 IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
2377 IPA_MEM_PART(v4_modem_rt_index_lo),
2378 IPA_MEM_PART(v4_modem_rt_index_hi));
2380 IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
2381 IPA_MEM_PART(v4_apps_rt_index_lo),
2382 IPA_MEM_PART(v4_apps_rt_index_hi));
/* routing tables must be 8-byte aligned */
2384 if (IPA_MEM_PART(v4_rt_hash_ofst) & 7) {
2385 IPAERR("V4 RT HASHABLE OFST 0x%x is unaligned\n",
2386 IPA_MEM_PART(v4_rt_hash_ofst));
2390 IPADBG("V4 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v4_rt_hash_ofst));
2392 IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
2393 IPA_MEM_PART(v4_rt_hash_size),
2394 IPA_MEM_PART(v4_rt_hash_size_ddr));
2396 if (IPA_MEM_PART(v4_rt_nhash_ofst) & 7) {
2397 IPAERR("V4 RT NON-HASHABLE OFST 0x%x is unaligned\n",
2398 IPA_MEM_PART(v4_rt_nhash_ofst));
2402 IPADBG("V4 RT NON-HASHABLE OFST 0x%x\n",
2403 IPA_MEM_PART(v4_rt_nhash_ofst));
2405 IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
2406 IPA_MEM_PART(v4_rt_nhash_size),
2407 IPA_MEM_PART(v4_rt_nhash_size_ddr));
2409 IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_rt_num_index));
2411 IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
2412 IPA_MEM_PART(v6_modem_rt_index_lo),
2413 IPA_MEM_PART(v6_modem_rt_index_hi));
2415 IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
2416 IPA_MEM_PART(v6_apps_rt_index_lo),
2417 IPA_MEM_PART(v6_apps_rt_index_hi));
2419 if (IPA_MEM_PART(v6_rt_hash_ofst) & 7) {
2420 IPAERR("V6 RT HASHABLE OFST 0x%x is unaligned\n",
2421 IPA_MEM_PART(v6_rt_hash_ofst));
2425 IPADBG("V6 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v6_rt_hash_ofst));
2427 IPADBG("V6 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
2428 IPA_MEM_PART(v6_rt_hash_size),
2429 IPA_MEM_PART(v6_rt_hash_size_ddr));
2431 if (IPA_MEM_PART(v6_rt_nhash_ofst) & 7) {
2432 IPAERR("V6 RT NON-HASHABLE OFST 0x%x is unaligned\n",
2433 IPA_MEM_PART(v6_rt_nhash_ofst));
2437 IPADBG("V6 RT NON-HASHABLE OFST 0x%x\n",
2438 IPA_MEM_PART(v6_rt_nhash_ofst));
2440 IPADBG("V6 RT NON-HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
2441 IPA_MEM_PART(v6_rt_nhash_size),
2442 IPA_MEM_PART(v6_rt_nhash_size_ddr));
/* header and processing-context regions must be 8-byte aligned */
2444 if (IPA_MEM_PART(modem_hdr_ofst) & 7) {
2445 IPAERR("MODEM HDR OFST 0x%x is unaligned\n",
2446 IPA_MEM_PART(modem_hdr_ofst));
2450 IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
2451 IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
2453 if (IPA_MEM_PART(apps_hdr_ofst) & 7) {
2454 IPAERR("APPS HDR OFST 0x%x is unaligned\n",
2455 IPA_MEM_PART(apps_hdr_ofst));
2459 IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2460 IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
2461 IPA_MEM_PART(apps_hdr_size_ddr));
2463 if (IPA_MEM_PART(modem_hdr_proc_ctx_ofst) & 7) {
2464 IPAERR("MODEM HDR PROC CTX OFST 0x%x is unaligned\n",
2465 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
2469 IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
2470 IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
2471 IPA_MEM_PART(modem_hdr_proc_ctx_size));
2473 if (IPA_MEM_PART(apps_hdr_proc_ctx_ofst) & 7) {
2474 IPAERR("APPS HDR PROC CTX OFST 0x%x is unaligned\n",
2475 IPA_MEM_PART(apps_hdr_proc_ctx_ofst));
2479 IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2480 IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
2481 IPA_MEM_PART(apps_hdr_proc_ctx_size),
2482 IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));
2484 if (IPA_MEM_PART(modem_ofst) & 7) {
2485 IPAERR("MODEM OFST 0x%x is unaligned\n",
2486 IPA_MEM_PART(modem_ofst));
2490 IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
2491 IPA_MEM_PART(modem_size));
2493 IPADBG("V4 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
2494 IPA_MEM_PART(apps_v4_flt_hash_ofst),
2495 IPA_MEM_PART(apps_v4_flt_hash_size));
2497 IPADBG("V4 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
2498 IPA_MEM_PART(apps_v4_flt_nhash_ofst),
2499 IPA_MEM_PART(apps_v4_flt_nhash_size));
2501 IPADBG("V6 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
2502 IPA_MEM_PART(apps_v6_flt_hash_ofst),
2503 IPA_MEM_PART(apps_v6_flt_hash_size));
2505 IPADBG("V6 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
2506 IPA_MEM_PART(apps_v6_flt_nhash_ofst),
2507 IPA_MEM_PART(apps_v6_flt_nhash_size));
2509 IPADBG("RAM END OFST 0x%x\n",
2510 IPA_MEM_PART(end_ofst));
2512 IPADBG("V4 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
2513 IPA_MEM_PART(apps_v4_rt_hash_ofst),
2514 IPA_MEM_PART(apps_v4_rt_hash_size));
2516 IPADBG("V4 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
2517 IPA_MEM_PART(apps_v4_rt_nhash_ofst),
2518 IPA_MEM_PART(apps_v4_rt_nhash_size));
2520 IPADBG("V6 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
2521 IPA_MEM_PART(apps_v6_rt_hash_ofst),
2522 IPA_MEM_PART(apps_v6_rt_hash_size));
2524 IPADBG("V6 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
2525 IPA_MEM_PART(apps_v6_rt_nhash_ofst),
2526 IPA_MEM_PART(apps_v6_rt_nhash_size));
2532 * ipa_ctrl_static_bind() - set the appropriate methods for
2533 * IPA Driver based on the HW version
2535 * @ctrl: data structure which holds the function pointers
2536 * @hw_type: the HW type in use
2538 * This function can avoid the runtime assignment by using C99 special
2539 * struct initialization - hard decision... time.vs.mem
/*
 * ipa3_controller_static_bind() - populate @ctrl with the function
 * pointers and clock/bus parameters for the HW version in use.
 * Only the IPA v3.0 defaults are visible in this fragment; @hw_type
 * specific overrides (if any) are not shown.
 * NOTE(review): the embedded original line numbers jump here - this
 * fragment is truncated; braces and the return statement are missing.
 */
2541 int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
2542 enum ipa_hw_type hw_type)
/* routing/filtering table init handlers (v3 flavor) */
2544 ctrl->ipa_init_rt4 = _ipa_init_rt4_v3;
2545 ctrl->ipa_init_rt6 = _ipa_init_rt6_v3;
2546 ctrl->ipa_init_flt4 = _ipa_init_flt4_v3;
2547 ctrl->ipa_init_flt6 = _ipa_init_flt6_v3;
/* v3.0 clock plan: SVS/nominal/turbo rates defined at top of file */
2548 ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO;
2549 ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL;
2550 ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS;
2551 ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v3_0;
2552 ctrl->ipa3_commit_flt = __ipa_commit_flt_v3;
2553 ctrl->ipa3_commit_rt = __ipa_commit_rt_v3;
2554 ctrl->ipa3_commit_hdr = __ipa_commit_hdr_v3_0;
2555 ctrl->ipa3_enable_clks = _ipa_enable_clks_v3_0;
2556 ctrl->ipa3_disable_clks = _ipa_disable_clks_v3_0;
2557 ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v3_0;
/* bandwidth thresholds (Mbps) driving clock-scaling decisions */
2558 ctrl->clock_scaling_bw_threshold_nominal =
2559 IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS;
2560 ctrl->clock_scaling_bw_threshold_turbo =
2561 IPA_V3_0_BW_THRESHOLD_TURBO_MBPS;
2562 ctrl->ipa_reg_base_ofst = ipahal_get_reg_base();
2563 ctrl->ipa_init_sram = _ipa_init_sram_v3_0;
2564 ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
2566 ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
/*
 * ipa3_skb_recycle() - reset an skb for reuse without reallocating:
 * clears the shared info (restoring a single dataref), zeroes the skb
 * header fields up to 'tail', and rewinds data to head + NET_SKB_PAD.
 */
2571 void ipa3_skb_recycle(struct sk_buff *skb)
2573 struct skb_shared_info *shinfo;
2575 shinfo = skb_shinfo(skb);
2576 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
/* exactly one reference: the recycled skb itself */
2577 atomic_set(&shinfo->dataref, 1);
2579 memset(skb, 0, offsetof(struct sk_buff, tail));
2580 skb->data = skb->head + NET_SKB_PAD;
2581 skb_reset_tail_pointer(skb);
/*
 * ipa3_alloc_rule_id() - allocate an Apps rule id from @rule_ids.
 * The allocation range is [ipahal_get_low_rule_id(),
 * ipahal_get_rule_id_hi_bit()), keeping the high bit clear for Apps.
 * Return: new id on success, negative errno from idr_alloc on failure.
 */
2584 int ipa3_alloc_rule_id(struct idr *rule_ids)
2586 /* There is two groups of rule-Ids, Modem ones and Apps ones.
2587 * Distinction by high bit: Modem Ids are high bit asserted.
2589 return idr_alloc(rule_ids, NULL,
2590 ipahal_get_low_rule_id(), ipahal_get_rule_id_hi_bit(),
/*
 * ipa3_id_alloc() - allocate a driver-global id mapped to @ptr in
 * ipa3_ctx->ipa_idr. idr_preload() + GFP_NOWAIT keep the allocation
 * itself safe under the spinlock.
 * Return (not visible in fragment): presumably the allocated id /
 * negative errno - TODO confirm against the full file.
 */
2594 int ipa3_id_alloc(void *ptr)
2598 idr_preload(GFP_KERNEL);
2599 spin_lock(&ipa3_ctx->idr_lock);
2600 id = idr_alloc(&ipa3_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
2601 spin_unlock(&ipa3_ctx->idr_lock);
/*
 * ipa3_id_find() - look up the pointer registered under @id in the
 * driver-global idr, under idr_lock. NULL if @id is not present.
 */
2607 void *ipa3_id_find(u32 id)
2611 spin_lock(&ipa3_ctx->idr_lock);
2612 ptr = idr_find(&ipa3_ctx->ipa_idr, id);
2613 spin_unlock(&ipa3_ctx->idr_lock);
/*
 * ipa3_id_remove() - remove @id from the driver-global idr under
 * idr_lock. Does not free whatever the id mapped to.
 */
2618 void ipa3_id_remove(u32 id)
2620 spin_lock(&ipa3_ctx->idr_lock);
2621 idr_remove(&ipa3_ctx->ipa_idr, id);
2622 spin_unlock(&ipa3_ctx->idr_lock);
/*
 * ipa3_tag_destroy_imm() - descriptor completion callback: destroy the
 * immediate-command payload carried in @user1. @user2 is unused.
 */
2625 void ipa3_tag_destroy_imm(void *user1, int user2)
2627 ipahal_destroy_imm_cmd(user1);
/*
 * ipa3_tag_free_skb() - descriptor completion callback: free the dummy
 * skb carried in @user1 (any context). @user2 is unused.
 */
2630 static void ipa3_tag_free_skb(void *user1, int user2)
2632 dev_kfree_skb_any((struct sk_buff *)user1);
2635 #define REQUIRED_TAG_PROCESS_DESCRIPTORS 4
2637 /* ipa3_tag_process() - Initiates a tag process. Incorporates the input
2640 * @desc: descriptors with commands for IC
2641 * @desc_size: amount of descriptors in the above variable
2643 * Note: The descriptors are copied (if there's room), the client needs to
2644 * free his descriptors afterwards
2646 * Return: 0 or negative in case of failure
/*
 * ipa3_tag_process() - run a TAG process: copy the caller's descriptors,
 * append [NOP, IP_PACKET_INIT, IP_PACKET_TAG_STATUS, dummy skb]
 * descriptors, send everything with a single EOT and wait (up to
 * @timeout) for the TAG response delivered by the rx handler.
 * The caller stays responsible for freeing his own descriptors.
 * NOTE(review): the embedded original line numbers jump - error
 * branches, braces and several statements are missing from this
 * fragment; comments below only describe what is visible.
 */
2648 int ipa3_tag_process(struct ipa3_desc desc[],
2650 unsigned long timeout)
2652 struct ipa3_sys_context *sys;
2653 struct ipa3_desc *tag_desc;
2655 struct ipahal_imm_cmd_ip_packet_init pktinit_cmd;
2656 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2657 struct ipahal_imm_cmd_ip_packet_tag_status status;
2659 struct sk_buff *dummy_skb;
2661 struct ipa3_tag_completion *comp;
2664 /* Not enough room for the required descriptors for the tag process */
2665 if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
2666 IPAERR("up to %d descriptors are allowed (received %d)\n",
2667 IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS,
/* TAG descriptors are injected through the APPS_CMD_PROD pipe */
2672 ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
2674 IPAERR("Client %u is not mapped\n",
2675 IPA_CLIENT_APPS_CMD_PROD);
2678 sys = ipa3_ctx->ep[ep_idx].sys;
2680 tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL);
2682 IPAERR("failed to allocate memory\n");
2686 /* Copy the required descriptors from the client now */
2688 memcpy(&(tag_desc[0]), desc, descs_num *
2689 sizeof(tag_desc[0]));
2690 desc_idx += descs_num;
2693 /* NO-OP IC for ensuring that IPA pipeline is empty */
2694 cmd_pyld = ipahal_construct_nop_imm_cmd(
2695 false, IPAHAL_FULL_PIPELINE_CLEAR, false);
2697 IPAERR("failed to construct NOP imm cmd\n");
2699 goto fail_free_tag_desc;
2701 tag_desc[desc_idx].opcode =
2702 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
2703 tag_desc[desc_idx].pyld = cmd_pyld->data;
2704 tag_desc[desc_idx].len = cmd_pyld->len;
2705 tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
2706 tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
2707 tag_desc[desc_idx].user1 = cmd_pyld;
2710 /* IP_PACKET_INIT IC for tag status to be sent to apps */
2711 pktinit_cmd.destination_pipe_index =
2712 ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
2713 cmd_pyld = ipahal_construct_imm_cmd(
2714 IPA_IMM_CMD_IP_PACKET_INIT, &pktinit_cmd, false);
2716 IPAERR("failed to construct ip_packet_init imm cmd\n");
2718 goto fail_free_desc;
2720 tag_desc[desc_idx].opcode =
2721 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
2722 tag_desc[desc_idx].pyld = cmd_pyld->data;
2723 tag_desc[desc_idx].len = cmd_pyld->len;
2724 tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
2725 tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
2726 tag_desc[desc_idx].user1 = cmd_pyld;
/* TAG status: the IPA_COOKIE tag lets the rx handler match response */
2730 status.tag = IPA_COOKIE;
2731 cmd_pyld = ipahal_construct_imm_cmd(
2732 IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &status, false);
2734 IPAERR("failed to construct ip_packet_tag_status imm cmd\n");
2736 goto fail_free_desc;
2738 tag_desc[desc_idx].opcode =
2739 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
2740 tag_desc[desc_idx].pyld = cmd_pyld->data;
2741 tag_desc[desc_idx].len = cmd_pyld->len;
2742 tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
2743 tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
2744 tag_desc[desc_idx].user1 = cmd_pyld;
2747 comp = kzalloc(sizeof(*comp), GFP_KERNEL);
2751 goto fail_free_desc;
2753 init_completion(&comp->comp);
2755 /* completion needs to be released from both here and rx handler */
2756 atomic_set(&comp->cnt, 2);
2758 /* dummy packet to send to IPA. packet payload is a completion object */
2759 dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL);
2761 IPAERR("failed to allocate memory\n");
2763 goto fail_free_comp;
/* sizeof(comp) here is the size of the POINTER - the skb carries the
 * completion object's address, which the rx handler dereferences */
2766 memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));
2768 tag_desc[desc_idx].pyld = dummy_skb->data;
2769 tag_desc[desc_idx].len = dummy_skb->len;
2770 tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
2771 tag_desc[desc_idx].callback = ipa3_tag_free_skb;
2772 tag_desc[desc_idx].user1 = dummy_skb;
2775 /* send all descriptors to IPA with single EOT */
2776 res = ipa3_send(sys, desc_idx, tag_desc, true);
2778 IPAERR("failed to send TAG packets %d\n", res);
2780 goto fail_free_comp;
2785 IPADBG("waiting for TAG response\n");
2786 res = wait_for_completion_timeout(&comp->comp, timeout);
2788 IPAERR("timeout (%lu msec) on waiting for TAG response\n",
/* whichever side (here / rx handler) drops cnt to 0 frees comp */
2791 if (atomic_dec_return(&comp->cnt) == 0)
2796 IPADBG("TAG response arrived!\n");
2797 if (atomic_dec_return(&comp->cnt) == 0)
2800 /* sleep for short period to ensure IPA wrote all packets to BAM */
2801 usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
2809 * Free only the first descriptors allocated here.
2810 * [nop, pkt_init, status, dummy_skb]
2811 * The user is responsible to free his allocations
2812 * in case of failure.
2813 * The min is required because we may fail during
2814 * one of the initial allocations above
2817 i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS, desc_idx); i++)
2818 if (tag_desc[i].callback)
2819 tag_desc[i].callback(tag_desc[i].user1,
2827 * ipa3_tag_generate_force_close_desc() - generate descriptors for force close
2830 * @desc: descriptors for IC
2831 * @desc_size: desc array size
2832 * @start_pipe: first pipe to close aggregation
2833 * @end_pipe: last (non-inclusive) pipe to close aggregation
2835 * Return: number of descriptors written or negative in case of failure
2837 static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
2838 int desc_size, int start_pipe, int end_pipe)
2841 struct ipa_ep_cfg_aggr ep_aggr;
2844 struct ipahal_imm_cmd_register_write reg_write_agg_close;
2845 struct ipahal_imm_cmd_pyld *cmd_pyld;
2846 struct ipahal_reg_valmask valmask;
2848 for (i = start_pipe; i < end_pipe; i++) {
2849 ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, i, &ep_aggr);
2850 if (!ep_aggr.aggr_en)
2852 IPADBG("Force close ep: %d\n", i);
2853 if (desc_idx + 1 > desc_size) {
2854 IPAERR("Internal error - no descriptors\n");
2859 reg_write_agg_close.skip_pipeline_clear = false;
2860 reg_write_agg_close.pipeline_clear_options =
2861 IPAHAL_FULL_PIPELINE_CLEAR;
2862 reg_write_agg_close.offset =
2863 ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
2864 ipahal_get_aggr_force_close_valmask(i, &valmask);
2865 reg_write_agg_close.value = valmask.val;
2866 reg_write_agg_close.value_mask = valmask.mask;
2867 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
2868 ®_write_agg_close, false);
2870 IPAERR("failed to construct register_write imm cmd\n");
2872 goto fail_alloc_reg_write_agg_close;
2875 desc[desc_idx].opcode =
2876 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
2877 desc[desc_idx].pyld = cmd_pyld->data;
2878 desc[desc_idx].len = cmd_pyld->len;
2879 desc[desc_idx].type = IPA_IMM_CMD_DESC;
2880 desc[desc_idx].callback = ipa3_tag_destroy_imm;
2881 desc[desc_idx].user1 = cmd_pyld;
2887 fail_alloc_reg_write_agg_close:
2888 for (i = 0; i < desc_idx; i++)
2889 if (desc[desc_idx].callback)
2890 desc[desc_idx].callback(desc[desc_idx].user1,
2891 desc[desc_idx].user2);
2897 * ipa3_tag_aggr_force_close() - Force close aggregation
2899 * @pipe_num: pipe number or -1 for all pipes
/*
 * ipa3_tag_aggr_force_close() - force-close aggregation on one pipe
 * (@pipe_num) or on all pipes (@pipe_num == -1), then run a TAG
 * process so the call returns only after IPA has handled the closes.
 * NOTE(review): fragment is truncated (error paths / return missing).
 */
2901 int ipa3_tag_aggr_force_close(int pipe_num)
2903 struct ipa3_desc *desc;
2910 if (pipe_num < -1 || pipe_num >= (int)ipa3_ctx->ipa_num_pipes) {
2911 IPAERR("Invalid pipe number %d\n", pipe_num);
2915 if (pipe_num == -1) {
2917 end_pipe = ipa3_ctx->ipa_num_pipes;
2919 start_pipe = pipe_num;
2920 end_pipe = pipe_num + 1;
/* worst case: one descriptor per pipe in the range */
2923 num_descs = end_pipe - start_pipe;
2925 desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL);
2931 /* Force close aggregation on all valid pipes with aggregation */
2932 num_aggr_descs = ipa3_tag_generate_force_close_desc(desc, num_descs,
2933 start_pipe, end_pipe);
2934 if (num_aggr_descs < 0) {
2935 IPAERR("ipa3_tag_generate_force_close_desc failed %d\n",
2937 goto fail_free_desc;
2940 res = ipa3_tag_process(desc, num_aggr_descs,
2941 IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT);
2950 * ipa3_is_ready() - check if IPA module was initialized
2953 * Return value: true for yes; false for no
/*
 * ipa3_is_ready() - report whether driver initialization completed.
 * Reads ipa_initialization_complete under the context mutex; false
 * when the context has not been allocated yet.
 */
2955 bool ipa3_is_ready(void)
2959 if (ipa3_ctx == NULL)
2961 mutex_lock(&ipa3_ctx->lock);
2962 complete = ipa3_ctx->ipa_initialization_complete;
2963 mutex_unlock(&ipa3_ctx->lock);
2968 * ipa3_is_client_handle_valid() - check if IPA client handle is valid handle
2970 * Return value: true for yes; false for no
2972 bool ipa3_is_client_handle_valid(u32 clnt_hdl)
2974 if (clnt_hdl >= 0 && clnt_hdl < ipa3_ctx->ipa_num_pipes)
2980 * ipa3_proxy_clk_unvote() - called to remove IPA clock proxy vote
2982 * Return value: none
/*
 * ipa3_proxy_clk_unvote() - drop one proxy clock vote held on behalf
 * of Q6. When the count reaches zero the vote is marked invalid.
 * No-op if the driver is not ready or no valid vote exists.
 */
2984 void ipa3_proxy_clk_unvote(void)
2986 if (!ipa3_is_ready())
2989 mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
2990 if (ipa3_ctx->q6_proxy_clk_vote_valid) {
2991 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
2992 ipa3_ctx->q6_proxy_clk_vote_cnt--;
2993 if (ipa3_ctx->q6_proxy_clk_vote_cnt == 0)
2994 ipa3_ctx->q6_proxy_clk_vote_valid = false;
2996 mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
3000 * ipa3_proxy_clk_vote() - called to add IPA clock proxy vote
3002 * Return value: none
/*
 * ipa3_proxy_clk_vote() - add one proxy clock vote on behalf of Q6.
 * Votes when no valid vote exists yet, or when the count is already
 * positive (stacking further votes). No-op if driver is not ready.
 */
3004 void ipa3_proxy_clk_vote(void)
3006 if (!ipa3_is_ready())
3009 mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
3010 if (!ipa3_ctx->q6_proxy_clk_vote_valid ||
3011 (ipa3_ctx->q6_proxy_clk_vote_cnt > 0)) {
3012 IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
3013 ipa3_ctx->q6_proxy_clk_vote_cnt++;
3014 ipa3_ctx->q6_proxy_clk_vote_valid = true;
3016 mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
3020 * ipa3_get_smem_restr_bytes()- Return IPA smem restricted bytes
3022 * Return value: u16 - number of IPA smem restricted bytes
/*
 * ipa3_get_smem_restr_bytes() - number of IPA SMEM restricted bytes.
 * Logs an error when the driver context is not initialized (the
 * guarding condition is elided in this truncated fragment).
 */
3024 u16 ipa3_get_smem_restr_bytes(void)
3027 return ipa3_ctx->smem_restricted_bytes;
3029 IPAERR("IPA Driver not initialized\n");
3035 * ipa3_get_modem_cfg_emb_pipe_flt()- Return ipa3_ctx->modem_cfg_emb_pipe_flt
3037 * Return value: true if modem configures embedded pipe flt, false otherwise
/*
 * ipa3_get_modem_cfg_emb_pipe_flt() - whether the modem configures
 * embedded pipe filtering; logs an error if the driver context is
 * unavailable (guard elided in this truncated fragment).
 */
3039 bool ipa3_get_modem_cfg_emb_pipe_flt(void)
3042 return ipa3_ctx->modem_cfg_emb_pipe_flt;
3044 IPAERR("IPA driver has not been initialized\n");
3050 * ipa3_get_transport_type()- Return ipa3_ctx->transport_prototype
3052 * Return value: enum ipa_transport_type
/*
 * ipa3_get_transport_type() - the configured transport prototype;
 * defaults to IPA_TRANSPORT_TYPE_GSI when the driver context is not
 * initialized (guard elided in this truncated fragment).
 */
3054 enum ipa_transport_type ipa3_get_transport_type(void)
3057 return ipa3_ctx->transport_prototype;
3059 IPAERR("IPA driver has not been initialized\n");
3060 return IPA_TRANSPORT_TYPE_GSI;
/* ipa3_get_num_pipes() - read the enabled-pipes count from HW. */
3063 u32 ipa3_get_num_pipes(void)
3065 return ipahal_read_reg(IPA_ENABLED_PIPES);
3069 * ipa3_disable_apps_wan_cons_deaggr()- set ipa_ctx->ipa_client_apps_wan_cons_agg_gro
3071 * Return value: 0 or negative in case of failure
/*
 * ipa3_disable_apps_wan_cons_deaggr() - validate the requested
 * aggregation byte/packet limits against HW maxima and, if valid,
 * flag WAN consumer aggregation GRO mode.
 * Return: 0 or negative in case of failure.
 */
3073 int ipa3_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
3078 /* checking if IPA-HW can support */
/* agg_size is in bytes; HW byte limit is in KB - hence the >> 10.
 * NOTE(review): assumed from the shift; confirm units with callers. */
3079 limit = ipahal_aggr_get_max_byte_limit();
3080 if ((agg_size >> 10) > limit) {
3081 IPAERR("IPA-AGG byte limit %d\n", limit);
3082 IPAERR("exceed aggr_byte_limit\n");
3085 limit = ipahal_aggr_get_max_pkt_limit();
3086 if (agg_count > limit) {
3087 IPAERR("IPA-AGG pkt limit %d\n", limit);
3088 IPAERR("exceed aggr_pkt_limit\n");
3093 ipa3_ctx->ipa_client_apps_wan_cons_agg_gro = true;
/* ipa3_get_ipc_logbuf() - the normal-priority IPC log buffer handle. */
3099 static void *ipa3_get_ipc_logbuf(void)
3102 return ipa3_ctx->logbuf;
/* ipa3_get_ipc_logbuf_low() - the low-priority IPC log buffer handle. */
3107 static void *ipa3_get_ipc_logbuf_low(void)
3110 return ipa3_ctx->logbuf_low;
/*
 * ipa3_get_holb() - copy the cached head-of-line-blocking config of
 * endpoint @ep_idx into @holb. No bounds check on @ep_idx here.
 */
3115 static void ipa3_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
3117 *holb = ipa3_ctx->ep[ep_idx].holb;
/*
 * ipa3_set_tag_process_before_gating() - toggle whether a TAG process
 * runs before clock gating.
 */
3120 static void ipa3_set_tag_process_before_gating(bool val)
3122 ipa3_ctx->tag_process_before_gating = val;
/*
 * ipa3_bind_api_controller() - fill @api_ctrl with the IPAv3
 * implementations of the generic IPA API. Rejects HW versions older
 * than v3.0. Pure assignment table - no other side effects visible.
 * NOTE(review): fragment is truncated (braces/return elided).
 */
3125 int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
3126 struct ipa_api_controller *api_ctrl)
3128 if (ipa_hw_type < IPA_HW_v3_0) {
3129 IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type);
/* endpoint connect / config operations */
3134 api_ctrl->ipa_connect = ipa3_connect;
3135 api_ctrl->ipa_disconnect = ipa3_disconnect;
3136 api_ctrl->ipa_reset_endpoint = ipa3_reset_endpoint;
3137 api_ctrl->ipa_clear_endpoint_delay = ipa3_clear_endpoint_delay;
3138 api_ctrl->ipa_disable_endpoint = NULL;
3139 api_ctrl->ipa_cfg_ep = ipa3_cfg_ep;
3140 api_ctrl->ipa_cfg_ep_nat = ipa3_cfg_ep_nat;
3141 api_ctrl->ipa_cfg_ep_hdr = ipa3_cfg_ep_hdr;
3142 api_ctrl->ipa_cfg_ep_hdr_ext = ipa3_cfg_ep_hdr_ext;
3143 api_ctrl->ipa_cfg_ep_mode = ipa3_cfg_ep_mode;
3144 api_ctrl->ipa_cfg_ep_aggr = ipa3_cfg_ep_aggr;
3145 api_ctrl->ipa_cfg_ep_deaggr = ipa3_cfg_ep_deaggr;
3146 api_ctrl->ipa_cfg_ep_route = ipa3_cfg_ep_route;
3147 api_ctrl->ipa_cfg_ep_holb = ipa3_cfg_ep_holb;
3148 api_ctrl->ipa_get_holb = ipa3_get_holb;
3149 api_ctrl->ipa_set_tag_process_before_gating =
3150 ipa3_set_tag_process_before_gating;
3151 api_ctrl->ipa_cfg_ep_cfg = ipa3_cfg_ep_cfg;
3152 api_ctrl->ipa_cfg_ep_metadata_mask = ipa3_cfg_ep_metadata_mask;
3153 api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client;
3154 api_ctrl->ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl;
/* header and processing-context table operations */
3155 api_ctrl->ipa_add_hdr = ipa3_add_hdr;
3156 api_ctrl->ipa_add_hdr_usr = ipa3_add_hdr_usr;
3157 api_ctrl->ipa_del_hdr = ipa3_del_hdr;
3158 api_ctrl->ipa_commit_hdr = ipa3_commit_hdr;
3159 api_ctrl->ipa_reset_hdr = ipa3_reset_hdr;
3160 api_ctrl->ipa_get_hdr = ipa3_get_hdr;
3161 api_ctrl->ipa_put_hdr = ipa3_put_hdr;
3162 api_ctrl->ipa_copy_hdr = ipa3_copy_hdr;
3163 api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx;
3164 api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx;
/* routing and filtering rule operations */
3165 api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule;
3166 api_ctrl->ipa_add_rt_rule_usr = ipa3_add_rt_rule_usr;
3167 api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule;
3168 api_ctrl->ipa_commit_rt = ipa3_commit_rt;
3169 api_ctrl->ipa_reset_rt = ipa3_reset_rt;
3170 api_ctrl->ipa_get_rt_tbl = ipa3_get_rt_tbl;
3171 api_ctrl->ipa_put_rt_tbl = ipa3_put_rt_tbl;
3172 api_ctrl->ipa_query_rt_index = ipa3_query_rt_index;
3173 api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule;
3174 api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule;
3175 api_ctrl->ipa_add_flt_rule_usr = ipa3_add_flt_rule_usr;
3176 api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule;
3177 api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule;
3178 api_ctrl->ipa_commit_flt = ipa3_commit_flt;
3179 api_ctrl->ipa_reset_flt = ipa3_reset_flt;
/* NAT operations */
3180 api_ctrl->allocate_nat_device = ipa3_allocate_nat_device;
3181 api_ctrl->ipa_nat_init_cmd = ipa3_nat_init_cmd;
3182 api_ctrl->ipa_nat_dma_cmd = ipa3_nat_dma_cmd;
3183 api_ctrl->ipa_nat_del_cmd = ipa3_nat_del_cmd;
/* messaging and interface registration */
3184 api_ctrl->ipa_send_msg = ipa3_send_msg;
3185 api_ctrl->ipa_register_pull_msg = ipa3_register_pull_msg;
3186 api_ctrl->ipa_deregister_pull_msg = ipa3_deregister_pull_msg;
3187 api_ctrl->ipa_register_intf = ipa3_register_intf;
3188 api_ctrl->ipa_register_intf_ext = ipa3_register_intf_ext;
3189 api_ctrl->ipa_deregister_intf = ipa3_deregister_intf;
3190 api_ctrl->ipa_set_aggr_mode = ipa3_set_aggr_mode;
3191 api_ctrl->ipa_set_qcncm_ndp_sig = ipa3_set_qcncm_ndp_sig;
3192 api_ctrl->ipa_set_single_ndp_per_mbim = ipa3_set_single_ndp_per_mbim;
/* data path */
3193 api_ctrl->ipa_tx_dp = ipa3_tx_dp;
3194 api_ctrl->ipa_tx_dp_mul = ipa3_tx_dp_mul;
3195 api_ctrl->ipa_free_skb = ipa3_free_skb;
3196 api_ctrl->ipa_setup_sys_pipe = ipa3_setup_sys_pipe;
3197 api_ctrl->ipa_teardown_sys_pipe = ipa3_teardown_sys_pipe;
3198 api_ctrl->ipa_sys_setup = ipa3_sys_setup;
3199 api_ctrl->ipa_sys_teardown = ipa3_sys_teardown;
3200 api_ctrl->ipa_sys_update_gsi_hdls = ipa3_sys_update_gsi_hdls;
/* WDI (WLAN) pipes */
3201 api_ctrl->ipa_connect_wdi_pipe = ipa3_connect_wdi_pipe;
3202 api_ctrl->ipa_disconnect_wdi_pipe = ipa3_disconnect_wdi_pipe;
3203 api_ctrl->ipa_enable_wdi_pipe = ipa3_enable_wdi_pipe;
3204 api_ctrl->ipa_disable_wdi_pipe = ipa3_disable_wdi_pipe;
3205 api_ctrl->ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe;
3206 api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
3207 api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
3208 api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
3209 api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
3210 ipa3_broadcast_wdi_quota_reach_ind;
3211 api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
3212 api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
3213 api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
/* tethering bridge */
3214 api_ctrl->teth_bridge_init = ipa3_teth_bridge_init;
3215 api_ctrl->teth_bridge_disconnect = ipa3_teth_bridge_disconnect;
3216 api_ctrl->teth_bridge_connect = ipa3_teth_bridge_connect;
3217 api_ctrl->ipa_set_client = ipa3_set_client;
3218 api_ctrl->ipa_get_client = ipa3_get_client;
3219 api_ctrl->ipa_get_client_uplink = ipa3_get_client_uplink;
/* IPA DMA */
3220 api_ctrl->ipa_dma_init = ipa3_dma_init;
3221 api_ctrl->ipa_dma_enable = ipa3_dma_enable;
3222 api_ctrl->ipa_dma_disable = ipa3_dma_disable;
3223 api_ctrl->ipa_dma_sync_memcpy = ipa3_dma_sync_memcpy;
3224 api_ctrl->ipa_dma_async_memcpy = ipa3_dma_async_memcpy;
3225 api_ctrl->ipa_dma_uc_memcpy = ipa3_dma_uc_memcpy;
3226 api_ctrl->ipa_dma_destroy = ipa3_dma_destroy;
/* MHI */
3227 api_ctrl->ipa_mhi_init_engine = ipa3_mhi_init_engine;
3228 api_ctrl->ipa_connect_mhi_pipe = ipa3_connect_mhi_pipe;
3229 api_ctrl->ipa_disconnect_mhi_pipe = ipa3_disconnect_mhi_pipe;
3230 api_ctrl->ipa_mhi_stop_gsi_channel = ipa3_mhi_stop_gsi_channel;
3231 api_ctrl->ipa_uc_mhi_reset_channel = ipa3_uc_mhi_reset_channel;
3232 api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
3233 ipa3_qmi_enable_force_clear_datapath_send;
3234 api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
3235 ipa3_qmi_disable_force_clear_datapath_send;
3236 api_ctrl->ipa_mhi_reset_channel_internal =
3237 ipa3_mhi_reset_channel_internal;
3238 api_ctrl->ipa_mhi_start_channel_internal =
3239 ipa3_mhi_start_channel_internal;
3240 api_ctrl->ipa_mhi_query_ch_info = ipa3_mhi_query_ch_info;
3241 api_ctrl->ipa_mhi_resume_channels_internal =
3242 ipa3_mhi_resume_channels_internal;
3243 api_ctrl->ipa_has_open_aggr_frame = ipa3_has_open_aggr_frame;
3244 api_ctrl->ipa_mhi_destroy_channel = ipa3_mhi_destroy_channel;
3245 api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
3246 ipa3_uc_mhi_send_dl_ul_sync_info;
3247 api_ctrl->ipa_uc_mhi_init = ipa3_uc_mhi_init;
3248 api_ctrl->ipa_uc_mhi_suspend_channel = ipa3_uc_mhi_suspend_channel;
3249 api_ctrl->ipa_uc_mhi_stop_event_update_channel =
3250 ipa3_uc_mhi_stop_event_update_channel;
3251 api_ctrl->ipa_uc_mhi_cleanup = ipa3_uc_mhi_cleanup;
3252 api_ctrl->ipa_uc_state_check = ipa3_uc_state_check;
/* misc: interrupts, clocks, resources, logging */
3253 api_ctrl->ipa_write_qmap_id = ipa3_write_qmap_id;
3254 api_ctrl->ipa_add_interrupt_handler = ipa3_add_interrupt_handler;
3255 api_ctrl->ipa_remove_interrupt_handler = ipa3_remove_interrupt_handler;
3256 api_ctrl->ipa_restore_suspend_handler = ipa3_restore_suspend_handler;
3257 api_ctrl->ipa_bam_reg_dump = ipa3_bam_reg_dump;
3258 api_ctrl->ipa_get_ep_mapping = ipa3_get_ep_mapping;
3259 api_ctrl->ipa_is_ready = ipa3_is_ready;
3260 api_ctrl->ipa_proxy_clk_vote = ipa3_proxy_clk_vote;
3261 api_ctrl->ipa_proxy_clk_unvote = ipa3_proxy_clk_unvote;
3262 api_ctrl->ipa_is_client_handle_valid = ipa3_is_client_handle_valid;
3263 api_ctrl->ipa_get_client_mapping = ipa3_get_client_mapping;
3264 api_ctrl->ipa_get_rm_resource_from_ep = ipa3_get_rm_resource_from_ep;
3265 api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
3266 ipa3_get_modem_cfg_emb_pipe_flt;
3267 api_ctrl->ipa_get_transport_type = ipa3_get_transport_type;
3268 api_ctrl->ipa_ap_suspend = ipa3_ap_suspend;
3269 api_ctrl->ipa_ap_resume = ipa3_ap_resume;
3270 api_ctrl->ipa_get_smmu_domain = ipa3_get_smmu_domain;
3271 api_ctrl->ipa_disable_apps_wan_cons_deaggr =
3272 ipa3_disable_apps_wan_cons_deaggr;
3273 api_ctrl->ipa_get_dma_dev = ipa3_get_dma_dev;
3274 api_ctrl->ipa_release_wdi_mapping = ipa3_release_wdi_mapping;
3275 api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping;
3276 api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
3277 api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
3278 api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb;
3279 api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks;
3280 api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks;
3281 api_ctrl->ipa_inc_client_enable_clks_no_block =
3282 ipa3_inc_client_enable_clks_no_block;
3283 api_ctrl->ipa_suspend_resource_no_block =
3284 ipa3_suspend_resource_no_block;
3285 api_ctrl->ipa_resume_resource = ipa3_resume_resource;
3286 api_ctrl->ipa_suspend_resource_sync = ipa3_suspend_resource_sync;
3287 api_ctrl->ipa_set_required_perf_profile =
3288 ipa3_set_required_perf_profile;
3289 api_ctrl->ipa_get_ipc_logbuf = ipa3_get_ipc_logbuf;
3290 api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low;
3291 api_ctrl->ipa_rx_poll = ipa3_rx_poll;
3292 api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb;
/* uC NTN offload */
3293 api_ctrl->ipa_setup_uc_ntn_pipes = ipa3_setup_uc_ntn_pipes;
3294 api_ctrl->ipa_tear_down_uc_offload_pipes =
3295 ipa3_tear_down_uc_offload_pipes;
3296 api_ctrl->ipa_get_pdev = ipa3_get_pdev;
3297 api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa3_ntn_uc_reg_rdyCB;
3298 api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa3_ntn_uc_dereg_rdyCB;
3304 * ipa_is_modem_pipe()- Checks if pipe is owned by the modem
3306 * @pipe_idx: pipe number
3307 * Return value: true if owned by modem, false otherwise
/*
 * ipa_is_modem_pipe() - true when @pipe_idx maps to any Q6 (modem)
 * producer or consumer client; false for out-of-range indices.
 */
3309 bool ipa_is_modem_pipe(int pipe_idx)
3313 if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
3314 IPAERR("Bad pipe index!\n");
/* scan all clients, considering only Q6-owned ones */
3318 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
3319 if (!IPA_CLIENT_IS_Q6_CONS(client_idx) &&
3320 !IPA_CLIENT_IS_Q6_PROD(client_idx))
3322 if (ipa3_get_ep_mapping(client_idx) == pipe_idx)
/*
 * ipa3_write_rsrc_grp_type_reg() - write a source (@src == true) or
 * destination resource-group config register for resource type @n.
 * Register selection depends on @group_index: groups are packed two
 * per register (01 / 23 / 45 variants).
 * NOTE(review): case labels for the first register pair are elided in
 * this truncated fragment.
 */
3329 static void ipa3_write_rsrc_grp_type_reg(int group_index,
3330 enum ipa_rsrc_grp_type_src n, bool src,
3331 struct ipahal_reg_rsrc_grp_cfg *val) {
3334 switch (group_index) {
3337 ipahal_write_reg_n_fields(
3338 IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
3341 case IPA_GROUP_DIAG:
3343 ipahal_write_reg_n_fields(
3344 IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
3347 case IPA_GROUP_Q6ZIP:
3348 case IPA_GROUP_UC_RX_Q:
3349 ipahal_write_reg_n_fields(
3350 IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
3355 " Invalid source resource group,index #%d\n",
/* destination-side register family */
3360 switch (group_index) {
3363 ipahal_write_reg_n_fields(
3364 IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
3367 case IPA_GROUP_DIAG:
3369 ipahal_write_reg_n_fields(
3370 IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
3373 case IPA_GROUP_Q6ZIP_GENERAL:
3374 case IPA_GROUP_Q6ZIP_ENGINE:
3375 ipahal_write_reg_n_fields(
3376 IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
3381 " Invalid destination resource group,index #%d\n",
/*
 * ipa3_configure_rx_hps_clients() - program the RX_HPS CMDQ client
 * min (@min == true) or max depth registers. Six clients total:
 * depth 0 covers clients 0..3, depth 1 covers clients 4..5 (hence
 * the "4 + i" indexing for depth 1).
 */
3388 static void ipa3_configure_rx_hps_clients(int depth, bool min)
3391 struct ipahal_reg_rx_hps_clients val;
3394 * depth 0 contains 4 first clients out of 6
3395 * depth 1 contains 2 last clients out of 6
3397 for (i = 0 ; i < (depth ? 2 : 4) ; i++) {
3399 val.client_minmax[i] =
3400 ipa3_rsrc_rx_grp_config
3401 [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
3402 [!depth ? i : 4 + i].min;
3404 val.client_minmax[i] =
3405 ipa3_rsrc_rx_grp_config
3406 [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
3407 [!depth ? i : 4 + i].max;
/* register choice: DEPTH_1 vs DEPTH_0, MIN vs MAX variant */
3410 ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 :
3411 IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
3414 ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 :
3415 IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
/*
 * ipa3_set_resorce_groups_min_max_limits() - program min/max limits
 * for all source and destination resource groups, two groups per
 * register write, then (on pre-v3.1 HW only) the RX_HPS CMDQ client
 * depths. From v3.1 resource-group config is owned by TZ, so the
 * HLOS writes are skipped.
 * (Name keeps the historical "resorce" spelling - it is part of the
 * driver's exported interface.)
 */
3420 void ipa3_set_resorce_groups_min_max_limits(void)
3424 struct ipahal_reg_rsrc_grp_cfg val;
3427 IPADBG("Assign source rsrc groups min-max limits\n");
3429 for (i = 0; i < IPA_RSRC_GRP_TYPE_SRC_MAX; i++) {
3430 for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
/* x = even group, y = odd group - packed into one register */
3431 val.x_min = ipa3_rsrc_src_grp_config[i][j].min;
3432 val.x_max = ipa3_rsrc_src_grp_config[i][j].max;
3433 val.y_min = ipa3_rsrc_src_grp_config[i][j + 1].min;
3434 val.y_max = ipa3_rsrc_src_grp_config[i][j + 1].max;
3435 ipa3_write_rsrc_grp_type_reg(j, i, true, &val);
3439 IPADBG("Assign destination rsrc groups min-max limits\n");
3441 for (i = 0; i < IPA_RSRC_GRP_TYPE_DST_MAX; i++) {
3442 for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
3443 val.x_min = ipa3_rsrc_dst_grp_config[i][j].min;
3444 val.x_max = ipa3_rsrc_dst_grp_config[i][j].max;
3445 val.y_min = ipa3_rsrc_dst_grp_config[i][j + 1].min;
3446 val.y_max = ipa3_rsrc_dst_grp_config[i][j + 1].max;
3447 ipa3_write_rsrc_grp_type_reg(j, i, false, &val);
3451 /* move resource group configuration from HLOS to TZ */
3452 if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
/* NOTE(review): informational message logged at error level */
3453 IPAERR("skip configuring ipa_rx_hps_clients from HLOS\n");
3457 IPADBG("Assign RX_HPS CMDQ rsrc groups min-max limits\n");
3459 ipa3_configure_rx_hps_clients(0, true);
3460 ipa3_configure_rx_hps_clients(1, true);
3461 ipa3_configure_rx_hps_clients(0, false);
3462 ipa3_configure_rx_hps_clients(1, false);
/*
 * ipa3_gsi_poll_after_suspend() - after suspending @ep, switch its
 * GSI channel to poll mode; if the channel still holds data, mark EOT
 * activity and kick the sys workqueue to start polling (taking a
 * wakelock) unless polling is already in progress.
 */
3467 static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep)
3471 IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl);
3472 gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
3473 gsi_is_channel_empty(ep->gsi_chan_hdl, &empty);
3475 IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl);
3476 /* queue a work to start polling if don't have one */
3477 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
3478 if (!atomic_read(&ep->sys->curr_polling_state)) {
3479 ipa3_inc_acquire_wakelock();
3480 atomic_set(&ep->sys->curr_polling_state, 1);
3481 queue_work(ep->sys->wq, &ep->sys->work);
/*
 * ipa3_suspend_apps_pipes() - suspend (@suspend == true) or resume
 * the APPS LAN and WAN consumer pipes. On suspend, channels that may
 * still carry data are moved to poll mode; on resume, idle channels
 * go back to callback mode.
 */
3486 void ipa3_suspend_apps_pipes(bool suspend)
3488 struct ipa_ep_cfg_ctrl cfg;
3490 struct ipa3_ep_context *ep;
3492 memset(&cfg, 0, sizeof(cfg));
3493 cfg.ipa_ep_suspend = suspend;
/* LAN consumer pipe */
3495 ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
3496 if (ipa_ep_idx < 0) {
3497 IPAERR("IPA client mapping failed\n");
3501 ep = &ipa3_ctx->ep[ipa_ep_idx];
3503 IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
3505 ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
3507 ipa3_gsi_poll_after_suspend(ep);
3508 else if (!atomic_read(&ep->sys->curr_polling_state))
3509 gsi_config_channel_mode(ep->gsi_chan_hdl,
3510 GSI_CHAN_MODE_CALLBACK);
/* WAN consumer pipe.
 * NOTE(review): calls ipa_get_ep_mapping() here (vs
 * ipa3_get_ep_mapping() above) and compares against -1 rather than
 * < 0 - confirm this asymmetry is intentional.
 */
3513 ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
3514 /* Considering the case for SSR. */
3515 if (ipa_ep_idx == -1) {
3516 IPADBG("Invalid client.\n");
3519 ep = &ipa3_ctx->ep[ipa_ep_idx];
3521 IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
3523 ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
3525 ipa3_gsi_poll_after_suspend(ep);
3526 else if (!atomic_read(&ep->sys->curr_polling_state))
3527 gsi_config_channel_mode(ep->gsi_chan_hdl,
3528 GSI_CHAN_MODE_CALLBACK);
/*
 * ipa3_allocate_dma_task_for_gsi() - pre-allocate the coherent DMA
 * buffer and the DMA_TASK_32B_ADDR immediate command used later by
 * ipa3_inject_dma_task_for_gsi() to unblock GSI channel stop.
 * On command-construction failure the DMA buffer is released and the
 * task info is cleared.
 */
3532 int ipa3_allocate_dma_task_for_gsi(void)
3534 struct ipahal_imm_cmd_dma_task_32b_addr cmd = { 0 };
3536 IPADBG("Allocate mem\n");
3537 ipa3_ctx->dma_task_info.mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE;
3538 ipa3_ctx->dma_task_info.mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
3539 ipa3_ctx->dma_task_info.mem.size,
3540 &ipa3_ctx->dma_task_info.mem.phys_base,
3542 if (!ipa3_ctx->dma_task_info.mem.base) {
/* describe the stop packet for the immediate command */
3548 cmd.size1 = ipa3_ctx->dma_task_info.mem.size;
3549 cmd.addr1 = ipa3_ctx->dma_task_info.mem.phys_base;
3550 cmd.packet_size = ipa3_ctx->dma_task_info.mem.size;
3551 ipa3_ctx->dma_task_info.cmd_pyld = ipahal_construct_imm_cmd(
3552 IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false);
3553 if (!ipa3_ctx->dma_task_info.cmd_pyld) {
3554 IPAERR("failed to construct dma_task_32b_addr cmd\n");
3555 dma_free_coherent(ipa3_ctx->pdev,
3556 ipa3_ctx->dma_task_info.mem.size,
3557 ipa3_ctx->dma_task_info.mem.base,
3558 ipa3_ctx->dma_task_info.mem.phys_base);
3559 memset(&ipa3_ctx->dma_task_info, 0,
3560 sizeof(ipa3_ctx->dma_task_info));
/*
 * ipa3_free_dma_task_for_gsi() - release the DMA buffer and immediate
 * command allocated by ipa3_allocate_dma_task_for_gsi(), then clear
 * the task info so stale pointers cannot be reused.
 */
3567 void ipa3_free_dma_task_for_gsi(void)
3569 dma_free_coherent(ipa3_ctx->pdev,
3570 ipa3_ctx->dma_task_info.mem.size,
3571 ipa3_ctx->dma_task_info.mem.base,
3572 ipa3_ctx->dma_task_info.mem.phys_base);
3573 ipahal_destroy_imm_cmd(ipa3_ctx->dma_task_info.cmd_pyld);
3574 memset(&ipa3_ctx->dma_task_info, 0, sizeof(ipa3_ctx->dma_task_info));
3578 * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
3580 * Send a DMA_TASK of 1B to IPA to unblock GSI channel in STOP_IN_PROG.
3581 * Return value: 0 on success, negative otherwise
/*
 * ipa3_inject_dma_task_for_gsi() - send the pre-built 1B DMA_TASK to
 * IPA (with a bounded timeout) to nudge a GSI channel stuck in
 * STOP_IN_PROG. Uses the payload prepared by
 * ipa3_allocate_dma_task_for_gsi().
 */
3583 int ipa3_inject_dma_task_for_gsi(void)
3585 struct ipa3_desc desc = {0};
3587 desc.opcode = ipahal_imm_cmd_get_opcode_param(
3588 IPA_IMM_CMD_DMA_TASK_32B_ADDR, 1);
3589 desc.pyld = ipa3_ctx->dma_task_info.cmd_pyld->data;
3590 desc.len = ipa3_ctx->dma_task_info.cmd_pyld->len;
3591 desc.type = IPA_IMM_CMD_DESC;
3593 IPADBG("sending 1B packet to IPA\n");
3594 if (ipa3_send_cmd_timeout(1, &desc,
3595 IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) {
3596 IPAERR("ipa3_send_cmd failed\n");
3604 * ipa3_stop_gsi_channel()- Stops a GSI channel in IPA
3605 * @chan_hdl: GSI channel handle
3607 * This function implements the sequence to stop a GSI channel
3608 * in IPA. This function returns when the channel is is STOP state.
3610 * Return value: 0 on success, negative otherwise
3612 int ipa3_stop_gsi_channel(u32 clnt_hdl)
3614 struct ipa_mem_buffer mem;
3617 struct ipa3_ep_context *ep;
3619 if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
3620 ipa3_ctx->ep[clnt_hdl].valid == 0) {
3621 IPAERR("bad parm.\n");
3625 ep = &ipa3_ctx->ep[clnt_hdl];
3627 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
3629 memset(&mem, 0, sizeof(mem));
3631 if (IPA_CLIENT_IS_PROD(ep->client)) {
3632 IPADBG("Calling gsi_stop_channel ch:%lu\n",
3634 res = gsi_stop_channel(ep->gsi_chan_hdl);
3635 IPADBG("gsi_stop_channel ch: %lu returned %d\n",
3636 ep->gsi_chan_hdl, res);
3640 for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
3641 IPADBG("Calling gsi_stop_channel ch:%lu\n",
3643 res = gsi_stop_channel(ep->gsi_chan_hdl);
3644 IPADBG("gsi_stop_channel ch: %lu returned %d\n",
3645 ep->gsi_chan_hdl, res);
3646 if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
3649 IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
3650 /* Send a 1B packet DMA_TASK to IPA and try again */
3651 res = ipa3_inject_dma_task_for_gsi();
3653 IPAERR("Failed to inject DMA TASk for GSI\n");
3657 /* sleep for short period to flush IPA */
3658 usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
3659 IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
3662 IPAERR("Failed to stop GSI channel with retries\n");
3665 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
3670 static int ipa3_load_single_fw(const struct firmware *firmware,
3671 const struct elf32_phdr *phdr)
3673 uint32_t *fw_mem_base;
3675 const uint32_t *elf_data_ptr;
3677 if (phdr->p_offset > firmware->size) {
3678 IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n",
3679 phdr->p_offset, firmware->size);
3682 if ((firmware->size - phdr->p_offset) < phdr->p_filesz) {
3683 IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n",
3684 phdr->p_offset, phdr->p_filesz, firmware->size);
3688 if (phdr->p_memsz % sizeof(uint32_t)) {
3689 IPAERR("FW mem size %u doesn't align to 32bit\n",
3694 if (phdr->p_filesz > phdr->p_memsz) {
3695 IPAERR("FW image too big src_size=%u dst_size=%u\n",
3696 phdr->p_filesz, phdr->p_memsz);
3700 fw_mem_base = ioremap(phdr->p_vaddr, phdr->p_memsz);
3702 IPAERR("Failed to map 0x%x for the size of %u\n",
3703 phdr->p_vaddr, phdr->p_memsz);
3707 /* Set the entire region to 0s */
3708 memset(fw_mem_base, 0, phdr->p_memsz);
3710 elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset);
3713 for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) {
3714 writel_relaxed(*elf_data_ptr, &fw_mem_base[index]);
3718 iounmap(fw_mem_base);
3724 * ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
3726 * @firmware: Structure which contains the FW data from the user space.
3727 * @gsi_mem_base: GSI base address
3729 * Return value: 0 on success, negative otherwise
3732 int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base)
3734 const struct elf32_hdr *ehdr;
3735 const struct elf32_phdr *phdr;
3736 unsigned long gsi_iram_ofst;
3737 unsigned long gsi_iram_size;
3738 phys_addr_t ipa_reg_mem_base;
3742 if (!gsi_mem_base) {
3743 IPAERR("Invalid GSI base address\n");
3747 ipa_assert_on(!firmware);
3748 /* One program header per FW image: GSI, DPS and HPS */
3749 if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) {
3750 IPAERR("Missing ELF and Program headers firmware size=%zu\n",
3755 ehdr = (struct elf32_hdr *) firmware->data;
3756 ipa_assert_on(!ehdr);
3757 if (ehdr->e_phnum != 3) {
3758 IPAERR("Unexpected number of ELF program headers\n");
3761 phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr));
3764 * Each ELF program header represents a FW image and contains:
3765 * p_vaddr : The starting address to which the FW needs to loaded.
3766 * p_memsz : The size of the IRAM (where the image loaded)
3767 * p_filesz: The size of the FW image embedded inside the ELF
3768 * p_offset: Absolute offset to the image from the head of the ELF
3771 /* Load GSI FW image */
3772 gsi_get_inst_ram_offset_and_size(&gsi_iram_ofst, &gsi_iram_size);
3773 if (phdr->p_vaddr != (gsi_mem_base + gsi_iram_ofst)) {
3775 "Invalid GSI FW img load addr vaddr=0x%x gsi_mem_base=%pa gsi_iram_ofst=0x%lx\n"
3776 , phdr->p_vaddr, &gsi_mem_base, gsi_iram_ofst);
3779 if (phdr->p_memsz > gsi_iram_size) {
3780 IPAERR("Invalid GSI FW img size memsz=%d gsi_iram_size=%lu\n",
3781 phdr->p_memsz, gsi_iram_size);
3784 rc = ipa3_load_single_fw(firmware, phdr);
3789 ipa_reg_mem_base = ipa3_ctx->ipa_wrapper_base + ipahal_get_reg_base();
3791 /* Load IPA DPS FW image */
3792 ipa_reg_ofst = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_FIRST);
3793 if (phdr->p_vaddr != (ipa_reg_mem_base + ipa_reg_ofst)) {
3795 "Invalid IPA DPS img load addr vaddr=0x%x ipa_reg_mem_base=%pa ipa_reg_ofst=%u\n"
3796 , phdr->p_vaddr, &ipa_reg_mem_base, ipa_reg_ofst);
3799 if (phdr->p_memsz > ipahal_get_dps_img_mem_size()) {
3800 IPAERR("Invalid IPA DPS img size memsz=%d dps_mem_size=%u\n",
3801 phdr->p_memsz, ipahal_get_dps_img_mem_size());
3804 rc = ipa3_load_single_fw(firmware, phdr);
3810 /* Load IPA HPS FW image */
3811 ipa_reg_ofst = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_FIRST);
3812 if (phdr->p_vaddr != (ipa_reg_mem_base + ipa_reg_ofst)) {
3814 "Invalid IPA HPS img load addr vaddr=0x%x ipa_reg_mem_base=%pa ipa_reg_ofst=%u\n"
3815 , phdr->p_vaddr, &ipa_reg_mem_base, ipa_reg_ofst);
3818 if (phdr->p_memsz > ipahal_get_hps_img_mem_size()) {
3819 IPAERR("Invalid IPA HPS img size memsz=%d dps_mem_size=%u\n",
3820 phdr->p_memsz, ipahal_get_hps_img_mem_size());
3823 rc = ipa3_load_single_fw(firmware, phdr);
3827 IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n");
3832 * ipa3_is_msm_device() - Is the running device a MSM or MDM?
3833 * Determine according to IPA version
3835 * Return value: true if MSM, false if MDM
3838 bool ipa3_is_msm_device(void)
3840 switch (ipa3_ctx->ipa_hw_type) {
3848 IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type);
3856 * ipa3_get_pdev() - return a pointer to IPA dev struct
3858 * Return value: a pointer to IPA dev struct
3861 struct device *ipa3_get_pdev(void)
3866 return ipa3_ctx->pdev;