/*
 * drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
 */
1 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12
13 #include <net/ip.h>
14 #include <linux/genalloc.h>     /* gen_pool_alloc() */
15 #include <linux/io.h>
16 #include <linux/ratelimit.h>
17 #include <linux/msm-bus.h>
18 #include <linux/msm-bus-board.h>
19 #include <linux/msm_gsi.h>
20 #include <linux/elf.h>
21 #include "ipa_i.h"
22 #include "ipahal/ipahal.h"
23 #include "ipahal/ipahal_fltrt.h"
24 #include "../ipa_rm_i.h"
25
/* IPA v3.0 core clock rates (Hz) for the SVS/NOMINAL/TURBO corners */
#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
#define IPA_V3_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
/* Max HOLB timer value: 2^32 - 1 (constant promotes to a 64-bit type,
 * value fits in u32)
 */
#define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967296 - 1)

/* Bandwidth thresholds (Mbps) above which TURBO/NOMINAL clock rates
 * are selected
 */
#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000)
#define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600)

/* MUX_ID field of the ENDP_INIT_HDR_METADATA_n register */
#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000
#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10

/* Max pipes + ICs for TAG process */
#define IPA_TAG_MAX_DESC (IPA3_MAX_NUM_PIPES + 6)

#define IPA_TAG_SLEEP_MIN_USEC (1000)
#define IPA_TAG_SLEEP_MAX_USEC (2000)
#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
/* Per-HW-version values written to the BCR register */
#define IPA_BCR_REG_VAL_v3_0 (0x00000001)
#define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
/* Valid range for aggregation granularity settings */
#define IPA_AGGR_GRAN_MIN (1)
#define IPA_AGGR_GRAN_MAX (32)
#define IPA_EOT_COAL_GRAN_MIN (1)
#define IPA_EOT_COAL_GRAN_MAX (16)

#define IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC (15)

/* NOTE(review): BYTE_LIMIT uses ..._N_... while PKT_LIMIT uses
 * ..._n_... — presumably this matches the exact register-macro names
 * declared in the headers; confirm before "fixing" the case.
 */
#define IPA_AGGR_BYTE_LIMIT (\
		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
#define IPA_AGGR_PKT_LIMIT (\
		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \
		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT)

/* In IPAv3 only endpoints 0-3 can be configured to deaggregation */
#define IPA_EP_SUPPORTS_DEAGGR(idx) ((idx) >= 0 && (idx) <= 3)

/* configure IPA spare register 1 in order to have correct IPA version
 * set bits 0,2,3 and 4. see SpareBits documentation.xlsx
 */
#define IPA_SPARE_REG_1_VAL (0x0000081D)
67
/* HPS, DPS sequencers Types*/
#define IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY  0x00000000
/* DMA + DECIPHER/CIPHER */
#define IPA_DPS_HPS_SEQ_TYPE_DMA_DEC 0x00000011
/* Packet Processing + no decipher + uCP (for Ethernet Bridging) */
#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP 0x00000002
/* Packet Processing + decipher + uCP */
#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_UCP 0x00000013
/* 2 Packet Processing pass + no decipher + uCP */
#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP 0x00000004
/* 2 Packet Processing pass + decipher + uCP */
#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP 0x00000015
/* Packet Processing + no decipher + no uCP */
#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP 0x00000006
/* Packet Processing + decipher + no uCP */
#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_NO_UCP 0x00000017
/* COMP/DECOMP */
#define IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP 0x00000020
/* Invalid sequencer type */
#define IPA_DPS_HPS_SEQ_TYPE_INVALID 0xFFFFFFFF

/*
 * True when the sequencer type is a pure-DMA variant (no packet
 * processing). The argument is parenthesized so the macro stays
 * correct for non-trivial expressions (e.g. ternaries).
 */
#define IPA_DPS_HPS_SEQ_TYPE_IS_DMA(seq_type) \
	((seq_type) == IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY || \
	(seq_type) == IPA_DPS_HPS_SEQ_TYPE_DMA_DEC || \
	(seq_type) == IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP)
93
94 #define QMB_MASTER_SELECT_DDR  (0)
95 #define QMB_MASTER_SELECT_PCIE (1)
96
97 #define IPA_CLIENT_NOT_USED \
98         {IPA_EP_NOT_ALLOCATED, IPA_EP_NOT_ALLOCATED, false, \
99                 IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR}
100
101 /* Resource Group index*/
102 #define IPA_GROUP_UL            (0)
103 #define IPA_GROUP_DL            (1)
104 #define IPA_GROUP_DPL           IPA_GROUP_DL
105 #define IPA_GROUP_DIAG          (2)
106 #define IPA_GROUP_DMA           (3)
107 #define IPA_GROUP_IMM_CMD       IPA_GROUP_UL
108 #define IPA_GROUP_Q6ZIP         (4)
109 #define IPA_GROUP_Q6ZIP_GENERAL IPA_GROUP_Q6ZIP
110 #define IPA_GROUP_UC_RX_Q       (5)
111 #define IPA_GROUP_Q6ZIP_ENGINE  IPA_GROUP_UC_RX_Q
112 #define IPA_GROUP_MAX           (6)
113
/* Source-pipe resource types; rows of ipa3_rsrc_src_grp_config */
enum ipa_rsrc_grp_type_src {
	IPA_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS,
	IPA_RSRC_GRP_TYPE_SRC_HDR_SECTORS,
	IPA_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER,
	/* NOTE(review): "SRS" looks like a typo for "SRC"; kept because
	 * the name is used consistently wherever it is referenced.
	 */
	IPA_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
	IPA_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
	IPA_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS,
	IPA_RSRC_GRP_TYPE_SRC_HPS_DMARS,
	IPA_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
	IPA_RSRC_GRP_TYPE_SRC_MAX,
};
/* Destination-pipe resource types; rows of ipa3_rsrc_dst_grp_config */
enum ipa_rsrc_grp_type_dst {
	IPA_RSRC_GRP_TYPE_DST_DATA_SECTORS,
	IPA_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS,
	IPA_RSRC_GRP_TYPE_DST_DPS_DMARS,
	IPA_RSRC_GRP_TYPE_DST_MAX,
};
/* RX resource types; rows of ipa3_rsrc_rx_grp_config */
enum ipa_rsrc_grp_type_rx {
	IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ,
	IPA_RSRC_GRP_TYPE_RX_MAX
};
/* Per-group {lower bound, upper bound} for one resource type */
struct rsrc_min_max {
	u32 min;	/* lower bound */
	u32 max;	/* upper bound */
};
139
/* Source resource min/max per resource group; columns follow the
 * IPA_GROUP_* indices (see the per-row column legend below).
 */
static const struct rsrc_min_max ipa3_rsrc_src_grp_config
			[IPA_RSRC_GRP_TYPE_SRC_MAX][IPA_GROUP_MAX] = {
		/*UL	DL	DIAG	DMA	Not Used	uC Rx*/
	[IPA_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
		{3, 255}, {3, 255}, {1, 255}, {1, 255}, {1, 255}, {2, 255} },
	[IPA_RSRC_GRP_TYPE_SRC_HDR_SECTORS] = {
		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
	[IPA_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER] = {
		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
	[IPA_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
		{14, 14}, {16, 16}, {5, 5}, {5, 5},  {0, 0}, {8, 8} },
	[IPA_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
		{19, 19}, {26, 26}, {3, 3}, {7, 7}, {0, 0}, {8, 8} },
	[IPA_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS] = {
		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
	[IPA_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
	[IPA_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
		{14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} },
};
/* Destination resource min/max per resource group */
static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
			[IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = {
		/*UL	DL/DPL	DIAG	DMA  Q6zip_gen Q6zip_eng*/
	[IPA_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
		{2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} },
	[IPA_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = {
		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
	[IPA_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
		{1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} },
};
/* RX (HPS command queue) resource min/max per resource group */
static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
			[IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = {
		/*UL	DL	DIAG	DMA	Not Used	uC Rx*/
	[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
		{16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} },
};
176
/* HW versions that have a row in ipa3_ep_mapping */
enum ipa_ver {
	IPA_3_0,
	IPA_VER_MAX,
};

/* Static per-client endpoint configuration (one entry per client in
 * ipa3_ep_mapping). Initializer order matches IPA_CLIENT_NOT_USED.
 */
struct ipa_ep_configuration {
	int pipe_num;		/* HW pipe index, IPA_EP_NOT_ALLOCATED if unused */
	int group_num;		/* resource group, one of IPA_GROUP_* */
	bool support_flt;	/* pipe supports filtering */
	int sequencer_type;	/* IPA_DPS_HPS_SEQ_TYPE_* */
	u8 qmb_master_sel;	/* QMB_MASTER_SELECT_DDR or _PCIE */
};
189
/* Per-IPA-version, per-client endpoint map. Each entry is
 * {pipe_num, group_num, support_flt, sequencer_type, qmb_master_sel}
 * (see struct ipa_ep_configuration); clients without a pipe get
 * IPA_CLIENT_NOT_USED.
 * NOTE(review): some pipe numbers appear under more than one client
 * (e.g. 12, 23, 28, 29) — presumably mutually exclusive use cases
 * (MHI/ODU vs. DMA/test); confirm before repurposing a pipe.
 */
static const struct ipa_ep_configuration ipa3_ep_mapping
					[IPA_VER_MAX][IPA_CLIENT_MAX] = {
	[IPA_3_0][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_WLAN1_PROD]          = {10, IPA_GROUP_UL, true,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_USB_PROD]            = {1, IPA_GROUP_UL, true,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_UC_USB_PROD]         = {2, IPA_GROUP_UL, true,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_APPS_LAN_PROD]
			= {14, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_APPS_WAN_PROD]
			= {3, IPA_GROUP_UL, true,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_APPS_CMD_PROD]
			= {22, IPA_GROUP_IMM_CMD, false,
			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_ODU_PROD]            = {12, IPA_GROUP_UL, true,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_MHI_PROD]            = {0, IPA_GROUP_UL, true,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_PCIE},
	[IPA_3_0][IPA_CLIENT_Q6_LAN_PROD]         = {9, IPA_GROUP_UL, false,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_Q6_WAN_PROD]         = {5, IPA_GROUP_DL,
			true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_Q6_CMD_PROD]
			= {6, IPA_GROUP_IMM_CMD, false,
			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD]      = {7, IPA_GROUP_Q6ZIP,
			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD]     = {8, IPA_GROUP_Q6ZIP,
			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
			= {12, IPA_GROUP_DMA, false,
			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
			QMB_MASTER_SELECT_PCIE},
	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
			= {13, IPA_GROUP_DMA, false,
			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
			QMB_MASTER_SELECT_PCIE},
	/* Only for test purpose */
	[IPA_3_0][IPA_CLIENT_TEST_PROD]           = {1, IPA_GROUP_UL, true,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_TEST1_PROD]          = {1, IPA_GROUP_UL, true,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_TEST2_PROD]          = {3, IPA_GROUP_UL, true,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_TEST3_PROD]          = {12, IPA_GROUP_UL, true,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_TEST4_PROD]          = {13, IPA_GROUP_UL, true,
			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},

	[IPA_3_0][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_WLAN1_CONS]          = {25, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_WLAN2_CONS]          = {27, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_WLAN3_CONS]          = {28, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
	/* NOTE(review): WLAN4_CONS is the only consumer here with a
	 * non-INVALID sequencer type — confirm intended.
	 */
	[IPA_3_0][IPA_CLIENT_WLAN4_CONS]          = {29, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_USB_CONS]            = {26, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_USB_DPL_CONS]        = {17, IPA_GROUP_DPL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_APPS_LAN_CONS]       = {15, IPA_GROUP_UL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_APPS_WAN_CONS]       = {16, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_ODU_EMB_CONS]        = {23, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
	[IPA_3_0][IPA_CLIENT_MHI_CONS]            = {23, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_PCIE},
	[IPA_3_0][IPA_CLIENT_Q6_LAN_CONS]         = {19, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_Q6_WAN_CONS]         = {18, IPA_GROUP_UL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_Q6_DUN_CONS]         = {30, IPA_GROUP_DIAG,
			false, IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS]
			= {21, IPA_GROUP_Q6ZIP, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS]
			= {4, IPA_GROUP_Q6ZIP, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
			= {28, IPA_GROUP_DMA, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_PCIE},
	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
			= {29, IPA_GROUP_DMA, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_PCIE},
	[IPA_3_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]     = IPA_CLIENT_NOT_USED,
	/* Only for test purpose */
	[IPA_3_0][IPA_CLIENT_TEST_CONS]           = {26, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_TEST1_CONS]          = {26, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_TEST2_CONS]          = {27, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_TEST3_CONS]          = {28, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
	[IPA_3_0][IPA_CLIENT_TEST4_CONS]          = {29, IPA_GROUP_DL, false,
			IPA_DPS_HPS_SEQ_TYPE_INVALID,
			QMB_MASTER_SELECT_DDR},
};
356
357 /* this array include information tuple:
358   {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee} */
/* GSI endpoint configs; field order per the tuple comment above:
 * {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee}.
 * The table is terminated by an all -1 sentinel entry.
 */
static struct ipa_gsi_ep_config ipa_gsi_ep_info[] = {
	{0, 0, 8, 16, 0},
	{1, 3, 8, 16, 0},
	{3, 5, 16, 32, 0},
	{4, 9, 4, 4, 1},
	{5, 0, 16, 32, 1},
	{6, 1, 18, 28, 1},
	{7, 2, 0, 0, 1},
	{8, 3, 0, 0, 1},
	{9, 4, 8, 12, 1},
	{10, 1, 8, 16, 3},
	{12, 9, 8, 16, 0},
	{13, 10, 8, 16, 0},
	{14, 11, 8, 16, 0},
	{15, 7, 8, 12, 0},
	{16, 8, 8, 12, 0},
	{17, 2, 8, 12, 0},
	{18, 5, 8, 12, 1},
	{19, 6, 8, 12, 1},
	{21, 8, 4, 4, 1},
	{22, 6, 18, 28, 0},
	{23, 1, 8, 8, 0},
	{25, 4, 8, 8, 3},
	{26, 12, 8, 8, 0},
	{27, 4, 8, 8, 0},
	{28, 13, 8, 8, 0},
	{29, 14, 8, 8, 0},
	{30, 7, 4, 4, 1},
	{-1, -1, -1, -1, -1}	/* sentinel — keep last */
};
389
/* Zero bus-bandwidth votes (ab/ib) for the idle/init use case */
static struct msm_bus_vectors ipa_init_vectors_v3_0[]  = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 0,
		.ib = 0,
	},
};

/* Bus-bandwidth votes for the nominal-performance use case */
static struct msm_bus_vectors ipa_nominal_perf_vectors_v3_0[]  = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 100000000,
		.ib = 1300000000,
	},
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 100000000,
		.ib = 1300000000,
	},
};

/* Use cases: index 0 = init/idle votes, index 1 = nominal perf votes */
static struct msm_bus_paths ipa_usecases_v3_0[]  = {
	{
		ARRAY_SIZE(ipa_init_vectors_v3_0),
		ipa_init_vectors_v3_0,
	},
	{
		ARRAY_SIZE(ipa_nominal_perf_vectors_v3_0),
		ipa_nominal_perf_vectors_v3_0,
	},
};

/* Bus-scaling client registration data for IPA v3.0 */
static struct msm_bus_scale_pdata ipa_bus_client_pdata_v3_0 = {
	ipa_usecases_v3_0,
	ARRAY_SIZE(ipa_usecases_v3_0),
	.name = "ipa",
};
436
/**
 * ipa3_active_clients_lock() - enter the active-clients critical
 * section as the (possibly sleeping) mutex holder.
 *
 * Takes the mutex first, then briefly takes the spinlock only to
 * publish mutex_locked, so that ipa3_active_clients_trylock() callers
 * observe a consistent flag. Pairs with ipa3_active_clients_unlock().
 */
void ipa3_active_clients_lock(void)
{
	unsigned long flags;

	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
	ipa3_ctx->ipa3_active_clients.mutex_locked = true;
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
}
446
/**
 * ipa3_active_clients_trylock() - try to enter the active-clients
 * critical section without sleeping.
 * @flags: [out] irq flags saved by the spinlock; pass them back to
 *         ipa3_active_clients_trylock_unlock()
 *
 * Returns 1 on success WITH THE SPINLOCK STILL HELD (irqs disabled);
 * returns 0 (spinlock released) if the mutex holder is active, i.e.
 * mutex_locked was set by ipa3_active_clients_lock().
 */
int ipa3_active_clients_trylock(unsigned long *flags)
{
	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
	if (ipa3_ctx->ipa3_active_clients.mutex_locked) {
		spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock,
					 *flags);
		return 0;
	}

	return 1;
}
458
/**
 * ipa3_active_clients_trylock_unlock() - release the spinlock held
 * after a successful ipa3_active_clients_trylock().
 * @flags: the irq flags that ipa3_active_clients_trylock() saved
 */
void ipa3_active_clients_trylock_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
}
463
/**
 * ipa3_active_clients_unlock() - leave the critical section entered by
 * ipa3_active_clients_lock().
 *
 * Clears mutex_locked under the spinlock (reverse of the lock path),
 * then releases the mutex.
 */
void ipa3_active_clients_unlock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
	ipa3_ctx->ipa3_active_clients.mutex_locked = false;
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
}
473
474 /**
475  * ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an
476  * IPA_RM resource
477  *
478  * @resource: [IN] IPA Resource Manager resource
479  * @clients: [OUT] Empty array which will contain the list of clients. The
480  *         caller must initialize this array.
481  *
482  * Return codes: 0 on success, negative on failure.
483  */
484 int ipa3_get_clients_from_rm_resource(
485         enum ipa_rm_resource_name resource,
486         struct ipa3_client_names *clients)
487 {
488         int i = 0;
489
490         if (resource < 0 ||
491             resource >= IPA_RM_RESOURCE_MAX ||
492             !clients) {
493                 IPAERR("Bad parameters\n");
494                 return -EINVAL;
495         }
496
497         switch (resource) {
498         case IPA_RM_RESOURCE_USB_CONS:
499                 clients->names[i++] = IPA_CLIENT_USB_CONS;
500                 break;
501         case IPA_RM_RESOURCE_USB_DPL_CONS:
502                 clients->names[i++] = IPA_CLIENT_USB_DPL_CONS;
503                 break;
504         case IPA_RM_RESOURCE_HSIC_CONS:
505                 clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
506                 break;
507         case IPA_RM_RESOURCE_WLAN_CONS:
508                 clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
509                 clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
510                 clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
511                 clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
512                 break;
513         case IPA_RM_RESOURCE_MHI_CONS:
514                 clients->names[i++] = IPA_CLIENT_MHI_CONS;
515                 break;
516         case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
517                 clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
518                 clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
519                 break;
520         case IPA_RM_RESOURCE_USB_PROD:
521                 clients->names[i++] = IPA_CLIENT_USB_PROD;
522                 break;
523         case IPA_RM_RESOURCE_HSIC_PROD:
524                 clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
525                 break;
526         case IPA_RM_RESOURCE_MHI_PROD:
527                 clients->names[i++] = IPA_CLIENT_MHI_PROD;
528                 break;
529         case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
530                 clients->names[i++] = IPA_CLIENT_ODU_PROD;
531         default:
532                 break;
533         }
534         clients->length = i;
535
536         return 0;
537 }
538
539 /**
540  * ipa3_should_pipe_be_suspended() - returns true when the client's pipe should
541  * be suspended during a power save scenario. False otherwise.
542  *
543  * @client: [IN] IPA client
544  */
545 bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
546 {
547         struct ipa3_ep_context *ep;
548         int ipa_ep_idx;
549
550         ipa_ep_idx = ipa3_get_ep_mapping(client);
551         if (ipa_ep_idx == -1) {
552                 IPAERR("Invalid client.\n");
553                 WARN_ON(1);
554                 return false;
555         }
556
557         ep = &ipa3_ctx->ep[ipa_ep_idx];
558
559         if (ep->keep_ipa_awake)
560                 return false;
561
562         if (client == IPA_CLIENT_USB_CONS     ||
563             client == IPA_CLIENT_USB_DPL_CONS ||
564             client == IPA_CLIENT_MHI_CONS     ||
565             client == IPA_CLIENT_HSIC1_CONS   ||
566             client == IPA_CLIENT_WLAN1_CONS   ||
567             client == IPA_CLIENT_WLAN2_CONS   ||
568             client == IPA_CLIENT_WLAN3_CONS   ||
569             client == IPA_CLIENT_WLAN4_CONS   ||
570             client == IPA_CLIENT_ODU_EMB_CONS ||
571             client == IPA_CLIENT_ODU_TETH_CONS)
572                 return true;
573
574         return false;
575 }
576
577 /**
578  * ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
579  * resource and decrement active clients counter, which may result in clock
580  * gating of IPA clocks.
581  *
582  * @resource: [IN] IPA Resource Manager resource
583  *
584  * Return codes: 0 on success, negative on failure.
585  */
int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
{
	struct ipa3_client_names clients;
	int res;
	int index;
	struct ipa_ep_cfg_ctrl suspend;
	enum ipa_client_type client;
	int ipa_ep_idx;
	bool pipe_suspended = false;

	memset(&clients, 0, sizeof(clients));
	res = ipa3_get_clients_from_rm_resource(resource, &clients);
	if (res) {
		IPAERR("Bad params.\n");
		return res;
	}

	for (index = 0; index < clients.length; index++) {
		client = clients.names[index];
		ipa_ep_idx = ipa3_get_ep_mapping(client);
		if (ipa_ep_idx == -1) {
			/* NOTE(review): res is set but the function still
			 * returns 0 below, unlike the no_block variant
			 * which propagates it — confirm intended.
			 */
			IPAERR("Invalid client.\n");
			res = -EINVAL;
			continue;
		}
		/* a suspended client must not be auto-resumed on connect */
		ipa3_ctx->resume_on_connect[client] = false;
		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
		    ipa3_should_pipe_be_suspended(client)) {
			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
				/* suspend endpoint */
				memset(&suspend, 0, sizeof(suspend));
				suspend.ipa_ep_suspend = true;
				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
				pipe_suspended = true;
			}
		}
	}
	/* Sleep ~1 msec */
	if (pipe_suspended)
		usleep_range(1000, 2000);

	/* before gating IPA clocks do TAG process */
	ipa3_ctx->tag_process_before_gating = true;
	IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));

	return 0;
}
633
634 /**
635  * ipa3_suspend_resource_no_block() - suspend client endpoints related to the
636  * IPA_RM resource and decrement active clients counter. This function is
637  * guaranteed to avoid sleeping.
638  *
639  * @resource: [IN] IPA Resource Manager resource
640  *
641  * Return codes: 0 on success, negative on failure.
642  */
int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
{
	int res;
	struct ipa3_client_names clients;
	int index;
	enum ipa_client_type client;
	struct ipa_ep_cfg_ctrl suspend;
	int ipa_ep_idx;
	unsigned long flags;
	struct ipa_active_client_logging_info log_info;

	/* on success the spinlock is held (irqs saved in flags) until
	 * the trylock_unlock at bail
	 */
	if (ipa3_active_clients_trylock(&flags) == 0)
		return -EPERM;
	/* NOTE(review): the last active client is refused here —
	 * presumably dropping the final reference would trigger work
	 * that cannot run in this non-sleeping path; confirm.
	 */
	if (ipa3_ctx->ipa3_active_clients.cnt == 1) {
		res = -EPERM;
		goto bail;
	}

	memset(&clients, 0, sizeof(clients));
	res = ipa3_get_clients_from_rm_resource(resource, &clients);
	if (res) {
		IPAERR(
			"ipa3_get_clients_from_rm_resource() failed, name = %d.\n",
			resource);
		goto bail;
	}

	for (index = 0; index < clients.length; index++) {
		client = clients.names[index];
		ipa_ep_idx = ipa3_get_ep_mapping(client);
		if (ipa_ep_idx == -1) {
			IPAERR("Invalid client.\n");
			res = -EINVAL;
			continue;
		}
		/* a suspended client must not be auto-resumed on connect */
		ipa3_ctx->resume_on_connect[client] = false;
		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
		    ipa3_should_pipe_be_suspended(client)) {
			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
				/* suspend endpoint */
				memset(&suspend, 0, sizeof(suspend));
				suspend.ipa_ep_suspend = true;
				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
			}
		}
	}

	/* only decrement the active count if every client was valid;
	 * the count is modified under the spinlock taken above
	 */
	if (res == 0) {
		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
				ipa_rm_resource_str(resource));
		ipa3_active_clients_log_dec(&log_info, true);
		ipa3_ctx->ipa3_active_clients.cnt--;
		IPADBG("active clients = %d\n",
		       ipa3_ctx->ipa3_active_clients.cnt);
	}
bail:
	ipa3_active_clients_trylock_unlock(&flags);

	return res;
}
703
704 /**
705  * ipa3_resume_resource() - resume client endpoints related to the IPA_RM
706  * resource.
707  *
708  * @resource: [IN] IPA Resource Manager resource
709  *
710  * Return codes: 0 on success, negative on failure.
711  */
712 int ipa3_resume_resource(enum ipa_rm_resource_name resource)
713 {
714
715         struct ipa3_client_names clients;
716         int res;
717         int index;
718         struct ipa_ep_cfg_ctrl suspend;
719         enum ipa_client_type client;
720         int ipa_ep_idx;
721
722         memset(&clients, 0, sizeof(clients));
723         res = ipa3_get_clients_from_rm_resource(resource, &clients);
724         if (res) {
725                 IPAERR("ipa3_get_clients_from_rm_resource() failed.\n");
726                 return res;
727         }
728
729         for (index = 0; index < clients.length; index++) {
730                 client = clients.names[index];
731                 ipa_ep_idx = ipa3_get_ep_mapping(client);
732                 if (ipa_ep_idx == -1) {
733                         IPAERR("Invalid client.\n");
734                         res = -EINVAL;
735                         continue;
736                 }
737                 /*
738                  * The related ep, will be resumed on connect
739                  * while its resource is granted
740                  */
741                 ipa3_ctx->resume_on_connect[client] = true;
742                 IPADBG("%d will be resumed on connect.\n", client);
743                 if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
744                     ipa3_should_pipe_be_suspended(client)) {
745                         if (ipa3_ctx->ep[ipa_ep_idx].valid) {
746                                 memset(&suspend, 0, sizeof(suspend));
747                                 suspend.ipa_ep_suspend = false;
748                                 ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
749                         }
750                 }
751         }
752
753         return res;
754 }
755
756 /**
757  * _ipa_sram_settings_read_v3_0() - Read SRAM settings from HW
758  *
759  * Returns:     None
760  */
761 void _ipa_sram_settings_read_v3_0(void)
762 {
763         struct ipahal_reg_shared_mem_size smem_sz;
764
765         memset(&smem_sz, 0, sizeof(smem_sz));
766
767         ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
768
769         ipa3_ctx->smem_restricted_bytes = smem_sz.shared_mem_baddr;
770         ipa3_ctx->smem_sz = smem_sz.shared_mem_sz;
771
772         /* reg fields are in 8B units */
773         ipa3_ctx->smem_restricted_bytes *= 8;
774         ipa3_ctx->smem_sz *= 8;
775         ipa3_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
776         ipa3_ctx->hdr_tbl_lcl = 0;
777         ipa3_ctx->hdr_proc_ctx_tbl_lcl = 1;
778
779         /*
780          * when proc ctx table is located in internal memory,
781          * modem entries resides first.
782          */
783         if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
784                 ipa3_ctx->hdr_proc_ctx_tbl.start_offset =
785                         IPA_MEM_PART(modem_hdr_proc_ctx_size);
786         }
787         ipa3_ctx->ip4_rt_tbl_hash_lcl = 0;
788         ipa3_ctx->ip4_rt_tbl_nhash_lcl = 0;
789         ipa3_ctx->ip6_rt_tbl_hash_lcl = 0;
790         ipa3_ctx->ip6_rt_tbl_nhash_lcl = 0;
791         ipa3_ctx->ip4_flt_tbl_hash_lcl = 0;
792         ipa3_ctx->ip4_flt_tbl_nhash_lcl = 0;
793         ipa3_ctx->ip6_flt_tbl_hash_lcl = 0;
794         ipa3_ctx->ip6_flt_tbl_nhash_lcl = 0;
795 }
796
797 /**
798  * ipa3_cfg_clkon_cfg() - configure IPA clkon_cfg
799  * @clkon_cfg: IPA clkon_cfg
800  *
801  * Return codes:
802  * 0: success
803  */
804 int ipa3_cfg_clkon_cfg(struct ipahal_reg_clkon_cfg *clkon_cfg)
805 {
806
807         IPA_ACTIVE_CLIENTS_INC_SIMPLE();
808
809         IPADBG("cgc_open_misc = %d\n",
810                 clkon_cfg->cgc_open_misc);
811
812         ipahal_write_reg_fields(IPA_CLKON_CFG, clkon_cfg);
813
814         IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
815
816         return 0;
817 }
818
819 /**
820  * ipa3_cfg_route() - configure IPA route
821  * @route: IPA route
822  *
823  * Return codes:
824  * 0: success
825  */
826 int ipa3_cfg_route(struct ipahal_reg_route *route)
827 {
828
829         IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
830                 route->route_dis,
831                 route->route_def_pipe,
832                 route->route_def_hdr_table);
833         IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n",
834                 route->route_def_hdr_ofst,
835                 route->route_frag_def_pipe);
836
837         IPADBG("default_retain_hdr=%d\n",
838                 route->route_def_retain_hdr);
839
840         if (route->route_dis) {
841                 IPAERR("Route disable is not supported!\n");
842                 return -EPERM;
843         }
844
845         IPA_ACTIVE_CLIENTS_INC_SIMPLE();
846
847         ipahal_write_reg_fields(IPA_ROUTE, route);
848
849         IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
850
851         return 0;
852 }
853
854 /**
855  * ipa3_cfg_filter() - configure filter
856  * @disable: disable value
857  *
858  * Return codes:
859  * 0: success
860  */
861 int ipa3_cfg_filter(u32 disable)
862 {
863         IPAERR_RL("Filter disable is not supported!\n");
864         return -EPERM;
865 }
866
867 /**
868  * ipa3_cfg_qsb() - Configure IPA QSB maximal reads and writes
869  *
870  * Returns:     None
871  */
872 void ipa3_cfg_qsb(void)
873 {
874         int qsb_max_writes[2] = { 8, 2 };
875         int qsb_max_reads[2] = { 8, 8 };
876
877         ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, qsb_max_writes);
878         ipahal_write_reg_fields(IPA_QSB_MAX_READS, qsb_max_reads);
879 }
880
881 /**
882  * ipa3_init_hw() - initialize HW
883  *
884  * Return codes:
885  * 0: success
886  */
887 int ipa3_init_hw(void)
888 {
889         u32 ipa_version = 0;
890         u32 val;
891
892         /* Read IPA version and make sure we have access to the registers */
893         ipa_version = ipahal_read_reg(IPA_VERSION);
894         if (ipa_version == 0)
895                 return -EFAULT;
896
897         switch (ipa3_ctx->ipa_hw_type) {
898         case IPA_HW_v3_0:
899         case IPA_HW_v3_1:
900                 val = IPA_BCR_REG_VAL_v3_0;
901                 break;
902         case IPA_HW_v3_5:
903         case IPA_HW_v3_5_1:
904                 val = IPA_BCR_REG_VAL_v3_5;
905                 break;
906         default:
907                 IPAERR("unknown HW type in dts\n");
908                 return -EFAULT;
909         }
910
911         ipahal_write_reg(IPA_BCR, val);
912
913         ipa3_cfg_qsb();
914
915         return 0;
916 }
917
918 /**
919  * ipa3_get_hw_type_index() - Get HW type index which is used as the entry index
920  *      into ipa3_ep_mapping[] array.
921  *
922  * Return value: HW type index
923  */
924 u8 ipa3_get_hw_type_index(void)
925 {
926         u8 hw_type_index;
927
928         switch (ipa3_ctx->ipa_hw_type) {
929         case IPA_HW_v3_0:
930         case IPA_HW_v3_1:
931                 hw_type_index = IPA_3_0;
932                 break;
933         default:
934                 IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type);
935                 hw_type_index = IPA_3_0;
936                 break;
937         }
938
939         return hw_type_index;
940 }
941
942 /**
943  * ipa3_get_ep_mapping() - provide endpoint mapping
944  * @client: client type
945  *
946  * Return value: endpoint mapping
947  */
948 int ipa3_get_ep_mapping(enum ipa_client_type client)
949 {
950         int ipa_ep_idx;
951
952         if (client >= IPA_CLIENT_MAX || client < 0) {
953                 IPAERR_RL("Bad client number! client =%d\n", client);
954                 return IPA_EP_NOT_ALLOCATED;
955         }
956
957         ipa_ep_idx = ipa3_ep_mapping[ipa3_get_hw_type_index()][client].pipe_num;
958         if (ipa_ep_idx < 0 || ipa_ep_idx >= IPA3_MAX_NUM_PIPES)
959                 return IPA_EP_NOT_ALLOCATED;
960
961         return ipa_ep_idx;
962 }
963
964 /**
965  * ipa3_get_gsi_ep_info() - provide gsi ep information
966  * @ipa_ep_idx: IPA endpoint index
967  *
968  * Return value: pointer to ipa_gsi_ep_info
969  */
970 struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx)
971 {
972         int i;
973
974         for (i = 0; ; i++) {
975                 if (ipa_gsi_ep_info[i].ipa_ep_num < 0)
976                         break;
977
978                 if (ipa_gsi_ep_info[i].ipa_ep_num ==
979                         ipa_ep_idx)
980                         return &(ipa_gsi_ep_info[i]);
981         }
982
983         return NULL;
984 }
985
986 /**
987  * ipa_get_ep_group() - provide endpoint group by client
988  * @client: client type
989  *
990  * Return value: endpoint group
991  */
992 int ipa_get_ep_group(enum ipa_client_type client)
993 {
994         if (client >= IPA_CLIENT_MAX || client < 0) {
995                 IPAERR("Bad client number! client =%d\n", client);
996                 return -EINVAL;
997         }
998
999         return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].group_num;
1000 }
1001
1002 /**
1003  * ipa3_get_qmb_master_sel() - provide QMB master selection for the client
1004  * @client: client type
1005  *
1006  * Return value: QMB master index
1007  */
1008 u8 ipa3_get_qmb_master_sel(enum ipa_client_type client)
1009 {
1010         if (client >= IPA_CLIENT_MAX || client < 0) {
1011                 IPAERR("Bad client number! client =%d\n", client);
1012                 return -EINVAL;
1013         }
1014
1015         return ipa3_ep_mapping[ipa3_get_hw_type_index()]
1016                 [client].qmb_master_sel;
1017 }
1018
1019 /* ipa3_set_client() - provide client mapping
1020  * @client: client type
1021  *
1022  * Return value: none
1023  */
1024
1025 void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
1026 {
1027         if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
1028                 IPAERR("Bad client number! client =%d\n", client);
1029         } else if (index >= IPA3_MAX_NUM_PIPES || index < 0) {
1030                 IPAERR("Bad pipe index! index =%d\n", index);
1031         } else {
1032                 ipa3_ctx->ipacm_client[index].client_enum = client;
1033                 ipa3_ctx->ipacm_client[index].uplink = uplink;
1034         }
1035 }
1036
1037 /* ipa3_get_wlan_stats() - get ipa wifi stats
1038  *
1039  * Return value: success or failure
1040  */
1041 int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
1042 {
1043         if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
1044                 ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
1045                         wdi_sap_stats);
1046         } else {
1047                 IPAERR("uc_wdi_ctx.stats_notify NULL\n");
1048                 return -EFAULT;
1049         }
1050         return 0;
1051 }
1052
1053 int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
1054 {
1055         if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
1056                 ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA,
1057                         wdi_quota);
1058         } else {
1059                 IPAERR("uc_wdi_ctx.stats_notify NULL\n");
1060                 return -EFAULT;
1061         }
1062         return 0;
1063 }
1064
1065 /**
1066  * ipa3_get_client() - provide client mapping
1067  * @client: client type
1068  *
1069  * Return value: client mapping enum
1070  */
1071 enum ipacm_client_enum ipa3_get_client(int pipe_idx)
1072 {
1073         if (pipe_idx >= IPA3_MAX_NUM_PIPES || pipe_idx < 0) {
1074                 IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
1075                 return IPACM_CLIENT_MAX;
1076         } else {
1077                 return ipa3_ctx->ipacm_client[pipe_idx].client_enum;
1078         }
1079 }
1080
1081 /**
1082  * ipa2_get_client_uplink() - provide client mapping
1083  * @client: client type
1084  *
1085  * Return value: none
1086  */
1087 bool ipa3_get_client_uplink(int pipe_idx)
1088 {
1089         if (pipe_idx < 0 || pipe_idx >= IPA3_MAX_NUM_PIPES) {
1090                 IPAERR("invalid pipe idx %d\n", pipe_idx);
1091                 return false;
1092         }
1093
1094         return ipa3_ctx->ipacm_client[pipe_idx].uplink;
1095 }
1096
1097 /**
1098  * ipa3_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
1099  * the supplied pipe index.
1100  *
1101  * @pipe_idx:
1102  *
1103  * Return value: IPA_RM resource related to the pipe, -1 if a resource was not
1104  * found.
1105  */
1106 enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx)
1107 {
1108         int i;
1109         int j;
1110         enum ipa_client_type client;
1111         struct ipa3_client_names clients;
1112         bool found = false;
1113
1114         if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
1115                 IPAERR("Bad pipe index!\n");
1116                 return -EINVAL;
1117         }
1118
1119         client = ipa3_ctx->ep[pipe_idx].client;
1120
1121         for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
1122                 memset(&clients, 0, sizeof(clients));
1123                 ipa3_get_clients_from_rm_resource(i, &clients);
1124                 for (j = 0; j < clients.length; j++) {
1125                         if (clients.names[j] == client) {
1126                                 found = true;
1127                                 break;
1128                         }
1129                 }
1130                 if (found)
1131                         break;
1132         }
1133
1134         if (!found)
1135                 return -EFAULT;
1136
1137         return i;
1138 }
1139
1140 /**
1141  * ipa3_get_client_mapping() - provide client mapping
1142  * @pipe_idx: IPA end-point number
1143  *
1144  * Return value: client mapping
1145  */
1146 enum ipa_client_type ipa3_get_client_mapping(int pipe_idx)
1147 {
1148         if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
1149                 IPAERR("Bad pipe index!\n");
1150                 return -EINVAL;
1151         }
1152
1153         return ipa3_ctx->ep[pipe_idx].client;
1154 }
1155
1156 /**
1157  * ipa_init_ep_flt_bitmap() - Initialize the bitmap
1158  * that represents the End-points that supports filtering
1159  */
1160 void ipa_init_ep_flt_bitmap(void)
1161 {
1162         enum ipa_client_type cl;
1163         u8 hw_type_idx = ipa3_get_hw_type_index();
1164         u32 bitmap;
1165
1166         bitmap = 0;
1167
1168         BUG_ON(ipa3_ctx->ep_flt_bitmap);
1169
1170         for (cl = 0; cl < IPA_CLIENT_MAX ; cl++) {
1171                 if (ipa3_ep_mapping[hw_type_idx][cl].support_flt) {
1172                         bitmap |=
1173                                 (1U<<ipa3_ep_mapping[hw_type_idx][cl].pipe_num);
1174                         if (bitmap != ipa3_ctx->ep_flt_bitmap) {
1175                                 ipa3_ctx->ep_flt_bitmap = bitmap;
1176                                 ipa3_ctx->ep_flt_num++;
1177                         }
1178                 }
1179         }
1180 }
1181
1182 /**
1183  * ipa_is_ep_support_flt() - Given an End-point check
1184  * whether it supports filtering or not.
1185  *
1186  * @pipe_idx:
1187  *
1188  * Return values:
1189  * true if supports and false if not
1190  */
1191 bool ipa_is_ep_support_flt(int pipe_idx)
1192 {
1193         if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
1194                 IPAERR("Bad pipe index!\n");
1195                 return false;
1196         }
1197
1198         return ipa3_ctx->ep_flt_bitmap & (1U<<pipe_idx);
1199 }
1200
1201 /**
1202  * ipa3_cfg_ep_seq() - IPA end-point HPS/DPS sequencer type configuration
1203  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1204  *
1205  * Returns:     0 on success, negative on failure
1206  *
1207  * Note:        Should not be called from atomic context
1208  */
1209 int ipa3_cfg_ep_seq(u32 clnt_hdl, const struct ipa_ep_cfg_seq *seq_cfg)
1210 {
1211         int type;
1212
1213         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1214             ipa3_ctx->ep[clnt_hdl].valid == 0) {
1215                 IPAERR("bad param, clnt_hdl = %d", clnt_hdl);
1216                 return -EINVAL;
1217         }
1218
1219         if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
1220                 IPAERR("SEQ does not apply to IPA consumer EP %d\n", clnt_hdl);
1221                 return -EINVAL;
1222         }
1223
1224         /*
1225          * Skip Configure sequencers type for test clients.
1226          * These are configured dynamically in ipa3_cfg_ep_mode
1227          */
1228         if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
1229                 IPADBG("Skip sequencers configuration for test clients\n");
1230                 return 0;
1231         }
1232
1233         if (seq_cfg->set_dynamic)
1234                 type = seq_cfg->seq_type;
1235         else
1236                 type = ipa3_ep_mapping[ipa3_get_hw_type_index()]
1237                         [ipa3_ctx->ep[clnt_hdl].client].sequencer_type;
1238
1239         if (type != IPA_DPS_HPS_SEQ_TYPE_INVALID) {
1240                 if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA &&
1241                         !IPA_DPS_HPS_SEQ_TYPE_IS_DMA(type)) {
1242                         IPAERR("Configuring non-DMA SEQ type to DMA pipe\n");
1243                         BUG();
1244                 }
1245                 IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1246                 /* Configure sequencers type*/
1247
1248                 IPADBG("set sequencers to sequence 0x%x, ep = %d\n", type,
1249                                 clnt_hdl);
1250                 ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
1251
1252                 IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1253         } else {
1254                 IPADBG("should not set sequencer type of ep = %d\n", clnt_hdl);
1255         }
1256
1257         return 0;
1258 }
1259
1260 /**
1261  * ipa3_cfg_ep - IPA end-point configuration
1262  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1263  * @ipa_ep_cfg: [in] IPA end-point configuration params
1264  *
1265  * This includes nat, header, mode, aggregation and route settings and is a one
1266  * shot API to configure the IPA end-point fully
1267  *
1268  * Returns:     0 on success, negative on failure
1269  *
1270  * Note:        Should not be called from atomic context
1271  */
1272 int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
1273 {
1274         int result = -EINVAL;
1275
1276         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1277             ipa3_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
1278                 IPAERR("bad parm.\n");
1279                 return -EINVAL;
1280         }
1281
1282         result = ipa3_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
1283         if (result)
1284                 return result;
1285
1286         result = ipa3_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
1287         if (result)
1288                 return result;
1289
1290         result = ipa3_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
1291         if (result)
1292                 return result;
1293
1294         result = ipa3_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
1295         if (result)
1296                 return result;
1297
1298         if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
1299                 result = ipa3_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
1300                 if (result)
1301                         return result;
1302
1303                 result = ipa3_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
1304                 if (result)
1305                         return result;
1306
1307                 result = ipa3_cfg_ep_seq(clnt_hdl, &ipa_ep_cfg->seq);
1308                 if (result)
1309                         return result;
1310
1311                 result = ipa3_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
1312                 if (result)
1313                         return result;
1314
1315                 result = ipa3_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
1316                 if (result)
1317                         return result;
1318         } else {
1319                 result = ipa3_cfg_ep_metadata_mask(clnt_hdl,
1320                                 &ipa_ep_cfg->metadata_mask);
1321                 if (result)
1322                         return result;
1323         }
1324
1325         return 0;
1326 }
1327
1328 const char *ipa3_get_nat_en_str(enum ipa_nat_en_type nat_en)
1329 {
1330         switch (nat_en) {
1331         case (IPA_BYPASS_NAT):
1332                 return "NAT disabled";
1333         case (IPA_SRC_NAT):
1334                 return "Source NAT";
1335         case (IPA_DST_NAT):
1336                 return "Dst NAT";
1337         }
1338
1339         return "undefined";
1340 }
1341
1342 /**
1343  * ipa3_cfg_ep_nat() - IPA end-point NAT configuration
1344  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1345  * @ipa_ep_cfg: [in] IPA end-point configuration params
1346  *
1347  * Returns:     0 on success, negative on failure
1348  *
1349  * Note:        Should not be called from atomic context
1350  */
1351 int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
1352 {
1353         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1354             ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) {
1355                 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1356                                         clnt_hdl,
1357                                         ipa3_ctx->ep[clnt_hdl].valid);
1358                 return -EINVAL;
1359         }
1360
1361         if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
1362                 IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
1363                 return -EINVAL;
1364         }
1365
1366         IPADBG("pipe=%d, nat_en=%d(%s)\n",
1367                         clnt_hdl,
1368                         ep_nat->nat_en,
1369                         ipa3_get_nat_en_str(ep_nat->nat_en));
1370
1371         /* copy over EP cfg */
1372         ipa3_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
1373
1374         IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1375
1376         ipahal_write_reg_n_fields(IPA_ENDP_INIT_NAT_n, clnt_hdl, ep_nat);
1377
1378         IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1379
1380         return 0;
1381 }
1382
1383
1384 /**
1385  * ipa3_cfg_ep_status() - IPA end-point status configuration
1386  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1387  * @ipa_ep_cfg: [in] IPA end-point configuration params
1388  *
1389  * Returns:     0 on success, negative on failure
1390  *
1391  * Note:        Should not be called from atomic context
1392  */
1393 int ipa3_cfg_ep_status(u32 clnt_hdl,
1394         const struct ipahal_reg_ep_cfg_status *ep_status)
1395 {
1396         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1397             ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
1398                 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1399                                         clnt_hdl,
1400                                         ipa3_ctx->ep[clnt_hdl].valid);
1401                 return -EINVAL;
1402         }
1403
1404         IPADBG("pipe=%d, status_en=%d status_ep=%d status_location=%d\n",
1405                         clnt_hdl,
1406                         ep_status->status_en,
1407                         ep_status->status_ep,
1408                         ep_status->status_location);
1409
1410         /* copy over EP cfg */
1411         ipa3_ctx->ep[clnt_hdl].status = *ep_status;
1412
1413         IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1414
1415         ipahal_write_reg_n_fields(IPA_ENDP_STATUS_n, clnt_hdl, ep_status);
1416
1417         IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1418
1419         return 0;
1420 }
1421
1422 /**
1423  * ipa3_cfg_ep_cfg() - IPA end-point cfg configuration
1424  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1425  * @ipa_ep_cfg: [in] IPA end-point configuration params
1426  *
1427  * Returns:     0 on success, negative on failure
1428  *
1429  * Note:        Should not be called from atomic context
1430  */
1431 int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
1432 {
1433         u8 qmb_master_sel;
1434
1435         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1436             ipa3_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) {
1437                 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1438                                         clnt_hdl,
1439                                         ipa3_ctx->ep[clnt_hdl].valid);
1440                 return -EINVAL;
1441         }
1442
1443         /* copy over EP cfg */
1444         ipa3_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
1445
1446         /* Override QMB master selection */
1447         qmb_master_sel = ipa3_get_qmb_master_sel(ipa3_ctx->ep[clnt_hdl].client);
1448         ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel = qmb_master_sel;
1449         IPADBG(
1450                "pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d gen_qmb_master_sel=%d\n",
1451                         clnt_hdl,
1452                         ipa3_ctx->ep[clnt_hdl].cfg.cfg.frag_offload_en,
1453                         ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_offload_en,
1454                         ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_metadata_hdr_offset,
1455                         ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel);
1456
1457         IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1458
1459         ipahal_write_reg_n_fields(IPA_ENDP_INIT_CFG_n, clnt_hdl,
1460                                   &ipa3_ctx->ep[clnt_hdl].cfg.cfg);
1461
1462         IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1463
1464         return 0;
1465 }
1466
1467 /**
1468  * ipa3_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
1469  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1470  * @ipa_ep_cfg: [in] IPA end-point configuration params
1471  *
1472  * Returns:     0 on success, negative on failure
1473  *
1474  * Note:        Should not be called from atomic context
1475  */
1476 int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
1477                 const struct ipa_ep_cfg_metadata_mask
1478                 *metadata_mask)
1479 {
1480         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1481             ipa3_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) {
1482                 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1483                                         clnt_hdl,
1484                                         ipa3_ctx->ep[clnt_hdl].valid);
1485                 return -EINVAL;
1486         }
1487
1488         IPADBG("pipe=%d, metadata_mask=0x%x\n",
1489                         clnt_hdl,
1490                         metadata_mask->metadata_mask);
1491
1492         /* copy over EP cfg */
1493         ipa3_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
1494
1495         IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1496
1497         ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
1498                 clnt_hdl, metadata_mask);
1499
1500         IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1501
1502         return 0;
1503 }
1504
1505 /**
1506  * ipa3_cfg_ep_hdr() -  IPA end-point header configuration
1507  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1508  * @ipa_ep_cfg: [in] IPA end-point configuration params
1509  *
1510  * Returns:     0 on success, negative on failure
1511  *
1512  * Note:        Should not be called from atomic context
1513  */
1514 int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
1515 {
1516         struct ipa3_ep_context *ep;
1517
1518         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1519             ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) {
1520                 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1521                                 clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
1522                 return -EINVAL;
1523         }
1524         IPADBG("pipe=%d metadata_reg_valid=%d\n",
1525                 clnt_hdl,
1526                 ep_hdr->hdr_metadata_reg_valid);
1527
1528         IPADBG("remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
1529                 ep_hdr->hdr_remove_additional,
1530                 ep_hdr->hdr_a5_mux,
1531                 ep_hdr->hdr_ofst_pkt_size);
1532
1533         IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n",
1534                 ep_hdr->hdr_ofst_pkt_size_valid,
1535                 ep_hdr->hdr_additional_const_len);
1536
1537         IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x",
1538                 ep_hdr->hdr_ofst_metadata,
1539                 ep_hdr->hdr_ofst_metadata_valid,
1540                 ep_hdr->hdr_len);
1541
1542         ep = &ipa3_ctx->ep[clnt_hdl];
1543
1544         /* copy over EP cfg */
1545         ep->cfg.hdr = *ep_hdr;
1546
1547         IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1548
1549         ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, &ep->cfg.hdr);
1550
1551         IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1552
1553         return 0;
1554 }
1555
1556 /**
1557  * ipa3_cfg_ep_hdr_ext() -  IPA end-point extended header configuration
1558  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1559  * @ep_hdr_ext: [in] IPA end-point configuration params
1560  *
1561  * Returns:     0 on success, negative on failure
1562  *
1563  * Note:        Should not be called from atomic context
1564  */
1565 int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
1566                        const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
1567 {
1568         struct ipa3_ep_context *ep;
1569
1570         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1571             ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) {
1572                 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1573                                 clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
1574                 return -EINVAL;
1575         }
1576
1577         IPADBG("pipe=%d hdr_pad_to_alignment=%d\n",
1578                 clnt_hdl,
1579                 ep_hdr_ext->hdr_pad_to_alignment);
1580
1581         IPADBG("hdr_total_len_or_pad_offset=%d\n",
1582                 ep_hdr_ext->hdr_total_len_or_pad_offset);
1583
1584         IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n",
1585                 ep_hdr_ext->hdr_payload_len_inc_padding,
1586                 ep_hdr_ext->hdr_total_len_or_pad);
1587
1588         IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n",
1589                 ep_hdr_ext->hdr_total_len_or_pad_valid,
1590                 ep_hdr_ext->hdr_little_endian);
1591
1592         ep = &ipa3_ctx->ep[clnt_hdl];
1593
1594         /* copy over EP cfg */
1595         ep->cfg.hdr_ext = *ep_hdr_ext;
1596
1597         IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1598
1599         ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_EXT_n, clnt_hdl,
1600                 &ep->cfg.hdr_ext);
1601
1602         IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1603
1604         return 0;
1605 }
1606
1607 /**
1608  * ipa3_cfg_ep_ctrl() -  IPA end-point Control configuration
1609  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1610  * @ipa_ep_cfg_ctrl:    [in] IPA end-point configuration params
1611  *
1612  * Returns:     0 on success, negative on failure
1613  */
1614 int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
1615 {
1616         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ep_ctrl == NULL) {
1617                 IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
1618                 return -EINVAL;
1619         }
1620
1621         IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
1622                 clnt_hdl,
1623                 ep_ctrl->ipa_ep_suspend,
1624                 ep_ctrl->ipa_ep_delay);
1625
1626         ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, ep_ctrl);
1627
1628         if (ep_ctrl->ipa_ep_suspend == true &&
1629                         IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client))
1630                 ipa3_suspend_active_aggr_wa(clnt_hdl);
1631
1632         return 0;
1633 }
1634
1635 const char *ipa3_get_mode_type_str(enum ipa_mode_type mode)
1636 {
1637         switch (mode) {
1638         case (IPA_BASIC):
1639                 return "Basic";
1640         case (IPA_ENABLE_FRAMING_HDLC):
1641                 return "HDLC framing";
1642         case (IPA_ENABLE_DEFRAMING_HDLC):
1643                 return "HDLC de-framing";
1644         case (IPA_DMA):
1645                 return "DMA";
1646         }
1647
1648         return "undefined";
1649 }
1650
1651 /**
1652  * ipa3_cfg_ep_mode() - IPA end-point mode configuration
1653  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1654  * @ipa_ep_cfg: [in] IPA end-point configuration params
1655  *
1656  * Returns:     0 on success, negative on failure
1657  *
1658  * Note:        Should not be called from atomic context
1659  */
1660 int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
1661 {
1662         int ep;
1663         int type;
1664         struct ipahal_reg_endp_init_mode init_mode;
1665
1666         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1667             ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
1668                 IPAERR("bad params clnt_hdl=%d , ep_valid=%d ep_mode=%p\n",
1669                                 clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid,
1670                                 ep_mode);
1671                 return -EINVAL;
1672         }
1673
1674         if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
1675                 IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
1676                 return -EINVAL;
1677         }
1678
1679         ep = ipa3_get_ep_mapping(ep_mode->dst);
1680         if (ep == -1 && ep_mode->mode == IPA_DMA) {
1681                 IPAERR("dst %d does not exist in DMA mode\n", ep_mode->dst);
1682                 return -EINVAL;
1683         }
1684
1685         WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
1686
1687         if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
1688                 ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
1689
1690         IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d",
1691                         clnt_hdl,
1692                         ep_mode->mode,
1693                         ipa3_get_mode_type_str(ep_mode->mode),
1694                         ep_mode->dst);
1695
1696         /* copy over EP cfg */
1697         ipa3_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
1698         ipa3_ctx->ep[clnt_hdl].dst_pipe_index = ep;
1699
1700         IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1701
1702         init_mode.dst_pipe_number = ipa3_ctx->ep[clnt_hdl].dst_pipe_index;
1703         init_mode.ep_mode = *ep_mode;
1704         ipahal_write_reg_n_fields(IPA_ENDP_INIT_MODE_n, clnt_hdl, &init_mode);
1705
1706          /* Configure sequencers type for test clients*/
1707         if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
1708                 if (ep_mode->mode == IPA_DMA)
1709                         type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY;
1710                 else
1711                         type = IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP;
1712
1713                 IPADBG(" set sequencers to sequance 0x%x, ep = %d\n", type,
1714                                 clnt_hdl);
1715                 ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
1716         }
1717         IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1718
1719         return 0;
1720 }
1721
1722 const char *ipa3_get_aggr_enable_str(enum ipa_aggr_en_type aggr_en)
1723 {
1724         switch (aggr_en) {
1725         case (IPA_BYPASS_AGGR):
1726                         return "no aggregation";
1727         case (IPA_ENABLE_AGGR):
1728                         return "aggregation enabled";
1729         case (IPA_ENABLE_DEAGGR):
1730                 return "de-aggregation enabled";
1731         }
1732
1733         return "undefined";
1734 }
1735
1736 const char *ipa3_get_aggr_type_str(enum ipa_aggr_type aggr_type)
1737 {
1738         switch (aggr_type) {
1739         case (IPA_MBIM_16):
1740                         return "MBIM_16";
1741         case (IPA_HDLC):
1742                 return "HDLC";
1743         case (IPA_TLP):
1744                         return "TLP";
1745         case (IPA_RNDIS):
1746                         return "RNDIS";
1747         case (IPA_GENERIC):
1748                         return "GENERIC";
1749         case (IPA_QCMAP):
1750                         return "QCMAP";
1751         }
1752         return "undefined";
1753 }
1754
1755 /**
1756  * ipa3_cfg_ep_aggr() - IPA end-point aggregation configuration
1757  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1758  * @ipa_ep_cfg: [in] IPA end-point configuration params
1759  *
1760  * Returns:     0 on success, negative on failure
1761  *
1762  * Note:        Should not be called from atomic context
1763  */
1764 int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
1765 {
1766         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1767             ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) {
1768                 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1769                         clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
1770                 return -EINVAL;
1771         }
1772
1773         if (ep_aggr->aggr_en == IPA_ENABLE_DEAGGR &&
1774             !IPA_EP_SUPPORTS_DEAGGR(clnt_hdl)) {
1775                 IPAERR("pipe=%d cannot be configured to DEAGGR\n", clnt_hdl);
1776                 WARN_ON(1);
1777                 return -EINVAL;
1778         }
1779
1780         IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n",
1781                         clnt_hdl,
1782                         ep_aggr->aggr_en,
1783                         ipa3_get_aggr_enable_str(ep_aggr->aggr_en),
1784                         ep_aggr->aggr,
1785                         ipa3_get_aggr_type_str(ep_aggr->aggr),
1786                         ep_aggr->aggr_byte_limit,
1787                         ep_aggr->aggr_time_limit);
1788         IPADBG("hard_byte_limit_en=%d aggr_sw_eof_active=%d\n",
1789                 ep_aggr->aggr_hard_byte_limit_en,
1790                 ep_aggr->aggr_sw_eof_active);
1791
1792         /* copy over EP cfg */
1793         ipa3_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
1794
1795         IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1796
1797         ipahal_write_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, ep_aggr);
1798
1799         IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1800
1801         return 0;
1802 }
1803
/**
 * ipa3_cfg_ep_route() - IPA end-point routing configuration
 * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
 * @ep_route:	[in] IPA end-point routing configuration params; the
 *		rt_tbl_hdl field is ignored (see below)
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
{
	struct ipahal_reg_endp_init_route init_rt;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
			clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
		return -EINVAL;
	}

	/* ROUTE is only programmed on producer (IPA input) pipes */
	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
		IPAERR("ROUTE does not apply to IPA out EP %d\n",
				clnt_hdl);
		return -EINVAL;
	}

	/*
	 * if DMA mode was configured previously for this EP, return with
	 * success
	 */
	if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
		IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n",
				clnt_hdl);
		return 0;
	}

	if (ep_route->rt_tbl_hdl)
		IPAERR("client specified non-zero RT TBL hdl - ignore it\n");

	IPADBG("pipe=%d, rt_tbl_hdl=%d\n",
			clnt_hdl,
			ep_route->rt_tbl_hdl);

	/* always use "default" routing table when programming EP ROUTE reg */
	ipa3_ctx->ep[clnt_hdl].rt_tbl_idx =
		IPA_MEM_PART(v4_apps_rt_index_lo);

	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx;
	ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n, clnt_hdl, &init_rt);

	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	return 0;
}
1860
/**
 * ipa3_cfg_ep_holb() - IPA end-point holb configuration
 *
 * If an IPA producer pipe is full, IPA HW by default will block
 * indefinitely till space opens up. During this time no packets
 * including those from unrelated pipes will be processed. Enabling
 * HOLB means IPA HW will be allowed to drop packets as/when needed
 * and indefinite blocking is avoided.
 *
 * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
 * @ep_holb:	[in] IPA end-point HOLB params; en must be 0 or 1 and
 *		tmr_val must not exceed the HW-version-specific maximum
 *
 * Returns:	0 on success, negative on failure
 */
int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
{
	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL ||
	    ep_holb->tmr_val > ipa3_ctx->ctrl->max_holb_tmr_val ||
	    ep_holb->en > 1) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	/* HOLB is only meaningful for consumer (IPA output) pipes */
	if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
		IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
		return -EINVAL;
	}

	/* cache the HOLB settings in the EP context */
	ipa3_ctx->ep[clnt_hdl].holb = *ep_holb;

	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	/* program both the enable bit and the drop timer for this pipe */
	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl,
		ep_holb);

	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, clnt_hdl,
		ep_holb);

	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
				ep_holb->tmr_val);

	return 0;
}
1907
1908 /**
1909  * ipa3_cfg_ep_holb_by_client() - IPA end-point holb configuration
1910  *
1911  * Wrapper function for ipa3_cfg_ep_holb() with client name instead of
1912  * client handle. This function is used for clients that does not have
1913  * client handle.
1914  *
1915  * @client:     [in] client name
1916  * @ipa_ep_cfg: [in] IPA end-point configuration params
1917  *
1918  * Returns:     0 on success, negative on failure
1919  */
1920 int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
1921                                 const struct ipa_ep_cfg_holb *ep_holb)
1922 {
1923         return ipa3_cfg_ep_holb(ipa3_get_ep_mapping(client), ep_holb);
1924 }
1925
1926 /**
1927  * ipa3_cfg_ep_deaggr() -  IPA end-point deaggregation configuration
1928  * @clnt_hdl:   [in] opaque client handle assigned by IPA to client
1929  * @ep_deaggr:  [in] IPA end-point configuration params
1930  *
1931  * Returns:     0 on success, negative on failure
1932  *
1933  * Note:        Should not be called from atomic context
1934  */
1935 int ipa3_cfg_ep_deaggr(u32 clnt_hdl,
1936                         const struct ipa_ep_cfg_deaggr *ep_deaggr)
1937 {
1938         struct ipa3_ep_context *ep;
1939
1940         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
1941             ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
1942                 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
1943                                 clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
1944                 return -EINVAL;
1945         }
1946
1947         IPADBG("pipe=%d deaggr_hdr_len=%d\n",
1948                 clnt_hdl,
1949                 ep_deaggr->deaggr_hdr_len);
1950
1951         IPADBG("packet_offset_valid=%d\n",
1952                 ep_deaggr->packet_offset_valid);
1953
1954         IPADBG("packet_offset_location=%d max_packet_len=%d\n",
1955                 ep_deaggr->packet_offset_location,
1956                 ep_deaggr->max_packet_len);
1957
1958         ep = &ipa3_ctx->ep[clnt_hdl];
1959
1960         /* copy over EP cfg */
1961         ep->cfg.deaggr = *ep_deaggr;
1962
1963         IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
1964
1965         ipahal_write_reg_n_fields(IPA_ENDP_INIT_DEAGGR_n, clnt_hdl,
1966                 &ep->cfg.deaggr);
1967
1968         IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
1969
1970         return 0;
1971 }
1972
/**
 * ipa3_cfg_ep_metadata() - IPA end-point metadata configuration
 * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
 * @ep_md:	[in] IPA end-point metadata params (qmap_id is the raw
 *		mux-id; it is shifted into register position below)
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
{
	u32 qmap_id = 0;
	struct ipa_ep_cfg_metadata ep_md_reg_wrt;

	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
		ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
					clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
		return -EINVAL;
	}

	IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);

	/* copy over EP cfg (cache keeps the caller's raw qmap_id) */
	ipa3_ctx->ep[clnt_hdl].cfg.meta = *ep_md;

	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

	/*
	 * the METADATA register keeps the mux-id in bits [23:16] (see the
	 * MUX_ID shift/mask macros at the top of this file); shift the
	 * caller's qmap_id into place for the register write only
	 */
	ep_md_reg_wrt = *ep_md;
	qmap_id = (ep_md->qmap_id <<
		IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT) &
		IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK;

	ep_md_reg_wrt.qmap_id = qmap_id;
	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_n, clnt_hdl,
		&ep_md_reg_wrt);
	/* re-program HDR so it reflects that the metadata reg is now valid */
	ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl,
		&ipa3_ctx->ep[clnt_hdl].cfg.hdr);

	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));

	return 0;
}
2017
2018 int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
2019 {
2020         struct ipa_ep_cfg_metadata meta;
2021         struct ipa3_ep_context *ep;
2022         int ipa_ep_idx;
2023         int result = -EINVAL;
2024
2025         if (param_in->client  >= IPA_CLIENT_MAX) {
2026                 IPAERR_RL("bad parm client:%d\n", param_in->client);
2027                 goto fail;
2028         }
2029
2030         ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
2031         if (ipa_ep_idx == -1) {
2032                 IPAERR_RL("Invalid client.\n");
2033                 goto fail;
2034         }
2035
2036         ep = &ipa3_ctx->ep[ipa_ep_idx];
2037         if (!ep->valid) {
2038                 IPAERR_RL("EP not allocated.\n");
2039                 goto fail;
2040         }
2041
2042         meta.qmap_id = param_in->qmap_id;
2043         if (param_in->client == IPA_CLIENT_USB_PROD ||
2044             param_in->client == IPA_CLIENT_HSIC1_PROD ||
2045             param_in->client == IPA_CLIENT_ODU_PROD) {
2046                 result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
2047         } else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
2048                 ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
2049                 result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
2050                 if (result)
2051                         IPAERR_RL("qmap_id %d write failed on ep=%d\n",
2052                                         meta.qmap_id, ipa_ep_idx);
2053                 result = 0;
2054         }
2055
2056 fail:
2057         return result;
2058 }
2059
2060 /**
2061  * ipa3_dump_buff_internal() - dumps buffer for debug purposes
2062  * @base: buffer base address
2063  * @phy_base: buffer physical base address
2064  * @size: size of the buffer
2065  */
2066 void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
2067 {
2068         int i;
2069         u32 *cur = (u32 *)base;
2070         u8 *byt;
2071
2072         IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
2073         for (i = 0; i < size / 4; i++) {
2074                 byt = (u8 *)(cur + i);
2075                 IPADBG("%2d %08x   %02x %02x %02x %02x\n", i, *(cur + i),
2076                                 byt[0], byt[1], byt[2], byt[3]);
2077         }
2078         IPADBG("END\n");
2079 }
2080
/**
 * ipa3_pipe_mem_init() - initialize the pipe memory
 * @start_ofst: start offset
 * @size: size
 *
 * Creates a genalloc pool over the pipe memory region so that
 * ipa3_pipe_mem_alloc()/ipa3_pipe_mem_free() can carve it up.
 *
 * Return value:
 * 0: success
 * -ENOMEM: no memory
 */
int ipa3_pipe_mem_init(u32 start_ofst, u32 size)
{
	int res;
	u32 aligned_start_ofst;
	u32 aligned_size;
	struct gen_pool *pool;

	if (!size) {
		IPAERR("no IPA pipe memory allocated\n");
		goto fail;
	}

	/* round the start up to the required alignment and shrink the
	 * usable size by however many bytes were skipped over
	 */
	aligned_start_ofst = IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst);
	aligned_size = size - (aligned_start_ofst - start_ofst);

	IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
	       start_ofst, aligned_start_ofst, size, aligned_size);

	/* allocation order of 8 i.e. 2^8 = 256 bytes min granule; -1 nid
	 * means any NUMA node
	 */
	pool = gen_pool_create(8, -1);
	if (!pool) {
		IPAERR("Failed to create a new memory pool.\n");
		goto fail;
	}

	res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
	if (res) {
		IPAERR("Failed to add memory to IPA pipe pool\n");
		goto err_pool_add;
	}

	ipa3_ctx->pipe_mem_pool = pool;
	return 0;

err_pool_add:
	gen_pool_destroy(pool);
fail:
	return -ENOMEM;
}
2129
2130 /**
2131  * ipa3_pipe_mem_alloc() - allocate pipe memory
2132  * @ofst: offset
2133  * @size: size
2134  *
2135  * Return value:
2136  * 0: success
2137  */
2138 int ipa3_pipe_mem_alloc(u32 *ofst, u32 size)
2139 {
2140         u32 vaddr;
2141         int res = -1;
2142
2143         if (!ipa3_ctx->pipe_mem_pool || !size) {
2144                 IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
2145                                 ipa3_ctx->pipe_mem_pool);
2146                 return res;
2147         }
2148
2149         vaddr = gen_pool_alloc(ipa3_ctx->pipe_mem_pool, size);
2150
2151         if (vaddr) {
2152                 *ofst = vaddr;
2153                 res = 0;
2154                 IPADBG("size=%u ofst=%u\n", size, vaddr);
2155         } else {
2156                 IPAERR("size=%u failed\n", size);
2157         }
2158
2159         return res;
2160 }
2161
2162 /**
2163  * ipa3_pipe_mem_free() - free pipe memory
2164  * @ofst: offset
2165  * @size: size
2166  *
2167  * Return value:
2168  * 0: success
2169  */
2170 int ipa3_pipe_mem_free(u32 ofst, u32 size)
2171 {
2172         IPADBG("size=%u ofst=%u\n", size, ofst);
2173         if (ipa3_ctx->pipe_mem_pool && size)
2174                 gen_pool_free(ipa3_ctx->pipe_mem_pool, ofst, size);
2175         return 0;
2176 }
2177
2178 /**
2179  * ipa3_set_aggr_mode() - Set the aggregation mode which is a global setting
2180  * @mode:       [in] the desired aggregation mode for e.g. straight MBIM, QCNCM,
2181  * etc
2182  *
2183  * Returns:     0 on success
2184  */
2185 int ipa3_set_aggr_mode(enum ipa_aggr_mode mode)
2186 {
2187         struct ipahal_reg_qcncm qcncm;
2188
2189         IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2190         ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
2191         qcncm.mode_en = mode;
2192         ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
2193         IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2194
2195         return 0;
2196 }
2197
2198 /**
2199  * ipa3_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
2200  * mode
2201  * @sig:        [in] the first 3 bytes of QCNCM NDP signature (expected to be
2202  * "QND")
2203  *
2204  * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
2205  * (expected to be 'P') needs to be set using the header addition mechanism
2206  *
2207  * Returns:     0 on success, negative on failure
2208  */
2209 int ipa3_set_qcncm_ndp_sig(char sig[3])
2210 {
2211         struct ipahal_reg_qcncm qcncm;
2212
2213         if (sig == NULL) {
2214                 IPAERR("bad argument for ipa3_set_qcncm_ndp_sig/n");
2215                 return -EINVAL;
2216         }
2217         IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2218         ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
2219         qcncm.mode_val = ((sig[0] << 16) | (sig[1] << 8) | sig[2]);
2220         ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
2221         IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2222
2223         return 0;
2224 }
2225
2226 /**
2227  * ipa3_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
2228  * configuration
2229  * @enable:     [in] true for single NDP/MBIM; false otherwise
2230  *
2231  * Returns:     0 on success
2232  */
2233 int ipa3_set_single_ndp_per_mbim(bool enable)
2234 {
2235         struct ipahal_reg_single_ndp_mode mode;
2236
2237         IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2238         ipahal_read_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
2239         mode.single_ndp_en = enable;
2240         ipahal_write_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
2241         IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2242
2243         return 0;
2244 }
2245
/**
 * ipa3_straddle_boundary() - Checks whether a memory buffer straddles a boundary
 * @start: start address of the memory buffer
 * @end: end address of the memory buffer
 * @boundary: boundary; assumed to be a power of two (the rounding below
 * uses mask arithmetic)
 *
 * Return value:
 * 1: if the interval [start, end] straddles boundary
 * 0: otherwise
 */
int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary)
{
	u32 next_start;
	u32 prev_end;

	IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);

	/* first boundary multiple at or after start */
	next_start = (start + (boundary - 1)) & ~(boundary - 1);
	/* last boundary multiple strictly below the rounded-up end */
	prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;

	/* both values are boundary multiples, so the loop exits either with
	 * next_start == prev_end (boundary straddled) or with
	 * next_start > prev_end (not straddled)
	 */
	while (next_start < prev_end)
		next_start += boundary;

	if (next_start == prev_end)
		return 1;
	else
		return 0;
}
2274
/**
 * ipa3_bam_reg_dump() - Dump selected BAM registers for IPA.
 * The API is right now used only to dump IPA registers towards USB.
 *
 * Function is rate limited to avoid flooding kernel log buffer
 */
void ipa3_bam_reg_dump(void)
{
	/* allow at most one dump every 500*HZ jiffies (~500 seconds) */
	static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);

	if (__ratelimit(&_rs)) {
		/* keep clocks on while poking BAM debug registers */
		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
		pr_err("IPA BAM START\n");
		/* NOTE(review): the magic 93 is an option/level selector
		 * passed to sps_get_bam_debug_info - confirm its meaning
		 * against the SPS driver before changing it
		 */
		sps_get_bam_debug_info(ipa3_ctx->bam_handle, 93,
			(SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS))
			|
			SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD))),
			0, 2);
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	}
}
2296
2297 /**
2298  * ipa3_init_mem_partition() - Reads IPA memory map from DTS, performs alignment
2299  * checks and logs the fetched values.
2300  *
2301  * Returns:     0 on success
2302  */
2303 int ipa3_init_mem_partition(struct device_node *node)
2304 {
2305         int result;
2306
2307         IPADBG("Reading from DTS as u32 array\n");
2308         result = of_property_read_u32_array(node,
2309                 "qcom,ipa-ram-mmap", (u32 *)&ipa3_ctx->ctrl->mem_partition,
2310                 sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32));
2311
2312         if (result) {
2313                 IPAERR("Read operation failed\n");
2314                 return -ENODEV;
2315         }
2316
2317         IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
2318                 IPA_MEM_PART(nat_size));
2319
2320         if (IPA_MEM_PART(uc_info_ofst) & 3) {
2321                 IPAERR("UC INFO OFST 0x%x is unaligned\n",
2322                         IPA_MEM_PART(uc_info_ofst));
2323                 return -ENODEV;
2324         }
2325
2326         IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n",
2327                 IPA_MEM_PART(uc_info_ofst), IPA_MEM_PART(uc_info_size));
2328
2329         IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
2330
2331         if (IPA_MEM_PART(v4_flt_hash_ofst) & 7) {
2332                 IPAERR("V4 FLT HASHABLE OFST 0x%x is unaligned\n",
2333                         IPA_MEM_PART(v4_flt_hash_ofst));
2334                 return -ENODEV;
2335         }
2336
2337         IPADBG("V4 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2338                 IPA_MEM_PART(v4_flt_hash_ofst),
2339                 IPA_MEM_PART(v4_flt_hash_size),
2340                 IPA_MEM_PART(v4_flt_hash_size_ddr));
2341
2342         if (IPA_MEM_PART(v4_flt_nhash_ofst) & 7) {
2343                 IPAERR("V4 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
2344                         IPA_MEM_PART(v4_flt_nhash_ofst));
2345                 return -ENODEV;
2346         }
2347
2348         IPADBG("V4 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2349                 IPA_MEM_PART(v4_flt_nhash_ofst),
2350                 IPA_MEM_PART(v4_flt_nhash_size),
2351                 IPA_MEM_PART(v4_flt_nhash_size_ddr));
2352
2353         if (IPA_MEM_PART(v6_flt_hash_ofst) & 7) {
2354                 IPAERR("V6 FLT HASHABLE OFST 0x%x is unaligned\n",
2355                         IPA_MEM_PART(v6_flt_hash_ofst));
2356                 return -ENODEV;
2357         }
2358
2359         IPADBG("V6 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2360                 IPA_MEM_PART(v6_flt_hash_ofst), IPA_MEM_PART(v6_flt_hash_size),
2361                 IPA_MEM_PART(v6_flt_hash_size_ddr));
2362
2363         if (IPA_MEM_PART(v6_flt_nhash_ofst) & 7) {
2364                 IPAERR("V6 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
2365                         IPA_MEM_PART(v6_flt_nhash_ofst));
2366                 return -ENODEV;
2367         }
2368
2369         IPADBG("V6 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2370                 IPA_MEM_PART(v6_flt_nhash_ofst),
2371                 IPA_MEM_PART(v6_flt_nhash_size),
2372                 IPA_MEM_PART(v6_flt_nhash_size_ddr));
2373
2374         IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_rt_num_index));
2375
2376         IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
2377                 IPA_MEM_PART(v4_modem_rt_index_lo),
2378                 IPA_MEM_PART(v4_modem_rt_index_hi));
2379
2380         IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
2381                 IPA_MEM_PART(v4_apps_rt_index_lo),
2382                 IPA_MEM_PART(v4_apps_rt_index_hi));
2383
2384         if (IPA_MEM_PART(v4_rt_hash_ofst) & 7) {
2385                 IPAERR("V4 RT HASHABLE OFST 0x%x is unaligned\n",
2386                         IPA_MEM_PART(v4_rt_hash_ofst));
2387                 return -ENODEV;
2388         }
2389
2390         IPADBG("V4 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v4_rt_hash_ofst));
2391
2392         IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
2393                 IPA_MEM_PART(v4_rt_hash_size),
2394                 IPA_MEM_PART(v4_rt_hash_size_ddr));
2395
2396         if (IPA_MEM_PART(v4_rt_nhash_ofst) & 7) {
2397                 IPAERR("V4 RT NON-HASHABLE OFST 0x%x is unaligned\n",
2398                         IPA_MEM_PART(v4_rt_nhash_ofst));
2399                 return -ENODEV;
2400         }
2401
2402         IPADBG("V4 RT NON-HASHABLE OFST 0x%x\n",
2403                 IPA_MEM_PART(v4_rt_nhash_ofst));
2404
2405         IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
2406                 IPA_MEM_PART(v4_rt_nhash_size),
2407                 IPA_MEM_PART(v4_rt_nhash_size_ddr));
2408
2409         IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_rt_num_index));
2410
2411         IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
2412                 IPA_MEM_PART(v6_modem_rt_index_lo),
2413                 IPA_MEM_PART(v6_modem_rt_index_hi));
2414
2415         IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
2416                 IPA_MEM_PART(v6_apps_rt_index_lo),
2417                 IPA_MEM_PART(v6_apps_rt_index_hi));
2418
2419         if (IPA_MEM_PART(v6_rt_hash_ofst) & 7) {
2420                 IPAERR("V6 RT HASHABLE OFST 0x%x is unaligned\n",
2421                         IPA_MEM_PART(v6_rt_hash_ofst));
2422                 return -ENODEV;
2423         }
2424
2425         IPADBG("V6 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v6_rt_hash_ofst));
2426
2427         IPADBG("V6 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
2428                 IPA_MEM_PART(v6_rt_hash_size),
2429                 IPA_MEM_PART(v6_rt_hash_size_ddr));
2430
2431         if (IPA_MEM_PART(v6_rt_nhash_ofst) & 7) {
2432                 IPAERR("V6 RT NON-HASHABLE OFST 0x%x is unaligned\n",
2433                         IPA_MEM_PART(v6_rt_nhash_ofst));
2434                 return -ENODEV;
2435         }
2436
2437         IPADBG("V6 RT NON-HASHABLE OFST 0x%x\n",
2438                 IPA_MEM_PART(v6_rt_nhash_ofst));
2439
2440         IPADBG("V6 RT NON-HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
2441                 IPA_MEM_PART(v6_rt_nhash_size),
2442                 IPA_MEM_PART(v6_rt_nhash_size_ddr));
2443
2444         if (IPA_MEM_PART(modem_hdr_ofst) & 7) {
2445                 IPAERR("MODEM HDR OFST 0x%x is unaligned\n",
2446                         IPA_MEM_PART(modem_hdr_ofst));
2447                 return -ENODEV;
2448         }
2449
2450         IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
2451                 IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
2452
2453         if (IPA_MEM_PART(apps_hdr_ofst) & 7) {
2454                 IPAERR("APPS HDR OFST 0x%x is unaligned\n",
2455                         IPA_MEM_PART(apps_hdr_ofst));
2456                 return -ENODEV;
2457         }
2458
2459         IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2460                 IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
2461                 IPA_MEM_PART(apps_hdr_size_ddr));
2462
2463         if (IPA_MEM_PART(modem_hdr_proc_ctx_ofst) & 7) {
2464                 IPAERR("MODEM HDR PROC CTX OFST 0x%x is unaligned\n",
2465                         IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
2466                 return -ENODEV;
2467         }
2468
2469         IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
2470                 IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
2471                 IPA_MEM_PART(modem_hdr_proc_ctx_size));
2472
2473         if (IPA_MEM_PART(apps_hdr_proc_ctx_ofst) & 7) {
2474                 IPAERR("APPS HDR PROC CTX OFST 0x%x is unaligned\n",
2475                         IPA_MEM_PART(apps_hdr_proc_ctx_ofst));
2476                 return -ENODEV;
2477         }
2478
2479         IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
2480                 IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
2481                 IPA_MEM_PART(apps_hdr_proc_ctx_size),
2482                 IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));
2483
2484         if (IPA_MEM_PART(modem_ofst) & 7) {
2485                 IPAERR("MODEM OFST 0x%x is unaligned\n",
2486                         IPA_MEM_PART(modem_ofst));
2487                 return -ENODEV;
2488         }
2489
2490         IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
2491                 IPA_MEM_PART(modem_size));
2492
2493         IPADBG("V4 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
2494                 IPA_MEM_PART(apps_v4_flt_hash_ofst),
2495                 IPA_MEM_PART(apps_v4_flt_hash_size));
2496
2497         IPADBG("V4 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
2498                 IPA_MEM_PART(apps_v4_flt_nhash_ofst),
2499                 IPA_MEM_PART(apps_v4_flt_nhash_size));
2500
2501         IPADBG("V6 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
2502                 IPA_MEM_PART(apps_v6_flt_hash_ofst),
2503                 IPA_MEM_PART(apps_v6_flt_hash_size));
2504
2505         IPADBG("V6 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
2506                 IPA_MEM_PART(apps_v6_flt_nhash_ofst),
2507                 IPA_MEM_PART(apps_v6_flt_nhash_size));
2508
2509         IPADBG("RAM END OFST 0x%x\n",
2510                 IPA_MEM_PART(end_ofst));
2511
2512         IPADBG("V4 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
2513                 IPA_MEM_PART(apps_v4_rt_hash_ofst),
2514                 IPA_MEM_PART(apps_v4_rt_hash_size));
2515
2516         IPADBG("V4 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
2517                 IPA_MEM_PART(apps_v4_rt_nhash_ofst),
2518                 IPA_MEM_PART(apps_v4_rt_nhash_size));
2519
2520         IPADBG("V6 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
2521                 IPA_MEM_PART(apps_v6_rt_hash_ofst),
2522                 IPA_MEM_PART(apps_v6_rt_hash_size));
2523
2524         IPADBG("V6 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
2525                 IPA_MEM_PART(apps_v6_rt_nhash_ofst),
2526                 IPA_MEM_PART(apps_v6_rt_nhash_size));
2527
2528         return 0;
2529 }
2530
2531 /**
2532  * ipa_ctrl_static_bind() - set the appropriate methods for
2533  *  IPA Driver based on the HW version
2534  *
2535  *  @ctrl: data structure which holds the function pointers
2536  *  @hw_type: the HW type in use
2537  *
2538  *  This function can avoid the runtime assignment by using C99 special
2539  *  struct initialization - hard decision... time.vs.mem
2540  */
2541 int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
2542                 enum ipa_hw_type hw_type)
2543 {
2544         ctrl->ipa_init_rt4 = _ipa_init_rt4_v3;
2545         ctrl->ipa_init_rt6 = _ipa_init_rt6_v3;
2546         ctrl->ipa_init_flt4 = _ipa_init_flt4_v3;
2547         ctrl->ipa_init_flt6 = _ipa_init_flt6_v3;
2548         ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO;
2549         ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL;
2550         ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS;
2551         ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v3_0;
2552         ctrl->ipa3_commit_flt = __ipa_commit_flt_v3;
2553         ctrl->ipa3_commit_rt = __ipa_commit_rt_v3;
2554         ctrl->ipa3_commit_hdr = __ipa_commit_hdr_v3_0;
2555         ctrl->ipa3_enable_clks = _ipa_enable_clks_v3_0;
2556         ctrl->ipa3_disable_clks = _ipa_disable_clks_v3_0;
2557         ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v3_0;
2558         ctrl->clock_scaling_bw_threshold_nominal =
2559                 IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS;
2560         ctrl->clock_scaling_bw_threshold_turbo =
2561                 IPA_V3_0_BW_THRESHOLD_TURBO_MBPS;
2562         ctrl->ipa_reg_base_ofst = ipahal_get_reg_base();
2563         ctrl->ipa_init_sram = _ipa_init_sram_v3_0;
2564         ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
2565
2566         ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
2567
2568         return 0;
2569 }
2570
/**
 * ipa3_skb_recycle() - reset an skb so its underlying buffer can be reused
 * @skb: socket buffer to recycle
 *
 * Clears the shared info area up to (but not including) the dataref field
 * and restores a single reference, then zeroes the sk_buff fields up to
 * the tail member and points data back at head + NET_SKB_PAD.
 *
 * NOTE(review): statement order matters here - the shared info is fetched
 * and reset before the sk_buff header fields are wiped; do not reorder.
 */
void ipa3_skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	shinfo = skb_shinfo(skb);
	/* wipe everything before dataref, then hold exactly one reference */
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	/* reset the header fields and rewind data/tail to the buffer start */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
2583
2584 int ipa3_alloc_rule_id(struct idr *rule_ids)
2585 {
2586         /* There is two groups of rule-Ids, Modem ones and Apps ones.
2587          * Distinction by high bit: Modem Ids are high bit asserted.
2588          */
2589         return idr_alloc(rule_ids, NULL,
2590                 ipahal_get_low_rule_id(), ipahal_get_rule_id_hi_bit(),
2591                 GFP_KERNEL);
2592 }
2593
2594 int ipa3_id_alloc(void *ptr)
2595 {
2596         int id;
2597
2598         idr_preload(GFP_KERNEL);
2599         spin_lock(&ipa3_ctx->idr_lock);
2600         id = idr_alloc(&ipa3_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
2601         spin_unlock(&ipa3_ctx->idr_lock);
2602         idr_preload_end();
2603
2604         return id;
2605 }
2606
2607 void *ipa3_id_find(u32 id)
2608 {
2609         void *ptr;
2610
2611         spin_lock(&ipa3_ctx->idr_lock);
2612         ptr = idr_find(&ipa3_ctx->ipa_idr, id);
2613         spin_unlock(&ipa3_ctx->idr_lock);
2614
2615         return ptr;
2616 }
2617
/**
 * ipa3_id_remove() - remove @id from the global IPA idr
 * @id: id previously returned by ipa3_id_alloc()
 *
 * The object the id pointed to is not freed; the caller owns it.
 */
void ipa3_id_remove(u32 id)
{
	spin_lock(&ipa3_ctx->idr_lock);
	idr_remove(&ipa3_ctx->ipa_idr, id);
	spin_unlock(&ipa3_ctx->idr_lock);
}
2624
/*
 * Descriptor completion callback that releases an immediate-command
 * payload. @user1 is the ipahal_imm_cmd_pyld; @user2 is unused but
 * required by the callback signature.
 */
void ipa3_tag_destroy_imm(void *user1, int user2)
{
	ipahal_destroy_imm_cmd(user1);
}
2629
/*
 * Descriptor completion callback that frees the dummy skb sent during a
 * TAG process. @user2 is unused but required by the callback signature.
 */
static void ipa3_tag_free_skb(void *user1, int user2)
{
	dev_kfree_skb_any((struct sk_buff *)user1);
}
2634
2635 #define REQUIRED_TAG_PROCESS_DESCRIPTORS 4
2636
2637 /* ipa3_tag_process() - Initiates a tag process. Incorporates the input
2638  * descriptors
2639  *
2640  * @desc:       descriptors with commands for IC
2641  * @desc_size:  amount of descriptors in the above variable
2642  *
2643  * Note: The descriptors are copied (if there's room), the client needs to
2644  * free his descriptors afterwards
2645  *
2646  * Return: 0 or negative in case of failure
2647  */
2648 int ipa3_tag_process(struct ipa3_desc desc[],
2649         int descs_num,
2650         unsigned long timeout)
2651 {
2652         struct ipa3_sys_context *sys;
2653         struct ipa3_desc *tag_desc;
2654         int desc_idx = 0;
2655         struct ipahal_imm_cmd_ip_packet_init pktinit_cmd;
2656         struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2657         struct ipahal_imm_cmd_ip_packet_tag_status status;
2658         int i;
2659         struct sk_buff *dummy_skb;
2660         int res;
2661         struct ipa3_tag_completion *comp;
2662         int ep_idx;
2663
2664         /* Not enough room for the required descriptors for the tag process */
2665         if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
2666                 IPAERR("up to %d descriptors are allowed (received %d)\n",
2667                        IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS,
2668                        descs_num);
2669                 return -ENOMEM;
2670         }
2671
2672         ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
2673         if (-1 == ep_idx) {
2674                 IPAERR("Client %u is not mapped\n",
2675                         IPA_CLIENT_APPS_CMD_PROD);
2676                 return -EFAULT;
2677         }
2678         sys = ipa3_ctx->ep[ep_idx].sys;
2679
2680         tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL);
2681         if (!tag_desc) {
2682                 IPAERR("failed to allocate memory\n");
2683                 return -ENOMEM;
2684         }
2685
2686         /* Copy the required descriptors from the client now */
2687         if (desc) {
2688                 memcpy(&(tag_desc[0]), desc, descs_num *
2689                         sizeof(tag_desc[0]));
2690                 desc_idx += descs_num;
2691         }
2692
2693         /* NO-OP IC for ensuring that IPA pipeline is empty */
2694         cmd_pyld = ipahal_construct_nop_imm_cmd(
2695                 false, IPAHAL_FULL_PIPELINE_CLEAR, false);
2696         if (!cmd_pyld) {
2697                 IPAERR("failed to construct NOP imm cmd\n");
2698                 res = -ENOMEM;
2699                 goto fail_free_tag_desc;
2700         }
2701         tag_desc[desc_idx].opcode =
2702                 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
2703         tag_desc[desc_idx].pyld = cmd_pyld->data;
2704         tag_desc[desc_idx].len = cmd_pyld->len;
2705         tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
2706         tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
2707         tag_desc[desc_idx].user1 = cmd_pyld;
2708         desc_idx++;
2709
2710         /* IP_PACKET_INIT IC for tag status to be sent to apps */
2711         pktinit_cmd.destination_pipe_index =
2712                 ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
2713         cmd_pyld = ipahal_construct_imm_cmd(
2714                 IPA_IMM_CMD_IP_PACKET_INIT, &pktinit_cmd, false);
2715         if (!cmd_pyld) {
2716                 IPAERR("failed to construct ip_packet_init imm cmd\n");
2717                 res = -ENOMEM;
2718                 goto fail_free_desc;
2719         }
2720         tag_desc[desc_idx].opcode =
2721                 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
2722         tag_desc[desc_idx].pyld = cmd_pyld->data;
2723         tag_desc[desc_idx].len = cmd_pyld->len;
2724         tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
2725         tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
2726         tag_desc[desc_idx].user1 = cmd_pyld;
2727         desc_idx++;
2728
2729         /* status IC */
2730         status.tag = IPA_COOKIE;
2731         cmd_pyld = ipahal_construct_imm_cmd(
2732                 IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &status, false);
2733         if (!cmd_pyld) {
2734                 IPAERR("failed to construct ip_packet_tag_status imm cmd\n");
2735                 res = -ENOMEM;
2736                 goto fail_free_desc;
2737         }
2738         tag_desc[desc_idx].opcode =
2739                 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
2740         tag_desc[desc_idx].pyld = cmd_pyld->data;
2741         tag_desc[desc_idx].len = cmd_pyld->len;
2742         tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
2743         tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
2744         tag_desc[desc_idx].user1 = cmd_pyld;
2745         desc_idx++;
2746
2747         comp = kzalloc(sizeof(*comp), GFP_KERNEL);
2748         if (!comp) {
2749                 IPAERR("no mem\n");
2750                 res = -ENOMEM;
2751                 goto fail_free_desc;
2752         }
2753         init_completion(&comp->comp);
2754
2755         /* completion needs to be released from both here and rx handler */
2756         atomic_set(&comp->cnt, 2);
2757
2758         /* dummy packet to send to IPA. packet payload is a completion object */
2759         dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL);
2760         if (!dummy_skb) {
2761                 IPAERR("failed to allocate memory\n");
2762                 res = -ENOMEM;
2763                 goto fail_free_comp;
2764         }
2765
2766         memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));
2767
2768         tag_desc[desc_idx].pyld = dummy_skb->data;
2769         tag_desc[desc_idx].len = dummy_skb->len;
2770         tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
2771         tag_desc[desc_idx].callback = ipa3_tag_free_skb;
2772         tag_desc[desc_idx].user1 = dummy_skb;
2773         desc_idx++;
2774
2775         /* send all descriptors to IPA with single EOT */
2776         res = ipa3_send(sys, desc_idx, tag_desc, true);
2777         if (res) {
2778                 IPAERR("failed to send TAG packets %d\n", res);
2779                 res = -ENOMEM;
2780                 goto fail_free_comp;
2781         }
2782         kfree(tag_desc);
2783         tag_desc = NULL;
2784
2785         IPADBG("waiting for TAG response\n");
2786         res = wait_for_completion_timeout(&comp->comp, timeout);
2787         if (res == 0) {
2788                 IPAERR("timeout (%lu msec) on waiting for TAG response\n",
2789                         timeout);
2790                 WARN_ON(1);
2791                 if (atomic_dec_return(&comp->cnt) == 0)
2792                         kfree(comp);
2793                 return -ETIME;
2794         }
2795
2796         IPADBG("TAG response arrived!\n");
2797         if (atomic_dec_return(&comp->cnt) == 0)
2798                 kfree(comp);
2799
2800         /* sleep for short period to ensure IPA wrote all packets to BAM */
2801         usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
2802
2803         return 0;
2804
2805 fail_free_comp:
2806         kfree(comp);
2807 fail_free_desc:
2808         /*
2809          * Free only the first descriptors allocated here.
2810          * [nop, pkt_init, status, dummy_skb]
2811          * The user is responsible to free his allocations
2812          * in case of failure.
2813          * The min is required because we may fail during
2814          * of the initial allocations above
2815          */
2816         for (i = descs_num;
2817                 i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS, desc_idx); i++)
2818                 if (tag_desc[i].callback)
2819                         tag_desc[i].callback(tag_desc[i].user1,
2820                                 tag_desc[i].user2);
2821 fail_free_tag_desc:
2822         kfree(tag_desc);
2823         return res;
2824 }
2825
2826 /**
2827  * ipa3_tag_generate_force_close_desc() - generate descriptors for force close
2828  *                                       immediate command
2829  *
2830  * @desc: descriptors for IC
2831  * @desc_size: desc array size
2832  * @start_pipe: first pipe to close aggregation
2833  * @end_pipe: last (non-inclusive) pipe to close aggregation
2834  *
2835  * Return: number of descriptors written or negative in case of failure
2836  */
2837 static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
2838         int desc_size, int start_pipe, int end_pipe)
2839 {
2840         int i;
2841         struct ipa_ep_cfg_aggr ep_aggr;
2842         int desc_idx = 0;
2843         int res;
2844         struct ipahal_imm_cmd_register_write reg_write_agg_close;
2845         struct ipahal_imm_cmd_pyld *cmd_pyld;
2846         struct ipahal_reg_valmask valmask;
2847
2848         for (i = start_pipe; i < end_pipe; i++) {
2849                 ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, i, &ep_aggr);
2850                 if (!ep_aggr.aggr_en)
2851                         continue;
2852                 IPADBG("Force close ep: %d\n", i);
2853                 if (desc_idx + 1 > desc_size) {
2854                         IPAERR("Internal error - no descriptors\n");
2855                         res = -EFAULT;
2856                         goto fail_no_desc;
2857                 }
2858
2859                 reg_write_agg_close.skip_pipeline_clear = false;
2860                 reg_write_agg_close.pipeline_clear_options =
2861                         IPAHAL_FULL_PIPELINE_CLEAR;
2862                 reg_write_agg_close.offset =
2863                         ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
2864                 ipahal_get_aggr_force_close_valmask(i, &valmask);
2865                 reg_write_agg_close.value = valmask.val;
2866                 reg_write_agg_close.value_mask = valmask.mask;
2867                 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
2868                         &reg_write_agg_close, false);
2869                 if (!cmd_pyld) {
2870                         IPAERR("failed to construct register_write imm cmd\n");
2871                         res = -ENOMEM;
2872                         goto fail_alloc_reg_write_agg_close;
2873                 }
2874
2875                 desc[desc_idx].opcode =
2876                         ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
2877                 desc[desc_idx].pyld = cmd_pyld->data;
2878                 desc[desc_idx].len = cmd_pyld->len;
2879                 desc[desc_idx].type = IPA_IMM_CMD_DESC;
2880                 desc[desc_idx].callback = ipa3_tag_destroy_imm;
2881                 desc[desc_idx].user1 = cmd_pyld;
2882                 desc_idx++;
2883         }
2884
2885         return desc_idx;
2886
2887 fail_alloc_reg_write_agg_close:
2888         for (i = 0; i < desc_idx; i++)
2889                 if (desc[desc_idx].callback)
2890                         desc[desc_idx].callback(desc[desc_idx].user1,
2891                                 desc[desc_idx].user2);
2892 fail_no_desc:
2893         return res;
2894 }
2895
2896 /**
2897  * ipa3_tag_aggr_force_close() - Force close aggregation
2898  *
2899  * @pipe_num: pipe number or -1 for all pipes
2900  */
2901 int ipa3_tag_aggr_force_close(int pipe_num)
2902 {
2903         struct ipa3_desc *desc;
2904         int res = -1;
2905         int start_pipe;
2906         int end_pipe;
2907         int num_descs;
2908         int num_aggr_descs;
2909
2910         if (pipe_num < -1 || pipe_num >= (int)ipa3_ctx->ipa_num_pipes) {
2911                 IPAERR("Invalid pipe number %d\n", pipe_num);
2912                 return -EINVAL;
2913         }
2914
2915         if (pipe_num == -1) {
2916                 start_pipe = 0;
2917                 end_pipe = ipa3_ctx->ipa_num_pipes;
2918         } else {
2919                 start_pipe = pipe_num;
2920                 end_pipe = pipe_num + 1;
2921         }
2922
2923         num_descs = end_pipe - start_pipe;
2924
2925         desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL);
2926         if (!desc) {
2927                 IPAERR("no mem\n");
2928                 return -ENOMEM;
2929         }
2930
2931         /* Force close aggregation on all valid pipes with aggregation */
2932         num_aggr_descs = ipa3_tag_generate_force_close_desc(desc, num_descs,
2933                                                 start_pipe, end_pipe);
2934         if (num_aggr_descs < 0) {
2935                 IPAERR("ipa3_tag_generate_force_close_desc failed %d\n",
2936                         num_aggr_descs);
2937                 goto fail_free_desc;
2938         }
2939
2940         res = ipa3_tag_process(desc, num_aggr_descs,
2941                               IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT);
2942
2943 fail_free_desc:
2944         kfree(desc);
2945
2946         return res;
2947 }
2948
2949 /**
2950  * ipa3_is_ready() - check if IPA module was initialized
2951  * successfully
2952  *
2953  * Return value: true for yes; false for no
2954  */
2955 bool ipa3_is_ready(void)
2956 {
2957         bool complete;
2958
2959         if (ipa3_ctx == NULL)
2960                 return false;
2961         mutex_lock(&ipa3_ctx->lock);
2962         complete = ipa3_ctx->ipa_initialization_complete;
2963         mutex_unlock(&ipa3_ctx->lock);
2964         return complete;
2965 }
2966
2967 /**
2968  * ipa3_is_client_handle_valid() - check if IPA client handle is valid handle
2969  *
2970  * Return value: true for yes; false for no
2971  */
2972 bool ipa3_is_client_handle_valid(u32 clnt_hdl)
2973 {
2974         if (clnt_hdl >= 0 && clnt_hdl < ipa3_ctx->ipa_num_pipes)
2975                 return true;
2976         return false;
2977 }
2978
2979 /**
2980  * ipa3_proxy_clk_unvote() - called to remove IPA clock proxy vote
2981  *
2982  * Return value: none
2983  */
2984 void ipa3_proxy_clk_unvote(void)
2985 {
2986         if (!ipa3_is_ready())
2987                 return;
2988
2989         mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
2990         if (ipa3_ctx->q6_proxy_clk_vote_valid) {
2991                 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
2992                 ipa3_ctx->q6_proxy_clk_vote_cnt--;
2993                 if (ipa3_ctx->q6_proxy_clk_vote_cnt == 0)
2994                         ipa3_ctx->q6_proxy_clk_vote_valid = false;
2995         }
2996         mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
2997 }
2998
2999 /**
3000  * ipa3_proxy_clk_vote() - called to add IPA clock proxy vote
3001  *
3002  * Return value: none
3003  */
3004 void ipa3_proxy_clk_vote(void)
3005 {
3006         if (!ipa3_is_ready())
3007                 return;
3008
3009         mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
3010         if (!ipa3_ctx->q6_proxy_clk_vote_valid ||
3011                 (ipa3_ctx->q6_proxy_clk_vote_cnt > 0)) {
3012                 IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
3013                 ipa3_ctx->q6_proxy_clk_vote_cnt++;
3014                 ipa3_ctx->q6_proxy_clk_vote_valid = true;
3015         }
3016         mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
3017 }
3018
3019 /**
3020  * ipa3_get_smem_restr_bytes()- Return IPA smem restricted bytes
3021  *
3022  * Return value: u16 - number of IPA smem restricted bytes
3023  */
3024 u16 ipa3_get_smem_restr_bytes(void)
3025 {
3026         if (ipa3_ctx)
3027                 return ipa3_ctx->smem_restricted_bytes;
3028
3029         IPAERR("IPA Driver not initialized\n");
3030
3031         return 0;
3032 }
3033
3034 /**
3035  * ipa3_get_modem_cfg_emb_pipe_flt()- Return ipa3_ctx->modem_cfg_emb_pipe_flt
3036  *
3037  * Return value: true if modem configures embedded pipe flt, false otherwise
3038  */
3039 bool ipa3_get_modem_cfg_emb_pipe_flt(void)
3040 {
3041         if (ipa3_ctx)
3042                 return ipa3_ctx->modem_cfg_emb_pipe_flt;
3043
3044         IPAERR("IPA driver has not been initialized\n");
3045
3046         return false;
3047 }
3048
3049 /**
3050  * ipa3_get_transport_type()- Return ipa3_ctx->transport_prototype
3051  *
3052  * Return value: enum ipa_transport_type
3053  */
3054 enum ipa_transport_type ipa3_get_transport_type(void)
3055 {
3056         if (ipa3_ctx)
3057                 return ipa3_ctx->transport_prototype;
3058
3059         IPAERR("IPA driver has not been initialized\n");
3060         return IPA_TRANSPORT_TYPE_GSI;
3061 }
3062
/**
 * ipa3_get_num_pipes() - read the IPA_ENABLED_PIPES register
 *
 * Return: the raw register value (presumably the count of enabled
 * pipes - named after the register; confirm against the HW spec).
 */
u32 ipa3_get_num_pipes(void)
{
	return ipahal_read_reg(IPA_ENABLED_PIPES);
}
3067
3068 /**
3069  * ipa3_disable_apps_wan_cons_deaggr()- set ipa_ctx->ipa_client_apps_wan_cons_agg_gro
3070  *
3071  * Return value: 0 or negative in case of failure
3072  */
3073 int ipa3_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
3074 {
3075         int res = -1;
3076         u32 limit;
3077
3078         /* checking if IPA-HW can support */
3079         limit = ipahal_aggr_get_max_byte_limit();
3080         if ((agg_size >> 10) > limit) {
3081                 IPAERR("IPA-AGG byte limit %d\n", limit);
3082                 IPAERR("exceed aggr_byte_limit\n");
3083                 return res;
3084         }
3085         limit = ipahal_aggr_get_max_pkt_limit();
3086         if (agg_count > limit) {
3087                 IPAERR("IPA-AGG pkt limit %d\n", limit);
3088                 IPAERR("exceed aggr_pkt_limit\n");
3089                 return res;
3090         }
3091
3092         if (ipa3_ctx) {
3093                 ipa3_ctx->ipa_client_apps_wan_cons_agg_gro = true;
3094                 return 0;
3095         }
3096         return res;
3097 }
3098
3099 static void *ipa3_get_ipc_logbuf(void)
3100 {
3101         if (ipa3_ctx)
3102                 return ipa3_ctx->logbuf;
3103
3104         return NULL;
3105 }
3106
3107 static void *ipa3_get_ipc_logbuf_low(void)
3108 {
3109         if (ipa3_ctx)
3110                 return ipa3_ctx->logbuf_low;
3111
3112         return NULL;
3113 }
3114
/* copy the cached HOLB configuration of endpoint @ep_idx into @holb;
 * @ep_idx is not range-checked - callers must pass a valid pipe index
 */
static void ipa3_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
{
	*holb = ipa3_ctx->ep[ep_idx].holb;
}
3119
/* record whether a TAG process should run before clock gating */
static void ipa3_set_tag_process_before_gating(bool val)
{
	ipa3_ctx->tag_process_before_gating = val;
}
3124
3125 int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
3126         struct ipa_api_controller *api_ctrl)
3127 {
3128         if (ipa_hw_type < IPA_HW_v3_0) {
3129                 IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type);
3130                 WARN_ON(1);
3131                 return -EPERM;
3132         }
3133
3134         api_ctrl->ipa_connect = ipa3_connect;
3135         api_ctrl->ipa_disconnect = ipa3_disconnect;
3136         api_ctrl->ipa_reset_endpoint = ipa3_reset_endpoint;
3137         api_ctrl->ipa_clear_endpoint_delay = ipa3_clear_endpoint_delay;
3138         api_ctrl->ipa_disable_endpoint = NULL;
3139         api_ctrl->ipa_cfg_ep = ipa3_cfg_ep;
3140         api_ctrl->ipa_cfg_ep_nat = ipa3_cfg_ep_nat;
3141         api_ctrl->ipa_cfg_ep_hdr = ipa3_cfg_ep_hdr;
3142         api_ctrl->ipa_cfg_ep_hdr_ext = ipa3_cfg_ep_hdr_ext;
3143         api_ctrl->ipa_cfg_ep_mode = ipa3_cfg_ep_mode;
3144         api_ctrl->ipa_cfg_ep_aggr = ipa3_cfg_ep_aggr;
3145         api_ctrl->ipa_cfg_ep_deaggr = ipa3_cfg_ep_deaggr;
3146         api_ctrl->ipa_cfg_ep_route = ipa3_cfg_ep_route;
3147         api_ctrl->ipa_cfg_ep_holb = ipa3_cfg_ep_holb;
3148         api_ctrl->ipa_get_holb = ipa3_get_holb;
3149         api_ctrl->ipa_set_tag_process_before_gating =
3150                         ipa3_set_tag_process_before_gating;
3151         api_ctrl->ipa_cfg_ep_cfg = ipa3_cfg_ep_cfg;
3152         api_ctrl->ipa_cfg_ep_metadata_mask = ipa3_cfg_ep_metadata_mask;
3153         api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client;
3154         api_ctrl->ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl;
3155         api_ctrl->ipa_add_hdr = ipa3_add_hdr;
3156         api_ctrl->ipa_add_hdr_usr = ipa3_add_hdr_usr;
3157         api_ctrl->ipa_del_hdr = ipa3_del_hdr;
3158         api_ctrl->ipa_commit_hdr = ipa3_commit_hdr;
3159         api_ctrl->ipa_reset_hdr = ipa3_reset_hdr;
3160         api_ctrl->ipa_get_hdr = ipa3_get_hdr;
3161         api_ctrl->ipa_put_hdr = ipa3_put_hdr;
3162         api_ctrl->ipa_copy_hdr = ipa3_copy_hdr;
3163         api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx;
3164         api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx;
3165         api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule;
3166         api_ctrl->ipa_add_rt_rule_usr = ipa3_add_rt_rule_usr;
3167         api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule;
3168         api_ctrl->ipa_commit_rt = ipa3_commit_rt;
3169         api_ctrl->ipa_reset_rt = ipa3_reset_rt;
3170         api_ctrl->ipa_get_rt_tbl = ipa3_get_rt_tbl;
3171         api_ctrl->ipa_put_rt_tbl = ipa3_put_rt_tbl;
3172         api_ctrl->ipa_query_rt_index = ipa3_query_rt_index;
3173         api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule;
3174         api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule;
3175         api_ctrl->ipa_add_flt_rule_usr = ipa3_add_flt_rule_usr;
3176         api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule;
3177         api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule;
3178         api_ctrl->ipa_commit_flt = ipa3_commit_flt;
3179         api_ctrl->ipa_reset_flt = ipa3_reset_flt;
3180         api_ctrl->allocate_nat_device = ipa3_allocate_nat_device;
3181         api_ctrl->ipa_nat_init_cmd = ipa3_nat_init_cmd;
3182         api_ctrl->ipa_nat_dma_cmd = ipa3_nat_dma_cmd;
3183         api_ctrl->ipa_nat_del_cmd = ipa3_nat_del_cmd;
3184         api_ctrl->ipa_send_msg = ipa3_send_msg;
3185         api_ctrl->ipa_register_pull_msg = ipa3_register_pull_msg;
3186         api_ctrl->ipa_deregister_pull_msg = ipa3_deregister_pull_msg;
3187         api_ctrl->ipa_register_intf = ipa3_register_intf;
3188         api_ctrl->ipa_register_intf_ext = ipa3_register_intf_ext;
3189         api_ctrl->ipa_deregister_intf = ipa3_deregister_intf;
3190         api_ctrl->ipa_set_aggr_mode = ipa3_set_aggr_mode;
3191         api_ctrl->ipa_set_qcncm_ndp_sig = ipa3_set_qcncm_ndp_sig;
3192         api_ctrl->ipa_set_single_ndp_per_mbim = ipa3_set_single_ndp_per_mbim;
3193         api_ctrl->ipa_tx_dp = ipa3_tx_dp;
3194         api_ctrl->ipa_tx_dp_mul = ipa3_tx_dp_mul;
3195         api_ctrl->ipa_free_skb = ipa3_free_skb;
3196         api_ctrl->ipa_setup_sys_pipe = ipa3_setup_sys_pipe;
3197         api_ctrl->ipa_teardown_sys_pipe = ipa3_teardown_sys_pipe;
3198         api_ctrl->ipa_sys_setup = ipa3_sys_setup;
3199         api_ctrl->ipa_sys_teardown = ipa3_sys_teardown;
3200         api_ctrl->ipa_sys_update_gsi_hdls = ipa3_sys_update_gsi_hdls;
3201         api_ctrl->ipa_connect_wdi_pipe = ipa3_connect_wdi_pipe;
3202         api_ctrl->ipa_disconnect_wdi_pipe = ipa3_disconnect_wdi_pipe;
3203         api_ctrl->ipa_enable_wdi_pipe = ipa3_enable_wdi_pipe;
3204         api_ctrl->ipa_disable_wdi_pipe = ipa3_disable_wdi_pipe;
3205         api_ctrl->ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe;
3206         api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
3207         api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
3208         api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
3209         api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
3210                         ipa3_broadcast_wdi_quota_reach_ind;
3211         api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
3212         api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
3213         api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
3214         api_ctrl->teth_bridge_init = ipa3_teth_bridge_init;
3215         api_ctrl->teth_bridge_disconnect = ipa3_teth_bridge_disconnect;
3216         api_ctrl->teth_bridge_connect = ipa3_teth_bridge_connect;
3217         api_ctrl->ipa_set_client = ipa3_set_client;
3218         api_ctrl->ipa_get_client = ipa3_get_client;
3219         api_ctrl->ipa_get_client_uplink = ipa3_get_client_uplink;
3220         api_ctrl->ipa_dma_init = ipa3_dma_init;
3221         api_ctrl->ipa_dma_enable = ipa3_dma_enable;
3222         api_ctrl->ipa_dma_disable = ipa3_dma_disable;
3223         api_ctrl->ipa_dma_sync_memcpy = ipa3_dma_sync_memcpy;
3224         api_ctrl->ipa_dma_async_memcpy = ipa3_dma_async_memcpy;
3225         api_ctrl->ipa_dma_uc_memcpy = ipa3_dma_uc_memcpy;
3226         api_ctrl->ipa_dma_destroy = ipa3_dma_destroy;
3227         api_ctrl->ipa_mhi_init_engine = ipa3_mhi_init_engine;
3228         api_ctrl->ipa_connect_mhi_pipe = ipa3_connect_mhi_pipe;
3229         api_ctrl->ipa_disconnect_mhi_pipe = ipa3_disconnect_mhi_pipe;
3230         api_ctrl->ipa_mhi_stop_gsi_channel = ipa3_mhi_stop_gsi_channel;
3231         api_ctrl->ipa_uc_mhi_reset_channel = ipa3_uc_mhi_reset_channel;
3232         api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
3233                         ipa3_qmi_enable_force_clear_datapath_send;
3234         api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
3235                         ipa3_qmi_disable_force_clear_datapath_send;
3236         api_ctrl->ipa_mhi_reset_channel_internal =
3237                         ipa3_mhi_reset_channel_internal;
3238         api_ctrl->ipa_mhi_start_channel_internal =
3239                         ipa3_mhi_start_channel_internal;
3240         api_ctrl->ipa_mhi_query_ch_info = ipa3_mhi_query_ch_info;
3241         api_ctrl->ipa_mhi_resume_channels_internal =
3242                         ipa3_mhi_resume_channels_internal;
3243         api_ctrl->ipa_has_open_aggr_frame = ipa3_has_open_aggr_frame;
3244         api_ctrl->ipa_mhi_destroy_channel = ipa3_mhi_destroy_channel;
3245         api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
3246                         ipa3_uc_mhi_send_dl_ul_sync_info;
3247         api_ctrl->ipa_uc_mhi_init = ipa3_uc_mhi_init;
3248         api_ctrl->ipa_uc_mhi_suspend_channel = ipa3_uc_mhi_suspend_channel;
3249         api_ctrl->ipa_uc_mhi_stop_event_update_channel =
3250                         ipa3_uc_mhi_stop_event_update_channel;
3251         api_ctrl->ipa_uc_mhi_cleanup = ipa3_uc_mhi_cleanup;
3252         api_ctrl->ipa_uc_state_check = ipa3_uc_state_check;
3253         api_ctrl->ipa_write_qmap_id = ipa3_write_qmap_id;
3254         api_ctrl->ipa_add_interrupt_handler = ipa3_add_interrupt_handler;
3255         api_ctrl->ipa_remove_interrupt_handler = ipa3_remove_interrupt_handler;
3256         api_ctrl->ipa_restore_suspend_handler = ipa3_restore_suspend_handler;
3257         api_ctrl->ipa_bam_reg_dump = ipa3_bam_reg_dump;
3258         api_ctrl->ipa_get_ep_mapping = ipa3_get_ep_mapping;
3259         api_ctrl->ipa_is_ready = ipa3_is_ready;
3260         api_ctrl->ipa_proxy_clk_vote = ipa3_proxy_clk_vote;
3261         api_ctrl->ipa_proxy_clk_unvote = ipa3_proxy_clk_unvote;
3262         api_ctrl->ipa_is_client_handle_valid = ipa3_is_client_handle_valid;
3263         api_ctrl->ipa_get_client_mapping = ipa3_get_client_mapping;
3264         api_ctrl->ipa_get_rm_resource_from_ep = ipa3_get_rm_resource_from_ep;
3265         api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
3266                 ipa3_get_modem_cfg_emb_pipe_flt;
3267         api_ctrl->ipa_get_transport_type = ipa3_get_transport_type;
3268         api_ctrl->ipa_ap_suspend = ipa3_ap_suspend;
3269         api_ctrl->ipa_ap_resume = ipa3_ap_resume;
3270         api_ctrl->ipa_get_smmu_domain = ipa3_get_smmu_domain;
3271         api_ctrl->ipa_disable_apps_wan_cons_deaggr =
3272                 ipa3_disable_apps_wan_cons_deaggr;
3273         api_ctrl->ipa_get_dma_dev = ipa3_get_dma_dev;
3274         api_ctrl->ipa_release_wdi_mapping = ipa3_release_wdi_mapping;
3275         api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping;
3276         api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
3277         api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
3278         api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb;
3279         api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks;
3280         api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks;
3281         api_ctrl->ipa_inc_client_enable_clks_no_block =
3282                 ipa3_inc_client_enable_clks_no_block;
3283         api_ctrl->ipa_suspend_resource_no_block =
3284                 ipa3_suspend_resource_no_block;
3285         api_ctrl->ipa_resume_resource = ipa3_resume_resource;
3286         api_ctrl->ipa_suspend_resource_sync = ipa3_suspend_resource_sync;
3287         api_ctrl->ipa_set_required_perf_profile =
3288                 ipa3_set_required_perf_profile;
3289         api_ctrl->ipa_get_ipc_logbuf = ipa3_get_ipc_logbuf;
3290         api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low;
3291         api_ctrl->ipa_rx_poll = ipa3_rx_poll;
3292         api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb;
3293         api_ctrl->ipa_setup_uc_ntn_pipes = ipa3_setup_uc_ntn_pipes;
3294         api_ctrl->ipa_tear_down_uc_offload_pipes =
3295                 ipa3_tear_down_uc_offload_pipes;
3296         api_ctrl->ipa_get_pdev = ipa3_get_pdev;
3297         api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa3_ntn_uc_reg_rdyCB;
3298         api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa3_ntn_uc_dereg_rdyCB;
3299
3300         return 0;
3301 }
3302
/**
 * ipa_is_modem_pipe()- Checks if pipe is owned by the modem
 *
 * @pipe_idx: pipe number
 * Return value: true if owned by modem, false otherwise
 */
bool ipa_is_modem_pipe(int pipe_idx)
{
	int client_idx;

	/* reject out-of-range pipe numbers before scanning the client map */
	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
		IPAERR("Bad pipe index!\n");
		return false;
	}

	/*
	 * A pipe is modem-owned iff some Q6 (modem) producer or consumer
	 * client maps to it; scan all clients and skip non-Q6 ones.
	 */
	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		if (!IPA_CLIENT_IS_Q6_CONS(client_idx) &&
			!IPA_CLIENT_IS_Q6_PROD(client_idx))
			continue;
		if (ipa3_get_ep_mapping(client_idx) == pipe_idx)
			return true;
	}

	return false;
}
3328
3329 static void ipa3_write_rsrc_grp_type_reg(int group_index,
3330                         enum ipa_rsrc_grp_type_src n, bool src,
3331                         struct ipahal_reg_rsrc_grp_cfg *val) {
3332
3333         if (src) {
3334                 switch (group_index) {
3335                 case IPA_GROUP_UL:
3336                 case IPA_GROUP_DL:
3337                         ipahal_write_reg_n_fields(
3338                                 IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
3339                                 n, val);
3340                         break;
3341                 case IPA_GROUP_DIAG:
3342                 case IPA_GROUP_DMA:
3343                         ipahal_write_reg_n_fields(
3344                                 IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
3345                                 n, val);
3346                         break;
3347                 case IPA_GROUP_Q6ZIP:
3348                 case IPA_GROUP_UC_RX_Q:
3349                         ipahal_write_reg_n_fields(
3350                                 IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
3351                                 n, val);
3352                         break;
3353                 default:
3354                         IPAERR(
3355                         " Invalid source resource group,index #%d\n",
3356                         group_index);
3357                         break;
3358                 }
3359         } else {
3360                 switch (group_index) {
3361                 case IPA_GROUP_UL:
3362                 case IPA_GROUP_DL:
3363                         ipahal_write_reg_n_fields(
3364                                 IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
3365                                 n, val);
3366                         break;
3367                 case IPA_GROUP_DIAG:
3368                 case IPA_GROUP_DMA:
3369                         ipahal_write_reg_n_fields(
3370                                 IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
3371                                 n, val);
3372                         break;
3373                 case IPA_GROUP_Q6ZIP_GENERAL:
3374                 case IPA_GROUP_Q6ZIP_ENGINE:
3375                         ipahal_write_reg_n_fields(
3376                                 IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
3377                                 n, val);
3378                         break;
3379                 default:
3380                         IPAERR(
3381                         " Invalid destination resource group,index #%d\n",
3382                         group_index);
3383                         break;
3384                 }
3385         }
3386 }
3387
3388 static void ipa3_configure_rx_hps_clients(int depth, bool min)
3389 {
3390         int i;
3391         struct ipahal_reg_rx_hps_clients val;
3392
3393         /*
3394          * depth 0 contains 4 first clients out of 6
3395          * depth 1 contains 2 last clients out of 6
3396          */
3397         for (i = 0 ; i < (depth ? 2 : 4) ; i++) {
3398                 if (min)
3399                         val.client_minmax[i] =
3400                                 ipa3_rsrc_rx_grp_config
3401                                 [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
3402                                 [!depth ? i : 4 + i].min;
3403                 else
3404                         val.client_minmax[i] =
3405                                 ipa3_rsrc_rx_grp_config
3406                                 [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
3407                                 [!depth ? i : 4 + i].max;
3408         }
3409         if (depth) {
3410                 ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 :
3411                                         IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
3412                                         &val);
3413         } else {
3414                 ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 :
3415                                         IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
3416                                         &val);
3417         }
3418 }
3419
3420 void ipa3_set_resorce_groups_min_max_limits(void)
3421 {
3422         int i;
3423         int j;
3424         struct ipahal_reg_rsrc_grp_cfg val;
3425
3426         IPADBG("ENTER\n");
3427         IPADBG("Assign source rsrc groups min-max limits\n");
3428
3429         for (i = 0; i < IPA_RSRC_GRP_TYPE_SRC_MAX; i++) {
3430                 for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
3431                         val.x_min = ipa3_rsrc_src_grp_config[i][j].min;
3432                         val.x_max = ipa3_rsrc_src_grp_config[i][j].max;
3433                         val.y_min = ipa3_rsrc_src_grp_config[i][j + 1].min;
3434                         val.y_max = ipa3_rsrc_src_grp_config[i][j + 1].max;
3435                         ipa3_write_rsrc_grp_type_reg(j, i, true, &val);
3436                 }
3437         }
3438
3439         IPADBG("Assign destination rsrc groups min-max limits\n");
3440
3441         for (i = 0; i < IPA_RSRC_GRP_TYPE_DST_MAX; i++) {
3442                 for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
3443                         val.x_min = ipa3_rsrc_dst_grp_config[i][j].min;
3444                         val.x_max = ipa3_rsrc_dst_grp_config[i][j].max;
3445                         val.y_min = ipa3_rsrc_dst_grp_config[i][j + 1].min;
3446                         val.y_max = ipa3_rsrc_dst_grp_config[i][j + 1].max;
3447                         ipa3_write_rsrc_grp_type_reg(j, i, false, &val);
3448                 }
3449         }
3450
3451         /* move resource group configuration from HLOS to TZ */
3452         if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
3453                 IPAERR("skip configuring ipa_rx_hps_clients from HLOS\n");
3454                 return;
3455         }
3456
3457         IPADBG("Assign RX_HPS CMDQ rsrc groups min-max limits\n");
3458
3459         ipa3_configure_rx_hps_clients(0, true);
3460         ipa3_configure_rx_hps_clients(1, true);
3461         ipa3_configure_rx_hps_clients(0, false);
3462         ipa3_configure_rx_hps_clients(1, false);
3463
3464         IPADBG("EXIT\n");
3465 }
3466
3467 static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep)
3468 {
3469         bool empty;
3470
3471         IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl);
3472         gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
3473         gsi_is_channel_empty(ep->gsi_chan_hdl, &empty);
3474         if (!empty) {
3475                 IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl);
3476                 /* queue a work to start polling if don't have one */
3477                 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
3478                 if (!atomic_read(&ep->sys->curr_polling_state)) {
3479                         ipa3_inc_acquire_wakelock();
3480                         atomic_set(&ep->sys->curr_polling_state, 1);
3481                         queue_work(ep->sys->wq, &ep->sys->work);
3482                 }
3483         }
3484 }
3485
3486 void ipa3_suspend_apps_pipes(bool suspend)
3487 {
3488         struct ipa_ep_cfg_ctrl cfg;
3489         int ipa_ep_idx;
3490         struct ipa3_ep_context *ep;
3491
3492         memset(&cfg, 0, sizeof(cfg));
3493         cfg.ipa_ep_suspend = suspend;
3494
3495         ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
3496         if (ipa_ep_idx < 0) {
3497                 IPAERR("IPA client mapping failed\n");
3498                 ipa_assert();
3499                 return;
3500         }
3501         ep = &ipa3_ctx->ep[ipa_ep_idx];
3502         if (ep->valid) {
3503                 IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
3504                         ipa_ep_idx);
3505                 ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
3506                 if (suspend)
3507                         ipa3_gsi_poll_after_suspend(ep);
3508                 else if (!atomic_read(&ep->sys->curr_polling_state))
3509                         gsi_config_channel_mode(ep->gsi_chan_hdl,
3510                                 GSI_CHAN_MODE_CALLBACK);
3511         }
3512
3513         ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
3514         /* Considering the case for SSR. */
3515         if (ipa_ep_idx == -1) {
3516                 IPADBG("Invalid client.\n");
3517                 return;
3518         }
3519         ep = &ipa3_ctx->ep[ipa_ep_idx];
3520         if (ep->valid) {
3521                 IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
3522                         ipa_ep_idx);
3523                 ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
3524                 if (suspend)
3525                         ipa3_gsi_poll_after_suspend(ep);
3526                 else if (!atomic_read(&ep->sys->curr_polling_state))
3527                         gsi_config_channel_mode(ep->gsi_chan_hdl,
3528                                 GSI_CHAN_MODE_CALLBACK);
3529         }
3530 }
3531
3532 int ipa3_allocate_dma_task_for_gsi(void)
3533 {
3534         struct ipahal_imm_cmd_dma_task_32b_addr cmd = { 0 };
3535
3536         IPADBG("Allocate mem\n");
3537         ipa3_ctx->dma_task_info.mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE;
3538         ipa3_ctx->dma_task_info.mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
3539                 ipa3_ctx->dma_task_info.mem.size,
3540                 &ipa3_ctx->dma_task_info.mem.phys_base,
3541                 GFP_KERNEL);
3542         if (!ipa3_ctx->dma_task_info.mem.base) {
3543                 IPAERR("no mem\n");
3544                 return -EFAULT;
3545         }
3546
3547         cmd.flsh = 1;
3548         cmd.size1 = ipa3_ctx->dma_task_info.mem.size;
3549         cmd.addr1 = ipa3_ctx->dma_task_info.mem.phys_base;
3550         cmd.packet_size = ipa3_ctx->dma_task_info.mem.size;
3551         ipa3_ctx->dma_task_info.cmd_pyld = ipahal_construct_imm_cmd(
3552                         IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false);
3553         if (!ipa3_ctx->dma_task_info.cmd_pyld) {
3554                 IPAERR("failed to construct dma_task_32b_addr cmd\n");
3555                 dma_free_coherent(ipa3_ctx->pdev,
3556                         ipa3_ctx->dma_task_info.mem.size,
3557                         ipa3_ctx->dma_task_info.mem.base,
3558                         ipa3_ctx->dma_task_info.mem.phys_base);
3559                 memset(&ipa3_ctx->dma_task_info, 0,
3560                         sizeof(ipa3_ctx->dma_task_info));
3561                 return -EFAULT;
3562         }
3563
3564         return 0;
3565 }
3566
3567 void ipa3_free_dma_task_for_gsi(void)
3568 {
3569         dma_free_coherent(ipa3_ctx->pdev,
3570                 ipa3_ctx->dma_task_info.mem.size,
3571                 ipa3_ctx->dma_task_info.mem.base,
3572                 ipa3_ctx->dma_task_info.mem.phys_base);
3573         ipahal_destroy_imm_cmd(ipa3_ctx->dma_task_info.cmd_pyld);
3574         memset(&ipa3_ctx->dma_task_info, 0, sizeof(ipa3_ctx->dma_task_info));
3575 }
3576
3577 /**
3578  * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
3579  *
3580  * Send a DMA_TASK of 1B to IPA to unblock GSI channel in STOP_IN_PROG.
3581  * Return value: 0 on success, negative otherwise
3582  */
3583 int ipa3_inject_dma_task_for_gsi(void)
3584 {
3585         struct ipa3_desc desc = {0};
3586
3587         desc.opcode = ipahal_imm_cmd_get_opcode_param(
3588                 IPA_IMM_CMD_DMA_TASK_32B_ADDR, 1);
3589         desc.pyld = ipa3_ctx->dma_task_info.cmd_pyld->data;
3590         desc.len = ipa3_ctx->dma_task_info.cmd_pyld->len;
3591         desc.type = IPA_IMM_CMD_DESC;
3592
3593         IPADBG("sending 1B packet to IPA\n");
3594         if (ipa3_send_cmd_timeout(1, &desc,
3595                 IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) {
3596                 IPAERR("ipa3_send_cmd failed\n");
3597                 return -EFAULT;
3598         }
3599
3600         return 0;
3601 }
3602
3603 /**
3604  * ipa3_stop_gsi_channel()- Stops a GSI channel in IPA
3605  * @chan_hdl: GSI channel handle
3606  *
3607  * This function implements the sequence to stop a GSI channel
3608  * in IPA. This function returns when the channel is is STOP state.
3609  *
3610  * Return value: 0 on success, negative otherwise
3611  */
3612 int ipa3_stop_gsi_channel(u32 clnt_hdl)
3613 {
3614         struct ipa_mem_buffer mem;
3615         int res = 0;
3616         int i;
3617         struct ipa3_ep_context *ep;
3618
3619         if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
3620                 ipa3_ctx->ep[clnt_hdl].valid == 0) {
3621                 IPAERR("bad parm.\n");
3622                 return -EINVAL;
3623         }
3624
3625         ep = &ipa3_ctx->ep[clnt_hdl];
3626
3627         IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
3628
3629         memset(&mem, 0, sizeof(mem));
3630
3631         if (IPA_CLIENT_IS_PROD(ep->client)) {
3632                 IPADBG("Calling gsi_stop_channel ch:%lu\n",
3633                         ep->gsi_chan_hdl);
3634                 res = gsi_stop_channel(ep->gsi_chan_hdl);
3635                 IPADBG("gsi_stop_channel ch: %lu returned %d\n",
3636                         ep->gsi_chan_hdl, res);
3637                 goto end_sequence;
3638         }
3639
3640         for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
3641                 IPADBG("Calling gsi_stop_channel ch:%lu\n",
3642                         ep->gsi_chan_hdl);
3643                 res = gsi_stop_channel(ep->gsi_chan_hdl);
3644                 IPADBG("gsi_stop_channel ch: %lu returned %d\n",
3645                         ep->gsi_chan_hdl, res);
3646                 if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
3647                         goto end_sequence;
3648
3649                 IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
3650                 /* Send a 1B packet DMA_TASK to IPA and try again */
3651                 res = ipa3_inject_dma_task_for_gsi();
3652                 if (res) {
3653                         IPAERR("Failed to inject DMA TASk for GSI\n");
3654                         goto end_sequence;
3655                 }
3656
3657                 /* sleep for short period to flush IPA */
3658                 usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
3659                         IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
3660         }
3661
3662         IPAERR("Failed  to stop GSI channel with retries\n");
3663         res = -EFAULT;
3664 end_sequence:
3665         IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
3666
3667         return res;
3668 }
3669
3670 static int ipa3_load_single_fw(const struct firmware *firmware,
3671         const struct elf32_phdr *phdr)
3672 {
3673         uint32_t *fw_mem_base;
3674         int index;
3675         const uint32_t *elf_data_ptr;
3676
3677         if (phdr->p_offset > firmware->size) {
3678                 IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n",
3679                         phdr->p_offset, firmware->size);
3680                 return -EINVAL;
3681         }
3682         if ((firmware->size - phdr->p_offset) < phdr->p_filesz) {
3683                 IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n",
3684                         phdr->p_offset, phdr->p_filesz, firmware->size);
3685                 return -EINVAL;
3686         }
3687
3688         if (phdr->p_memsz % sizeof(uint32_t)) {
3689                 IPAERR("FW mem size %u doesn't align to 32bit\n",
3690                         phdr->p_memsz);
3691                 return -EFAULT;
3692         }
3693
3694         if (phdr->p_filesz > phdr->p_memsz) {
3695                 IPAERR("FW image too big src_size=%u dst_size=%u\n",
3696                         phdr->p_filesz, phdr->p_memsz);
3697                 return -EFAULT;
3698         }
3699
3700         fw_mem_base = ioremap(phdr->p_vaddr, phdr->p_memsz);
3701         if (!fw_mem_base) {
3702                 IPAERR("Failed to map 0x%x for the size of %u\n",
3703                         phdr->p_vaddr, phdr->p_memsz);
3704                 return -ENOMEM;
3705         }
3706
3707         /* Set the entire region to 0s */
3708         memset(fw_mem_base, 0, phdr->p_memsz);
3709
3710         elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset);
3711
3712         /* Write the FW */
3713         for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) {
3714                 writel_relaxed(*elf_data_ptr, &fw_mem_base[index]);
3715                 elf_data_ptr++;
3716         }
3717
3718         iounmap(fw_mem_base);
3719
3720         return 0;
3721 }
3722
3723 /**
3724  * ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
3725  *
3726  * @firmware: Structure which contains the FW data from the user space.
3727  * @gsi_mem_base: GSI base address
3728  *
3729  * Return value: 0 on success, negative otherwise
3730  *
3731  */
3732 int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base)
3733 {
3734         const struct elf32_hdr *ehdr;
3735         const struct elf32_phdr *phdr;
3736         unsigned long gsi_iram_ofst;
3737         unsigned long gsi_iram_size;
3738         phys_addr_t ipa_reg_mem_base;
3739         u32 ipa_reg_ofst;
3740         int rc;
3741
3742         if (!gsi_mem_base) {
3743                 IPAERR("Invalid GSI base address\n");
3744                 return -EINVAL;
3745         }
3746
3747         ipa_assert_on(!firmware);
3748         /* One program header per FW image: GSI, DPS and HPS */
3749         if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) {
3750                 IPAERR("Missing ELF and Program headers firmware size=%zu\n",
3751                         firmware->size);
3752                 return -EINVAL;
3753         }
3754
3755         ehdr = (struct elf32_hdr *) firmware->data;
3756         ipa_assert_on(!ehdr);
3757         if (ehdr->e_phnum != 3) {
3758                 IPAERR("Unexpected number of ELF program headers\n");
3759                 return -EINVAL;
3760         }
3761         phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr));
3762
3763         /*
3764          * Each ELF program header represents a FW image and contains:
3765          *  p_vaddr : The starting address to which the FW needs to loaded.
3766          *  p_memsz : The size of the IRAM (where the image loaded)
3767          *  p_filesz: The size of the FW image embedded inside the ELF
3768          *  p_offset: Absolute offset to the image from the head of the ELF
3769          */
3770
3771         /* Load GSI FW image */
3772         gsi_get_inst_ram_offset_and_size(&gsi_iram_ofst, &gsi_iram_size);
3773         if (phdr->p_vaddr != (gsi_mem_base + gsi_iram_ofst)) {
3774                 IPAERR(
3775                         "Invalid GSI FW img load addr vaddr=0x%x gsi_mem_base=%pa gsi_iram_ofst=0x%lx\n"
3776                         , phdr->p_vaddr, &gsi_mem_base, gsi_iram_ofst);
3777                 return -EINVAL;
3778         }
3779         if (phdr->p_memsz > gsi_iram_size) {
3780                 IPAERR("Invalid GSI FW img size memsz=%d gsi_iram_size=%lu\n",
3781                         phdr->p_memsz, gsi_iram_size);
3782                 return -EINVAL;
3783         }
3784         rc = ipa3_load_single_fw(firmware, phdr);
3785         if (rc)
3786                 return rc;
3787
3788         phdr++;
3789         ipa_reg_mem_base = ipa3_ctx->ipa_wrapper_base + ipahal_get_reg_base();
3790
3791         /* Load IPA DPS FW image */
3792         ipa_reg_ofst = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_FIRST);
3793         if (phdr->p_vaddr != (ipa_reg_mem_base + ipa_reg_ofst)) {
3794                 IPAERR(
3795                         "Invalid IPA DPS img load addr vaddr=0x%x ipa_reg_mem_base=%pa ipa_reg_ofst=%u\n"
3796                         , phdr->p_vaddr, &ipa_reg_mem_base, ipa_reg_ofst);
3797                 return -EINVAL;
3798         }
3799         if (phdr->p_memsz > ipahal_get_dps_img_mem_size()) {
3800                 IPAERR("Invalid IPA DPS img size memsz=%d dps_mem_size=%u\n",
3801                         phdr->p_memsz, ipahal_get_dps_img_mem_size());
3802                 return -EINVAL;
3803         }
3804         rc = ipa3_load_single_fw(firmware, phdr);
3805         if (rc)
3806                 return rc;
3807
3808         phdr++;
3809
3810         /* Load IPA HPS FW image */
3811         ipa_reg_ofst = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_FIRST);
3812         if (phdr->p_vaddr != (ipa_reg_mem_base + ipa_reg_ofst)) {
3813                 IPAERR(
3814                         "Invalid IPA HPS img load addr vaddr=0x%x ipa_reg_mem_base=%pa ipa_reg_ofst=%u\n"
3815                         , phdr->p_vaddr, &ipa_reg_mem_base, ipa_reg_ofst);
3816                 return -EINVAL;
3817         }
3818         if (phdr->p_memsz > ipahal_get_hps_img_mem_size()) {
3819                 IPAERR("Invalid IPA HPS img size memsz=%d dps_mem_size=%u\n",
3820                         phdr->p_memsz, ipahal_get_hps_img_mem_size());
3821                 return -EINVAL;
3822         }
3823         rc = ipa3_load_single_fw(firmware, phdr);
3824         if (rc)
3825                 return rc;
3826
3827         IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n");
3828         return 0;
3829 }
3830
3831 /**
3832  * ipa3_is_msm_device() - Is the running device a MSM or MDM?
3833  *  Determine according to IPA version
3834  *
3835  * Return value: true if MSM, false if MDM
3836  *
3837  */
3838 bool ipa3_is_msm_device(void)
3839 {
3840         switch (ipa3_ctx->ipa_hw_type) {
3841         case IPA_HW_v3_0:
3842         case IPA_HW_v3_5:
3843                 return false;
3844         case IPA_HW_v3_1:
3845         case IPA_HW_v3_5_1:
3846                 return true;
3847         default:
3848                 IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type);
3849                 ipa_assert();
3850         }
3851
3852         return false;
3853 }
3854
3855 /**
3856  * ipa3_get_pdev() - return a pointer to IPA dev struct
3857  *
3858  * Return value: a pointer to IPA dev struct
3859  *
3860  */
3861 struct device *ipa3_get_pdev(void)
3862 {
3863         if (!ipa3_ctx)
3864                 return NULL;
3865
3866         return ipa3_ctx->pdev;
3867 }