// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK  BIT(0)
#define CMDQ_POLL_ENABLE_MASK   BIT(0)
#define CMDQ_EOC_IRQ_EN         BIT(0)

struct cmdq_instruction {
        union {
                u32 value;
                u32 mask;
        };
        union {
                u16 offset;
                u16 event;
        };
        u8 subsys;
        u8 op;
};

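/*
 * cmdq_dev_get_client_reg() - fill a cmdq_client_reg from the idx-th entry of
 * the client's "mediatek,gce-client-reg" device-tree property. The three
 * phandle arguments are the GCE subsys ID, the register offset within that
 * subsys, and the size of the register range.
 */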
int cmdq_dev_get_client_reg(struct device *dev,
                            struct cmdq_client_reg *client_reg, int idx)
{
        struct of_phandle_args spec;
        int err;

        if (!client_reg)
                return -ENOENT;

        err = of_parse_phandle_with_fixed_args(dev->of_node,
                                               "mediatek,gce-client-reg",
                                               3, idx, &spec);
        if (err < 0) {
                dev_err(dev,
                        "error %d can't parse gce-client-reg property (%d)",
                        err, idx);

                return err;
        }

        client_reg->subsys = (u8)spec.args[0];
        client_reg->offset = (u16)spec.args[1];
        client_reg->size = (u16)spec.args[2];
        of_node_put(spec.np);

        return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);

static void cmdq_client_timeout(struct timer_list *t)
{
        struct cmdq_client *client = from_timer(client, t, timer);

        dev_err(client->client.dev, "cmdq timeout!\n");
}

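/*
 * cmdq_mbox_create() - allocate a CMDQ mailbox client and request the GCE
 * mailbox channel at @index. When @timeout is not CMDQ_NO_TIMEOUT, a software
 * timer is set up; it is armed while packets are in flight so a stuck flush
 * is reported by cmdq_client_timeout(). Returns an ERR_PTR() on failure.
 */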
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
        struct cmdq_client *client;

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);

        client->timeout_ms = timeout;
        if (timeout != CMDQ_NO_TIMEOUT) {
                spin_lock_init(&client->lock);
                timer_setup(&client->timer, cmdq_client_timeout, 0);
        }
        client->pkt_cnt = 0;
        client->client.dev = dev;
        client->client.tx_block = false;
        client->chan = mbox_request_channel(&client->client, index);

        if (IS_ERR(client->chan)) {
                long err;

                dev_err(dev, "failed to request channel\n");
                err = PTR_ERR(client->chan);
                kfree(client);

                return ERR_PTR(err);
        }

        return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
        if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
                spin_lock(&client->lock);
                del_timer_sync(&client->timer);
                spin_unlock(&client->lock);
        }
        mbox_free_channel(client->chan);
        kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

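/*
 * cmdq_pkt_create() - allocate a command packet with a @size byte instruction
 * buffer and map it for DMA towards the GCE. The caller owns the packet and
 * must release it with cmdq_pkt_destroy(). Returns an ERR_PTR() on failure.
 */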
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
        struct cmdq_pkt *pkt;
        struct device *dev;
        dma_addr_t dma_addr;

        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt)
                return ERR_PTR(-ENOMEM);
        pkt->va_base = kzalloc(size, GFP_KERNEL);
        if (!pkt->va_base) {
                kfree(pkt);
                return ERR_PTR(-ENOMEM);
        }
        pkt->buf_size = size;
        pkt->cl = (void *)client;

        dev = client->chan->mbox->dev;
        dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_addr)) {
                dev_err(dev, "dma map failed, size=%zu\n", size);
                kfree(pkt->va_base);
                kfree(pkt);
                return ERR_PTR(-ENOMEM);
        }

        pkt->pa_base = dma_addr;

        return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
        struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

        dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
                         DMA_TO_DEVICE);
        kfree(pkt->va_base);
        kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

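/*
 * cmdq_pkt_append_command() - copy one cmdq_instruction into the packet buffer
 * and advance pkt->cmd_buf_size by CMDQ_INST_SIZE. When the buffer is full the
 * size counter is still advanced (see the comment below) and -ENOMEM is
 * returned.
 */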
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
                                   struct cmdq_instruction inst)
{
        struct cmdq_instruction *cmd_ptr;

        if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
                /*
                 * When the allocated buffer size (pkt->buf_size) is used up,
                 * the real required size (pkt->cmd_buf_size) is still
                 * increased, so that the user knows how much memory should be
                 * ultimately allocated after appending all commands and
                 * flushing the command packet. Therefore, the user can call
                 * cmdq_pkt_create() again with the real required buffer size.
                 */
                pkt->cmd_buf_size += CMDQ_INST_SIZE;
                WARN_ONCE(1, "%s: buffer size %u is too small!\n",
                          __func__, (u32)pkt->buf_size);
                return -ENOMEM;
        }

        cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
        *cmd_ptr = inst;
        pkt->cmd_buf_size += CMDQ_INST_SIZE;

        return 0;
}

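/*
 * cmdq_pkt_write() - append a WRITE instruction that stores @value to the
 * register at @offset within GCE @subsys.
 */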
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
        struct cmdq_instruction inst;

        inst.op = CMDQ_CODE_WRITE;
        inst.value = value;
        inst.offset = offset;
        inst.subsys = subsys;

        return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

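/*
 * cmdq_pkt_write_mask() - masked register write. For a partial @mask a MASK
 * instruction carrying the inverted mask is emitted first, and the WRITE that
 * follows has CMDQ_WRITE_ENABLE_MASK set in its offset so the GCE applies the
 * mask; a full 0xffffffff mask degenerates to a plain cmdq_pkt_write().
 */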
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
                        u16 offset, u32 value, u32 mask)
{
        struct cmdq_instruction inst = { {0} };
        u16 offset_mask = offset;
        int err;

        if (mask != 0xffffffff) {
                inst.op = CMDQ_CODE_MASK;
                inst.mask = ~mask;
                err = cmdq_pkt_append_command(pkt, inst);
                if (err < 0)
                        return err;

                offset_mask |= CMDQ_WRITE_ENABLE_MASK;
        }
        err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

        return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

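/*
 * cmdq_pkt_wfe() - append a wait-for-event instruction; execution of the
 * packet blocks on the GCE until hardware @event fires. cmdq_pkt_clear_event()
 * below uses the same opcode with CMDQ_WFE_UPDATE to clear the event instead
 * of waiting on it.
 */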
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
        struct cmdq_instruction inst = { {0} };

        if (event >= CMDQ_MAX_EVENT)
                return -EINVAL;

        inst.op = CMDQ_CODE_WFE;
        inst.value = CMDQ_WFE_OPTION;
        inst.event = event;

        return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
        struct cmdq_instruction inst = { {0} };

        if (event >= CMDQ_MAX_EVENT)
                return -EINVAL;

        inst.op = CMDQ_CODE_WFE;
        inst.value = CMDQ_WFE_UPDATE;
        inst.event = event;

        return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

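/*
 * cmdq_pkt_poll() - append a POLL instruction; the GCE thread polls the
 * register at @offset in @subsys until it reads back @value.
 */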
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
                  u16 offset, u32 value)
{
        struct cmdq_instruction inst = { {0} };
        int err;

        inst.op = CMDQ_CODE_POLL;
        inst.value = value;
        inst.offset = offset;
        inst.subsys = subsys;
        err = cmdq_pkt_append_command(pkt, inst);

        return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);

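/*
 * cmdq_pkt_poll_mask() - masked variant of cmdq_pkt_poll(). A MASK instruction
 * with the inverted @mask is emitted first, then the POLL offset is tagged
 * with CMDQ_POLL_ENABLE_MASK so only the masked bits are compared.
 */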
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
                       u16 offset, u32 value, u32 mask)
{
        struct cmdq_instruction inst = { {0} };
        int err;

        inst.op = CMDQ_CODE_MASK;
        inst.mask = ~mask;
        err = cmdq_pkt_append_command(pkt, inst);
        if (err < 0)
                return err;

        offset = offset | CMDQ_POLL_ENABLE_MASK;
        err = cmdq_pkt_poll(pkt, subsys, offset, value);

        return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

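/*
 * cmdq_pkt_finalize() - terminate the packet with an end-of-command (EOC)
 * instruction that raises an IRQ, followed by a JUMP, so the GCE thread
 * signals completion after executing the appended commands.
 */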
static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
        struct cmdq_instruction inst = { {0} };
        int err;

        /* insert EOC and generate IRQ for each command iteration */
        inst.op = CMDQ_CODE_EOC;
        inst.value = CMDQ_EOC_IRQ_EN;
        err = cmdq_pkt_append_command(pkt, inst);
        if (err < 0)
                return err;

        /* JUMP to end */
        inst.op = CMDQ_CODE_JUMP;
        inst.value = CMDQ_JUMP_PASS;
        err = cmdq_pkt_append_command(pkt, inst);

        return err;
}

static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
        struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
        struct cmdq_task_cb *cb = &pkt->cb;
        struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

        if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
                unsigned long flags = 0;

                spin_lock_irqsave(&client->lock, flags);
                if (--client->pkt_cnt == 0)
                        del_timer(&client->timer);
                else
                        mod_timer(&client->timer, jiffies +
                                  msecs_to_jiffies(client->timeout_ms));
                spin_unlock_irqrestore(&client->lock, flags);
        }

        dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
                                pkt->cmd_buf_size, DMA_TO_DEVICE);
        if (cb->cb) {
                data.data = cb->data;
                cb->cb(data);
        }
}

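/*
 * cmdq_pkt_flush_async() - finalize the packet (EOC + JUMP), sync the buffer
 * for the device, arm the client timeout timer for the first outstanding
 * packet, and hand the packet to the GCE mailbox channel. @cb is invoked with
 * @data from cmdq_pkt_flush_async_cb() once the mailbox reports completion.
 */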
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
                         void *data)
{
        int err;
        unsigned long flags = 0;
        struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

        err = cmdq_pkt_finalize(pkt);
        if (err < 0)
                return err;

        pkt->cb.cb = cb;
        pkt->cb.data = data;
        pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
        pkt->async_cb.data = pkt;

        dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
                                   pkt->cmd_buf_size, DMA_TO_DEVICE);

        if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
                spin_lock_irqsave(&client->lock, flags);
                if (client->pkt_cnt++ == 0)
                        mod_timer(&client->timer, jiffies +
                                  msecs_to_jiffies(client->timeout_ms));
                spin_unlock_irqrestore(&client->lock, flags);
        }

        mbox_send_message(client->chan, pkt);
        /* We can send next packet immediately, so just call txdone. */
        mbox_client_txdone(client->chan, 0);

        return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

struct cmdq_flush_completion {
        struct completion cmplt;
        bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
        struct cmdq_flush_completion *cmplt;

        cmplt = (struct cmdq_flush_completion *)data.data;
        cmplt->err = (data.sta != CMDQ_CB_NORMAL);
        complete(&cmplt->cmplt);
}

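/*
 * cmdq_pkt_flush() - synchronous wrapper around cmdq_pkt_flush_async(): it
 * blocks on a completion signalled by cmdq_pkt_flush_cb() and returns -EFAULT
 * if the callback reported an abnormal status.
 *
 * Illustrative client flow (a minimal sketch, not taken from this file; the
 * channel index, subsys/offset/event values and buffer size are made up,
 * and error handling is omitted):
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *
 *	cmdq_pkt_wfe(pkt, event_id);                     // wait for a HW event
 *	cmdq_pkt_write_mask(pkt, subsys, reg_off, val, 0xffff);
 *	cmdq_pkt_flush(pkt);                             // run on the GCE, wait
 *
 *	cmdq_pkt_destroy(pkt);
 *	cmdq_mbox_destroy(cl);
 */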
int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
        struct cmdq_flush_completion cmplt;
        int err;

        init_completion(&cmplt.cmplt);
        err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
        if (err < 0)
                return err;
        wait_for_completion(&cmplt.cmplt);

        return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);

MODULE_LICENSE("GPL v2");