// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
	};
	union {
		u16 offset;
		u16 event;
	};
	u8 subsys;
	u8 op;
};
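/*
 * Editorial note (an assumption read off how the helpers below fill this
 * struct, not taken from GCE hardware documentation): each instruction is
 * one 64-bit word of CMDQ_INST_SIZE bytes, and on the little-endian SoCs
 * this driver targets the fields pack as:
 *
 *   bits  0-31  value / mask    immediate value, or mask for CMDQ_CODE_MASK
 *   bits 32-47  offset / event  register offset, or event ID for WFE
 *   bits 48-55  subsys          client subsystem ID
 *   bits 56-63  op              opcode
 */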
int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)\n",
			err, idx);
		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);
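/*
 * Illustrative sketch (not part of this file): the property parsed above
 * is expected to carry three cells per entry, <&gce subsys offset size>.
 * The node name and numbers below are made-up placeholders:
 *
 *   mmsys: syscon@14000000 {
 *           compatible = "syscon";
 *           mediatek,gce-client-reg = <&gce 3 0x3000 0x1000>;
 *   };
 *
 * A client driver would then resolve entry 0 with:
 *
 *   struct cmdq_client_reg creg;
 *   int err = cmdq_dev_get_client_reg(dev, &creg, 0);
 */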
static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);
void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);
void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * Once the allocated buffer (pkt->buf_size) is used up, the
		 * real required size (pkt->cmd_buf_size) still keeps growing,
		 * so that after appending all commands and flushing the
		 * packet the user knows how much memory is ultimately needed.
		 * Therefore, the user can call cmdq_pkt_create() again with
		 * the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}
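/*
 * Sketch of the resizing pattern described in the comment above (the
 * initial size and the build_commands() helper are hypothetical). After
 * an overflow, pkt->cmd_buf_size has kept growing past pkt->buf_size, so
 * it records the size to request on a second attempt:
 *
 *   pkt = cmdq_pkt_create(client, PAGE_SIZE);
 *   if (build_commands(pkt) < 0) {
 *           size_t need = pkt->cmd_buf_size;
 *           cmdq_pkt_destroy(pkt);
 *           pkt = cmdq_pkt_create(client, need);
 *           build_commands(pkt);
 *   }
 */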
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
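/*
 * Example with illustrative values: update only bits 7:0 of a register.
 * Because mask != 0xffffffff, two instructions are appended: a
 * CMDQ_CODE_MASK carrying ~mask, then the write with
 * CMDQ_WRITE_ENABLE_MASK set in the offset so the GCE applies the
 * preceding mask. With mask == 0xffffffff this degenerates to a plain
 * cmdq_pkt_write():
 *
 *   err = cmdq_pkt_write_mask(pkt, creg.subsys, creg.offset, 0x5a, 0xff);
 */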
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset |= CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);
static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	mbox_send_message(client->chan, pkt);
	/* We can send the next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);
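/*
 * Illustrative async flow (the callback name and lack of private data are
 * placeholders). The callback runs from the mailbox completion path after
 * the EOC interrupt, so it must not sleep:
 *
 *   static void demo_done(struct cmdq_cb_data data)
 *   {
 *           if (data.sta != CMDQ_CB_NORMAL)
 *                   pr_err("cmdq flush failed: %d\n", data.sta);
 *   }
 *
 *   cmdq_pkt_flush_async(pkt, demo_done, NULL);
 */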
struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};
static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}
int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);
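/*
 * End-to-end sketch of the synchronous API (dev, subsys, offset, value,
 * and event_id are placeholders a real client would get from its device
 * tree): create a client and packet, wait for a hardware event, do a
 * masked write, then flush and clean up.
 *
 *   client = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
 *   pkt = cmdq_pkt_create(client, PAGE_SIZE);
 *   cmdq_pkt_wfe(pkt, event_id);
 *   cmdq_pkt_write_mask(pkt, subsys, offset, value, 0xffff);
 *   cmdq_pkt_flush(pkt);            // blocks until the EOC callback
 *   cmdq_pkt_destroy(pkt);
 *   cmdq_mbox_destroy(client);
 */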
MODULE_LICENSE("GPL v2");