1 /* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/module.h>
16 #include <linux/device.h>
17 #include <linux/err.h>
18 #include <linux/sched.h>
19 #include <linux/ratelimit.h>
20 #include <linux/workqueue.h>
21 #include <linux/diagchar.h>
22 #include <linux/delay.h>
23 #include <linux/kmemleak.h>
24 #include <linux/uaccess.h>
26 #include "diag_memorydevice.h"
27 #include "diagfwd_bridge.h"
31 #include "diagfwd_peripheral.h"
32 #include "diag_ipc_logging.h"
/*
 * diag_md: per-device state table for Memory Device (MD) logging.
 * The first entry uses the local APPS mux pool; the remaining entries
 * (MDM, MDM2, QSC pools) are compiled in only with bridge support.
 * NOTE(review): this dump is elided -- the .id/.ctx/.tbl/.ops fields of
 * each initializer are not visible; only the mempool assignments survive.
 */
34 struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
	/* local (APPS) processor entry */
38 .mempool = POOL_TYPE_MUX_APPS,
43 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	/* remote processors, present only when the bridge code is built */
47 .mempool = POOL_TYPE_MDM_MUX,
55 .mempool = POOL_TYPE_MDM2_MUX,
63 .mempool = POOL_TYPE_QSC_MUX,
71 int diag_md_register(int id, int ctx, struct diag_mux_ops *ops)
73 if (id < 0 || id >= NUM_DIAG_MD_DEV || !ops)
76 diag_md[id].ops = ops;
77 diag_md[id].ctx = ctx;
81 void diag_md_open_all()
84 struct diag_md_info *ch = NULL;
86 for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
88 if (ch->ops && ch->ops->open)
89 ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
95 void diag_md_close_all()
99 struct diag_md_info *ch = NULL;
100 struct diag_buf_tbl_t *entry = NULL;
102 for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
105 if (ch->ops && ch->ops->close)
106 ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
109 * When we close the Memory device mode, make sure we flush the
110 * internal buffers in the table so that there are no stale
113 spin_lock_irqsave(&ch->lock, flags);
114 for (j = 0; j < ch->num_tbl_entries; j++) {
118 if (ch->ops && ch->ops->write_done)
119 ch->ops->write_done(entry->buf, entry->len,
121 DIAG_MEMORY_DEVICE_MODE);
126 spin_unlock_irqrestore(&ch->lock, flags);
129 diag_ws_reset(DIAG_WS_MUX);
132 int diag_md_write(int id, unsigned char *buf, int len, int ctx)
137 struct diag_md_info *ch = NULL;
139 struct diag_md_session_t *session_info = NULL;
141 if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
148 diag_md_get_peripheral(ctx);
153 diag_md_session_get_peripheral(peripheral);
161 spin_lock_irqsave(&ch->lock, flags);
162 for (i = 0; i < ch->num_tbl_entries && !found; i++) {
163 if (ch->tbl[i].buf != buf)
166 pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, len: %d, back to the table for p: %d, t: %d, buf_num: %d, proc: %d, i: %d\n",
167 buf, ch->tbl[i].len, GET_BUF_PERIPHERAL(ctx),
168 GET_BUF_TYPE(ctx), GET_BUF_NUM(ctx), id, i);
169 ch->tbl[i].buf = NULL;
173 spin_unlock_irqrestore(&ch->lock, flags);
178 spin_lock_irqsave(&ch->lock, flags);
179 for (i = 0; i < ch->num_tbl_entries && !found; i++) {
180 if (ch->tbl[i].len == 0) {
181 ch->tbl[i].buf = buf;
182 ch->tbl[i].len = len;
183 ch->tbl[i].ctx = ctx;
185 diag_ws_on_read(DIAG_WS_MUX, len);
188 spin_unlock_irqrestore(&ch->lock, flags);
191 pr_err_ratelimited("diag: Unable to find an empty space in table, please reduce logging rate, proc: %d\n",
197 for (i = 0; i < driver->num_clients && !found; i++) {
198 if ((driver->client_map[i].pid !=
199 session_info->pid) ||
200 (driver->client_map[i].pid == 0))
204 driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
205 pr_debug("diag: wake up logging process\n");
206 wake_up_interruptible(&driver->wait_q);
/*
 * diag_md_copy_to_user - drain queued buffers to a memory-device client.
 * @buf:      userspace destination buffer
 * @pret:     in/out running byte count within @buf
 * @buf_size: capacity of @buf in bytes
 * @info:     md session of the calling client (checked against each
 *            entry's owning session and peripheral mask)
 *
 * Walks every device's buffer table and, for each pending entry that
 * belongs to the caller's session, copies [remote token,] length, payload
 * into @buf, then signals write_done so the producer may reuse the buffer.
 *
 * NOTE(review): this dump is elided (braces, error paths and several
 * statements are missing, and stray line numbers are embedded) -- the
 * annotations below cover only the visible lines; do not treat this span
 * as compilable as-is.
 */
215 int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
216 struct diag_md_session_t *info)
224 struct diag_md_info *ch = NULL;
225 struct diag_buf_tbl_t *entry = NULL;
226 uint8_t drain_again = 0;
227 uint8_t peripheral = 0;
228 struct diag_md_session_t *session_info = NULL;
229 struct pid *pid_struct = NULL;
/* Outer loop: each memory device; inner loop: its buffer table. */
231 for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
233 for (j = 0; j < ch->num_tbl_entries && !err; j++) {
/* Skip slots with no pending data. */
235 if (entry->len <= 0 || entry->buf == NULL)
238 peripheral = diag_md_get_peripheral(entry->ctx);
243 diag_md_session_get_peripheral(peripheral);
/* Only drain entries owned by the calling client's session ... */
248 if (session_info && info &&
249 (session_info->pid != info->pid))
/* ... and whose peripheral is enabled in the caller's mask. */
251 if ((info && (info->peripheral_mask &
252 MD_PERIPHERAL_MASK(peripheral)) == 0))
/* Verify the session's process still exists before copying to it. */
254 pid_struct = find_get_pid(session_info->pid);
257 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
258 "diag: No such md_session_map[%d] with pid = %d err=%d exists..\n",
259 peripheral, session_info->pid, err);
/*
263 * If the data is from remote processor, copy the remote
 * token too -- hence a 3-int header instead of 2 ints; if the
 * entry will not fit, stop and ask the caller to drain again.
 */
267 if ((ret + (3 * sizeof(int)) + entry->len) >=
273 if ((ret + (2 * sizeof(int)) + entry->len) >=
/* Remote device: prepend the remote-processor token. */
280 remote_token = diag_get_remote(i);
281 if (get_pid_task(pid_struct, PIDTYPE_PID)) {
282 err = copy_to_user(buf + ret,
291 /* Copy the length of data being passed */
292 if (get_pid_task(pid_struct, PIDTYPE_PID)) {
293 err = copy_to_user(buf + ret,
294 (void *)&(entry->len),
301 /* Copy the actual data being passed */
302 if (get_pid_task(pid_struct, PIDTYPE_PID)) {
303 err = copy_to_user(buf + ret,
/*
311 * The data is now copied to the user space client,
312 * Notify that the write is complete and delete its
313 * entry from the table
 */
317 spin_lock_irqsave(&ch->lock, flags);
318 if (ch->ops && ch->ops->write_done)
319 ch->ops->write_done(entry->buf, entry->len,
321 DIAG_MEMORY_DEVICE_MODE);
322 diag_ws_on_copy(DIAG_WS_MUX);
326 spin_unlock_irqrestore(&ch->lock, flags);
/* Prefix the whole payload with the record count / status word. */
331 if (pid_struct && get_pid_task(pid_struct, PIDTYPE_PID)) {
332 err = copy_to_user(buf + sizeof(int),
336 diag_ws_on_copy_complete(DIAG_WS_MUX);
/* More data is still pending (drain_again path) -- re-arm wakeup. */
338 chk_logging_wakeup();
343 int diag_md_close_peripheral(int id, uint8_t peripheral)
348 struct diag_md_info *ch = NULL;
349 struct diag_buf_tbl_t *entry = NULL;
351 if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
356 spin_lock_irqsave(&ch->lock, flags);
357 for (i = 0; i < ch->num_tbl_entries && !found; i++) {
360 if (peripheral > NUM_PERIPHERALS) {
361 if (GET_PD_CTXT(entry->ctx) != peripheral)
364 if (GET_BUF_PERIPHERAL(entry->ctx) !=
369 if (ch->ops && ch->ops->write_done) {
370 ch->ops->write_done(entry->buf, entry->len,
372 DIAG_MEMORY_DEVICE_MODE);
378 spin_unlock_irqrestore(&ch->lock, flags);
/*
 * NOTE(review): fragment of what appears to be the md init routine -- the
 * function signature, allocation-failure handling and closing braces are
 * elided in this dump, so only the visible lines are annotated.
 * Sizes each channel's table from its mempool and allocates it zeroed.
 */
385 struct diag_md_info *ch = NULL;
387 for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
/* One table slot per buffer available in the channel's mempool. */
389 ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
/*
 * NOTE(review): open-coded n * size inside kzalloc has no overflow
 * check; kcalloc would be the safer idiom -- confirm upstream before
 * changing, this dump is incomplete.
 */
390 ch->tbl = kzalloc(ch->num_tbl_entries *
391 sizeof(struct diag_buf_tbl_t),
/* Explicitly mark every slot empty (kzalloc already zeroes .len/.ctx). */
396 for (j = 0; j < ch->num_tbl_entries; j++) {
397 ch->tbl[j].buf = NULL;
401 spin_lock_init(&(ch->lock));
/*
 * NOTE(review): fragment of what appears to be the md teardown routine --
 * the function signature and any kfree(ch->tbl) are elided in this dump;
 * only the visible per-channel reset survives.
 */
414 struct diag_md_info *ch = NULL;
416 for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
/* Mark the channel's table as empty/unusable. */
419 ch->num_tbl_entries = 0;