/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12 #include <linux/slab.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/device.h>
16 #include <linux/err.h>
17 #include <linux/sched.h>
18 #include <linux/ratelimit.h>
19 #include <linux/workqueue.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/diagchar.h>
22 #include <linux/delay.h>
23 #include <linux/reboot.h>
25 #include <linux/kmemleak.h>
26 #ifdef CONFIG_DIAG_OVER_USB
27 #include <linux/usb/usbdiag.h>
29 #include <soc/qcom/socinfo.h>
30 #include <soc/qcom/restart.h>
34 #include "diagfwd_peripheral.h"
35 #include "diagfwd_cntl.h"
36 #include "diagchar_hdlc.h"
38 #include "diag_masks.h"
41 #include "diag_ipc_logging.h"
/*
 * NOTE(review): extraction damage — the original file's line numbers are
 * fused into each line below; content is preserved byte-for-byte.
 */
/* Byte offsets of fields inside an incoming STM command packet */
43 #define STM_CMD_VERSION_OFFSET 4
44 #define STM_CMD_MASK_OFFSET 5
45 #define STM_CMD_DATA_OFFSET 6
46 #define STM_CMD_NUM_BYTES 7
/* Byte offsets of fields inside the STM command response packet */
48 #define STM_RSP_SUPPORTED_INDEX 7
49 #define STM_RSP_STATUS_INDEX 8
50 #define STM_RSP_NUM_BYTES 9
/* Module parameter toggled by the time-sync switch command (0644 in sysfs) */
52 static int timestamp_switch;
53 module_param(timestamp_switch, int, 0644);
/* Scratch state for incremental HDLC decoding in diag_process_hdlc_pkt() */
57 static struct diag_hdlc_decode_type *hdlc_decode;
59 #define DIAG_NUM_COMMON_CMD 1
60 static uint8_t common_cmds[DIAG_NUM_COMMON_CMD] = {
64 static uint8_t hdlc_timer_in_progress;
/* Determine if this device uses a device tree */
#ifdef CONFIG_OF
static int has_device_tree(void)
{
	struct device_node *node;

	/* The root node exists iff a device tree was provided at boot */
	node = of_find_node_by_path("/");
	if (node) {
		of_node_put(node);
		return 1;
	}
	return 0;
}
#else
static int has_device_tree(void)
{
	return 0;
}
#endif
86 int chk_config_get_id(void)
88 switch (socinfo_get_msm_cpu()) {
90 return APQ8060_TOOLS_ID;
93 return AO8960_TOOLS_ID;
97 return APQ8064_TOOLS_ID;
101 return MSM8930_TOOLS_ID;
103 return MSM8974_TOOLS_ID;
105 return MSM8625_TOOLS_ID;
107 return APQ8084_TOOLS_ID;
109 return MSM8916_TOOLS_ID;
111 return MSM8939_TOOLS_ID;
113 return MSM8994_TOOLS_ID;
115 return APQ8026_TOOLS_ID;
117 return MSM8909_TOOLS_ID;
119 return MSM8992_TOOLS_ID;
121 return MSM_8996_TOOLS_ID;
123 if (driver->use_device_tree) {
124 if (machine_is_msm8974())
125 return MSM8974_TOOLS_ID;
126 else if (machine_is_apq8074())
127 return APQ8074_TOOLS_ID;
137 * This will return TRUE for targets which support apps only mode and hence SSR.
138 * This applies to 8960 and newer targets.
140 int chk_apps_only(void)
142 if (driver->use_device_tree)
145 switch (socinfo_get_msm_cpu()) {
164 * This will return TRUE for targets which support apps as master.
165 * Thus, SW DLOAD and Mode Reset are supported on apps processor.
166 * This applies to 8960 and newer targets.
168 int chk_apps_master(void)
170 if (driver->use_device_tree)
176 int chk_polling_response(void)
178 if (!(driver->polling_reg_flag) && chk_apps_master())
180 * If the apps processor is master and no other processor
181 * has registered to respond for polling
184 else if (!(driver->diagfwd_cntl[PERIPHERAL_MODEM] &&
185 driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
186 (driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask))
188 * If the apps processor is not the master and the modem
189 * is not up or we did not receive the feature masks from Modem
197 * This function should be called if you feel that the logging process may
198 * need to be woken up. For instance, if the logging mode is MEMORY_DEVICE MODE
199 * and while trying to read data from data channel there are no buffers
200 * available to read the data into, then this function should be called to
201 * determine if the logging process needs to be woken up.
203 void chk_logging_wakeup(void)
209 for (j = 0; j < NUM_MD_SESSIONS; j++) {
210 if (!driver->md_session_map[j])
212 pid = driver->md_session_map[j]->pid;
214 /* Find the index of the logging process */
215 for (i = 0; i < driver->num_clients; i++) {
216 if (driver->client_map[i].pid != pid)
218 if (driver->data_ready[i] & USER_SPACE_DATA_TYPE)
221 * At very high logging rates a race condition can
222 * occur where the buffers containing the data from
223 * a channel are all in use, but the data_ready flag
224 * is cleared. In this case, the buffers never have
225 * their data read/logged. Detect and remedy this
228 driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
229 pr_debug("diag: Force wakeup of logging process\n");
230 wake_up_interruptible(&driver->wait_q);
234 * Diag Memory Device is in normal. Check only for the first
235 * index as all the indices point to the same session
238 if ((driver->md_session_mask == DIAG_CON_ALL) && (j == 0))
/*
 * NOTE(review): this block was damaged in extraction — the original file's
 * line numbers are fused into each line and several lines (braces, local
 * declarations such as err/flags, return statements) are missing. Content
 * is preserved byte-for-byte; restore from the original tree before
 * compiling. Purpose (from the visible code): frame an apps-generated
 * command response in non-HDLC (diag_pkt_frame_t) format into
 * driver->encoded_rsp_buf and hand it to diag_mux_write().
 */
243 static void pack_rsp_and_send(unsigned char *buf, int len,
244 struct diag_md_session_t *info)
247 int retry_count = 0, i, rsp_ctxt;
248 uint32_t write_len = 0;
250 unsigned char *rsp_ptr = driver->encoded_rsp_buf;
251 struct diag_pkt_frame_t header;
253 if (!rsp_ptr || !buf)
256 if (len > DIAG_MAX_RSP_SIZE || len < 0) {
257 pr_err("diag: In %s, invalid len %d, permissible len %d\n",
258 __func__, len, DIAG_MAX_RSP_SIZE);
/* Choose the buffer context from the session's peripheral mask, if any */
262 if (info && info->peripheral_mask) {
263 if (info->peripheral_mask == DIAG_CON_ALL ||
264 (info->peripheral_mask & (1 << APPS_DATA)) ||
265 (info->peripheral_mask & (1 << PERIPHERAL_MODEM))) {
266 rsp_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
268 for (i = 0; i <= NUM_PERIPHERALS; i++) {
269 if (info->peripheral_mask & (1 << i))
272 rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
275 rsp_ctxt = driver->rsp_buf_ctxt;
278 * Keep trying till we get the buffer back. It should probably
279 * take one or two iterations. When this loops till UINT_MAX, it
280 * means we did not get a write complete for the previous
283 while (retry_count < UINT_MAX) {
284 if (!driver->rsp_buf_busy)
287 * Wait for sometime and try again. The value 10000 was chosen
288 * empirically as an optimum value for USB to complete a write
290 usleep_range(10000, 10100);
294 * There can be a race conditon that clears the data ready flag
295 * for responses. Make sure we don't miss previous wakeups for
296 * draining responses when we are in Memory Device Mode.
298 if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
299 driver->logging_mode == DIAG_MULTI_MODE)
300 chk_logging_wakeup();
302 if (driver->rsp_buf_busy) {
303 pr_err("diag: unable to get hold of response buffer\n")
307 driver->rsp_buf_busy = 1;
308 header.start = CONTROL_CHAR;
/* Copy frame header, payload, and trailing CONTROL_CHAR terminator */
311 memcpy(rsp_ptr, &header, sizeof(header));
312 write_len += sizeof(header);
313 memcpy(rsp_ptr + write_len, buf, len);
315 *(uint8_t *)(rsp_ptr + write_len) = CONTROL_CHAR;
316 write_len += sizeof(uint8_t);
318 err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len, rsp_ctxt);
320 pr_err("diag: In %s, unable to write to mux, err: %d\n",
/* On mux write failure, release the response buffer under the spinlock */
322 spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
323 driver->rsp_buf_busy = 0;
324 spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
/*
 * NOTE(review): this block was damaged in extraction — original line
 * numbers are fused into the text and several lines (braces, flags
 * declaration, returns) are missing. Content is preserved byte-for-byte;
 * restore from the original tree before compiling. Purpose (from the
 * visible code): HDLC-encode an apps-generated command response into
 * driver->encoded_rsp_buf and send it via diag_mux_write(); mirror of
 * pack_rsp_and_send() for the HDLC-enabled case.
 */
328 static void encode_rsp_and_send(unsigned char *buf, int len,
329 struct diag_md_session_t *info)
331 struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
332 struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
333 unsigned char *rsp_ptr = driver->encoded_rsp_buf;
334 int err, i, rsp_ctxt, retry_count = 0;
337 if (!rsp_ptr || !buf)
340 if (len > DIAG_MAX_RSP_SIZE || len < 0) {
341 pr_err("diag: In %s, invalid len %d, permissible len %d\n",
342 __func__, len, DIAG_MAX_RSP_SIZE);
/* Choose the buffer context from the session's peripheral mask, if any */
346 if (info && info->peripheral_mask) {
347 if (info->peripheral_mask == DIAG_CON_ALL ||
348 (info->peripheral_mask & (1 << APPS_DATA)) ||
349 (info->peripheral_mask & (1 << PERIPHERAL_MODEM))) {
350 rsp_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
352 for (i = 0; i <= NUM_PERIPHERALS; i++) {
353 if (info->peripheral_mask & (1 << i))
356 rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
359 rsp_ctxt = driver->rsp_buf_ctxt;
362 * Keep trying till we get the buffer back. It should probably
363 * take one or two iterations. When this loops till UINT_MAX, it
364 * means we did not get a write complete for the previous
367 while (retry_count < UINT_MAX) {
368 if (!driver->rsp_buf_busy)
371 * Wait for sometime and try again. The value 10000 was chosen
372 * empirically as an optimum value for USB to complete a write
374 usleep_range(10000, 10100);
378 * There can be a race conditon that clears the data ready flag
379 * for responses. Make sure we don't miss previous wakeups for
380 * draining responses when we are in Memory Device Mode.
382 if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
383 driver->logging_mode == DIAG_MULTI_MODE)
384 chk_logging_wakeup();
387 if (driver->rsp_buf_busy) {
388 pr_err("diag: unable to get hold of response buffer\n");
392 spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
393 driver->rsp_buf_busy = 1;
394 spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
395 send.state = DIAG_STATE_START;
397 send.last = (void *)(buf + len - 1);
400 enc.dest_last = (void *)(rsp_ptr + DIAG_MAX_HDLC_BUF_SIZE - 1);
401 diag_hdlc_encode(&send, &enc);
402 driver->encoded_rsp_len = (int)(enc.dest - (void *)rsp_ptr);
403 err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, driver->encoded_rsp_len,
406 pr_err("diag: In %s, Unable to write to device, err: %d\n",
/* On mux write failure, release the response buffer under the spinlock */
408 spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
409 driver->rsp_buf_busy = 0;
410 spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
/* Scrub the caller's plaintext response buffer after sending */
412 memset(buf, '\0', DIAG_MAX_RSP_SIZE);
415 void diag_send_rsp(unsigned char *buf, int len, struct diag_md_session_t *info)
417 struct diag_md_session_t *session_info = NULL;
418 uint8_t hdlc_disabled;
420 session_info = (info) ? info :
421 diag_md_session_get_peripheral(APPS_DATA);
423 hdlc_disabled = session_info->hdlc_disabled;
425 hdlc_disabled = driver->hdlc_disabled;
428 pack_rsp_and_send(buf, len, session_info);
430 encode_rsp_and_send(buf, len, session_info);
433 void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type)
435 unsigned char *ptr = NULL;
436 unsigned char *temp = buf;
438 uint32_t *length = NULL;
439 uint32_t max_len = 0;
441 if (!buf || len == 0) {
442 pr_err("diag: In %s, Invalid ptr %pK and length %d\n",
449 ptr = driver->apps_req_buf;
450 length = &driver->apps_req_buf_len;
451 max_len = DIAG_MAX_REQ_SIZE;
452 in_busy = &driver->in_busy_pktdata;
455 ptr = driver->dci_pkt_buf;
456 length = &driver->dci_pkt_length;
457 max_len = DCI_BUF_SIZE;
458 in_busy = &driver->in_busy_dcipktdata;
461 pr_err("diag: Invalid type %d in %s\n", type, __func__);
465 mutex_lock(&driver->diagchar_mutex);
466 if (CHK_OVERFLOW(ptr, ptr, ptr + max_len, len)) {
467 memcpy(ptr, temp , len);
471 pr_alert("diag: In %s, no space for response packet, len: %d, type: %d\n",
472 __func__, len, type);
474 mutex_unlock(&driver->diagchar_mutex);
477 void diag_update_userspace_clients(unsigned int type)
481 mutex_lock(&driver->diagchar_mutex);
482 for (i = 0; i < driver->num_clients; i++)
483 if (driver->client_map[i].pid != 0)
484 driver->data_ready[i] |= type;
485 wake_up_interruptible(&driver->wait_q);
486 mutex_unlock(&driver->diagchar_mutex);
489 void diag_update_md_clients(unsigned int type)
493 mutex_lock(&driver->diagchar_mutex);
494 for (i = 0; i < NUM_MD_SESSIONS; i++) {
495 if (driver->md_session_map[i] != NULL)
496 for (j = 0; j < driver->num_clients; j++) {
497 if (driver->client_map[j].pid != 0 &&
498 driver->client_map[j].pid ==
499 driver->md_session_map[i]->pid) {
500 driver->data_ready[j] |= type;
505 wake_up_interruptible(&driver->wait_q);
506 mutex_unlock(&driver->diagchar_mutex);
508 void diag_update_sleeping_process(int process_id, int data_type)
512 mutex_lock(&driver->diagchar_mutex);
513 for (i = 0; i < driver->num_clients; i++)
514 if (driver->client_map[i].pid == process_id) {
515 driver->data_ready[i] |= data_type;
518 wake_up_interruptible(&driver->wait_q);
519 mutex_unlock(&driver->diagchar_mutex);
522 static int diag_send_data(struct diag_cmd_reg_t *entry, unsigned char *buf,
528 if (entry->proc == APPS_DATA) {
529 diag_update_pkt_buffer(buf, len, PKT_TYPE);
530 diag_update_sleeping_process(entry->pid, PKT_TYPE);
534 return diagfwd_write(entry->proc, TYPE_CMD, buf, len);
537 void diag_process_stm_mask(uint8_t cmd, uint8_t data_mask, int data_type)
540 if (data_type >= PERIPHERAL_MODEM && data_type <= PERIPHERAL_SENSORS) {
541 if (driver->feature[data_type].stm_support) {
542 status = diag_send_stm_state(data_type, cmd);
544 driver->stm_state[data_type] = cmd;
546 driver->stm_state_requested[data_type] = cmd;
547 } else if (data_type == APPS_DATA) {
548 driver->stm_state[data_type] = cmd;
549 driver->stm_state_requested[data_type] = cmd;
/*
 * NOTE(review): this block was damaged in extraction — original line
 * numbers are fused into the text and several lines (braces, the loop
 * index declaration, return statements, data_type arguments split across
 * dropped continuation lines) are missing. Content preserved
 * byte-for-byte; restore from the original tree before compiling.
 * Purpose (from the visible code): validate and apply an STM command
 * packet, then build the STM response (supported/status masks) in
 * dest_buf and return its length.
 */
553 int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
555 uint8_t version, mask, cmd;
556 uint8_t rsp_supported = 0;
557 uint8_t rsp_status = 0;
560 if (!buf || !dest_buf) {
561 pr_err("diag: Invalid pointers buf: %pK, dest_buf %pK in %s\n",
562 buf, dest_buf, __func__);
566 version = *(buf + STM_CMD_VERSION_OFFSET);
567 mask = *(buf + STM_CMD_MASK_OFFSET);
568 cmd = *(buf + STM_CMD_DATA_OFFSET);
571 * Check if command is valid. If the command is asking for
572 * status, then the processor mask field is to be ignored.
574 if ((version != 2) || (cmd > STATUS_STM) ||
575 ((cmd != STATUS_STM) && ((mask == 0) || (0 != (mask >> 4))))) {
576 /* Command is invalid. Send bad param message response */
577 dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
578 for (i = 0; i < STM_CMD_NUM_BYTES; i++)
579 dest_buf[i+1] = *(buf + i);
580 return STM_CMD_NUM_BYTES+1;
581 } else if (cmd != STATUS_STM) {
582 if (mask & DIAG_STM_MODEM)
583 diag_process_stm_mask(cmd, DIAG_STM_MODEM,
586 if (mask & DIAG_STM_LPASS)
587 diag_process_stm_mask(cmd, DIAG_STM_LPASS,
590 if (mask & DIAG_STM_WCNSS)
591 diag_process_stm_mask(cmd, DIAG_STM_WCNSS,
594 if (mask & DIAG_STM_SENSORS)
595 diag_process_stm_mask(cmd, DIAG_STM_SENSORS,
597 if (mask & DIAG_STM_WDSP)
598 diag_process_stm_mask(cmd, DIAG_STM_WDSP,
601 if (mask & DIAG_STM_APPS)
602 diag_process_stm_mask(cmd, DIAG_STM_APPS, APPS_DATA);
/* Echo the original command bytes back at the head of the response */
605 for (i = 0; i < STM_CMD_NUM_BYTES; i++)
606 dest_buf[i] = *(buf + i);
608 /* Set mask denoting which peripherals support STM */
609 if (driver->feature[PERIPHERAL_MODEM].stm_support)
610 rsp_supported |= DIAG_STM_MODEM;
612 if (driver->feature[PERIPHERAL_LPASS].stm_support)
613 rsp_supported |= DIAG_STM_LPASS;
615 if (driver->feature[PERIPHERAL_WCNSS].stm_support)
616 rsp_supported |= DIAG_STM_WCNSS;
618 if (driver->feature[PERIPHERAL_SENSORS].stm_support)
619 rsp_supported |= DIAG_STM_SENSORS;
621 if (driver->feature[PERIPHERAL_WDSP].stm_support)
622 rsp_supported |= DIAG_STM_WDSP;
/* Apps always supports STM */
624 rsp_supported |= DIAG_STM_APPS;
626 /* Set mask denoting STM state/status for each peripheral/APSS */
627 if (driver->stm_state[PERIPHERAL_MODEM])
628 rsp_status |= DIAG_STM_MODEM;
630 if (driver->stm_state[PERIPHERAL_LPASS])
631 rsp_status |= DIAG_STM_LPASS;
633 if (driver->stm_state[PERIPHERAL_WCNSS])
634 rsp_status |= DIAG_STM_WCNSS;
636 if (driver->stm_state[PERIPHERAL_SENSORS])
637 rsp_status |= DIAG_STM_SENSORS;
639 if (driver->stm_state[PERIPHERAL_WDSP])
640 rsp_status |= DIAG_STM_WDSP;
642 if (driver->stm_state[APPS_DATA])
643 rsp_status |= DIAG_STM_APPS;
645 dest_buf[STM_RSP_SUPPORTED_INDEX] = rsp_supported;
646 dest_buf[STM_RSP_STATUS_INDEX] = rsp_status;
648 return STM_RSP_NUM_BYTES;
651 int diag_process_time_sync_query_cmd(unsigned char *src_buf, int src_len,
652 unsigned char *dest_buf, int dest_len)
655 struct diag_cmd_time_sync_query_req_t *req = NULL;
656 struct diag_cmd_time_sync_query_rsp_t rsp;
658 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
659 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
660 __func__, src_buf, src_len, dest_buf, dest_len);
664 req = (struct diag_cmd_time_sync_query_req_t *)src_buf;
665 rsp.header.cmd_code = req->header.cmd_code;
666 rsp.header.subsys_id = req->header.subsys_id;
667 rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
668 rsp.version = req->version;
669 rsp.time_api = driver->uses_time_api;
670 memcpy(dest_buf, &rsp, sizeof(rsp));
671 write_len = sizeof(rsp);
/*
 * NOTE(review): this block was damaged in extraction — original line
 * numbers are fused into the text and several lines (braces, case
 * labels in the switch on req->time_api, break/return statements,
 * continuation arguments) are missing. Content preserved byte-for-byte;
 * restore from the original tree before compiling. Purpose (from the
 * visible code): validate a "time sync switch" request, broadcast the
 * new time API to all peripherals over the control channel, update
 * driver state and the timestamp_switch module parameter, and build the
 * response in dest_buf.
 */
675 int diag_process_time_sync_switch_cmd(unsigned char *src_buf, int src_len,
676 unsigned char *dest_buf, int dest_len)
678 uint8_t peripheral, status = 0;
679 struct diag_cmd_time_sync_switch_req_t *req = NULL;
680 struct diag_cmd_time_sync_switch_rsp_t rsp;
681 struct diag_ctrl_msg_time_sync time_sync_msg;
682 int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
683 int err = 0, write_len = 0;
685 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
686 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
687 __func__, src_buf, src_len, dest_buf, dest_len);
691 req = (struct diag_cmd_time_sync_switch_req_t *)src_buf;
692 rsp.header.cmd_code = req->header.cmd_code;
693 rsp.header.subsys_id = req->header.subsys_id;
694 rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
695 rsp.version = req->version;
696 rsp.time_api = req->time_api;
/* Reject unknown versions/APIs and unsupported persist requests */
697 if ((req->version > 1) || (req->time_api > 1) ||
698 (req->persist_time > 0)) {
699 dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
700 rsp.time_api_status = 0;
701 rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
702 memcpy(dest_buf + 1, &rsp, sizeof(rsp));
703 write_len = sizeof(rsp) + 1;
704 timestamp_switch = 0;
708 time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
709 time_sync_msg.ctrl_pkt_data_len = 5;
710 time_sync_msg.version = 1;
711 time_sync_msg.time_api = req->time_api;
/* Broadcast the new time API; record failures per peripheral in status */
713 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
714 err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg,
716 if (err && err != -ENODEV) {
717 pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
718 __func__, peripheral, TYPE_CNTL,
720 status |= (1 << peripheral);
724 driver->time_sync_enabled = 1;
725 driver->uses_time_api = req->time_api;
727 switch (req->time_api) {
729 timestamp_switch = 0;
732 timestamp_switch = 1;
735 timestamp_switch = 0;
739 rsp.time_api_status = status;
740 rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
741 memcpy(dest_buf, &rsp, sizeof(rsp));
742 write_len = sizeof(rsp);
746 int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
747 unsigned char *dest_buf, int dest_len)
750 struct diag_log_on_demand_rsp_t header;
752 if (!driver->diagfwd_cntl[PERIPHERAL_MODEM] ||
753 !driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open ||
754 !driver->log_on_demand_support)
757 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
758 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
759 __func__, src_buf, src_len, dest_buf, dest_len);
763 header.cmd_code = DIAG_CMD_LOG_ON_DMND;
764 header.log_code = *(uint16_t *)(src_buf + 1);
766 memcpy(dest_buf, &header, sizeof(struct diag_log_on_demand_rsp_t));
767 write_len += sizeof(struct diag_log_on_demand_rsp_t);
772 int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len,
773 unsigned char *dest_buf, int dest_len)
776 struct diag_pkt_header_t *header = NULL;
777 struct diag_cmd_ext_mobile_rsp_t rsp;
779 if (!src_buf || src_len != sizeof(*header) || !dest_buf ||
780 dest_len < sizeof(rsp))
783 header = (struct diag_pkt_header_t *)src_buf;
784 rsp.header.cmd_code = header->cmd_code;
785 rsp.header.subsys_id = header->subsys_id;
786 rsp.header.subsys_cmd_code = header->subsys_cmd_code;
792 rsp.chip_id = (uint32_t)socinfo_get_id();
794 memcpy(dest_buf, &rsp, sizeof(rsp));
795 write_len += sizeof(rsp);
800 int diag_check_common_cmd(struct diag_pkt_header_t *header)
807 for (i = 0; i < DIAG_NUM_COMMON_CMD; i++) {
808 if (header->cmd_code == common_cmds[i])
/*
 * NOTE(review): this block was damaged in extraction — original line
 * numbers are fused into the text and several lines (braces, local
 * declarations of write_len/payload, break statements between switch
 * cases, the default case and return statements) are missing. Content
 * preserved byte-for-byte; restore from the original tree before
 * compiling. Purpose (from the visible code): answer diag statistics
 * subsystem queries (msg/log/event alloc & drop counters, stat resets)
 * by copying a diag_cmd_stats_rsp_t into dest_buf.
 */
815 static int diag_cmd_chk_stats(unsigned char *src_buf, int src_len,
816 unsigned char *dest_buf, int dest_len)
820 struct diag_pkt_header_t *header = NULL;
821 struct diag_cmd_stats_rsp_t rsp;
823 if (!src_buf || src_len < sizeof(struct diag_pkt_header_t) ||
824 !dest_buf || dest_len < sizeof(rsp))
827 header = (struct diag_pkt_header_t *)src_buf;
829 if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
830 header->subsys_id != DIAG_SS_DIAG)
833 switch (header->subsys_cmd_code) {
834 case DIAG_CMD_OP_GET_MSG_ALLOC:
835 payload = driver->msg_stats.alloc_count;
837 case DIAG_CMD_OP_GET_MSG_DROP:
838 payload = driver->msg_stats.drop_count;
840 case DIAG_CMD_OP_RESET_MSG_STATS:
841 diag_record_stats(DATA_TYPE_F3, PKT_RESET);
843 case DIAG_CMD_OP_GET_LOG_ALLOC:
844 payload = driver->log_stats.alloc_count;
846 case DIAG_CMD_OP_GET_LOG_DROP:
847 payload = driver->log_stats.drop_count;
849 case DIAG_CMD_OP_RESET_LOG_STATS:
850 diag_record_stats(DATA_TYPE_LOG, PKT_RESET);
852 case DIAG_CMD_OP_GET_EVENT_ALLOC:
853 payload = driver->event_stats.alloc_count;
855 case DIAG_CMD_OP_GET_EVENT_DROP:
856 payload = driver->event_stats.drop_count;
858 case DIAG_CMD_OP_RESET_EVENT_STATS:
859 diag_record_stats(DATA_TYPE_EVENT, PKT_RESET);
865 memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
866 rsp.payload = payload;
867 write_len = sizeof(rsp);
868 memcpy(dest_buf, &rsp, sizeof(rsp));
873 static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len,
874 unsigned char *dest_buf, int dest_len)
876 struct diag_pkt_header_t *header = NULL;
877 struct diag_cmd_hdlc_disable_rsp_t rsp;
880 if (!src_buf || src_len < sizeof(*header) ||
881 !dest_buf || dest_len < sizeof(rsp)) {
885 header = (struct diag_pkt_header_t *)src_buf;
886 if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
887 header->subsys_id != DIAG_SS_DIAG ||
888 header->subsys_cmd_code != DIAG_CMD_OP_HDLC_DISABLE) {
892 memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
893 rsp.framing_version = 1;
895 write_len = sizeof(rsp);
896 memcpy(dest_buf, &rsp, sizeof(rsp));
901 void diag_send_error_rsp(unsigned char *buf, int len,
902 struct diag_md_session_t *info)
904 /* -1 to accomodate the first byte 0x13 */
905 if (len > (DIAG_MAX_RSP_SIZE - 1)) {
906 pr_err("diag: cannot send err rsp, huge length: %d\n", len);
910 *(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR;
911 memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len);
912 diag_send_rsp(driver->apps_rsp_buf, len + 1, info);
/*
 * NOTE(review): this block was damaged in extraction — original line
 * numbers are fused into the text and many lines are missing (braces,
 * local declarations of i/mask_ret/write_len, return statements, the
 * else branches joining the big if/else-if ladder, #endif, continuation
 * arguments). Content preserved byte-for-byte; restore from the original
 * tree before compiling. Purpose (from the visible code): central
 * dispatcher for a decoded command packet — try mask commands first,
 * then the command registration table, then a ladder of apps-handled
 * commands (max packet length, STM, time sync, download/reboot, polling,
 * delayed-response wrap, mobile id, stats, HDLC disable), falling back
 * to an error response on apps-only targets.
 */
915 int diag_process_apps_pkt(unsigned char *buf, int len,
916 struct diag_md_session_t *info)
921 unsigned char *temp = NULL;
922 struct diag_cmd_reg_entry_t entry;
923 struct diag_cmd_reg_entry_t *temp_entry = NULL;
924 struct diag_cmd_reg_t *reg_item = NULL;
929 /* Check if the command is a supported mask command */
930 mask_ret = diag_process_apps_masks(buf, len, info);
932 diag_send_rsp(driver->apps_rsp_buf, mask_ret, info);
/* Parse cmd_code / subsys_id / subsys cmd code out of the raw packet */
937 entry.cmd_code = (uint16_t)(*(uint8_t *)temp);
938 temp += sizeof(uint8_t);
939 entry.subsys_id = (uint16_t)(*(uint8_t *)temp);
940 temp += sizeof(uint8_t);
941 entry.cmd_code_hi = (uint16_t)(*(uint16_t *)temp);
942 entry.cmd_code_lo = (uint16_t)(*(uint16_t *)temp);
943 temp += sizeof(uint16_t);
945 pr_debug("diag: In %s, received cmd %02x %02x %02x\n",
946 __func__, entry.cmd_code, entry.subsys_id, entry.cmd_code_hi);
948 if (*buf == DIAG_CMD_LOG_ON_DMND && driver->log_on_demand_support &&
949 driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
950 write_len = diag_cmd_log_on_demand(buf, len,
951 driver->apps_rsp_buf,
954 diag_send_rsp(driver->apps_rsp_buf, write_len, info);
/* Look up the registered owner of this command */
958 mutex_lock(&driver->cmd_reg_mutex);
959 temp_entry = diag_cmd_search(&entry, ALL_PROC);
961 reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
964 if (MD_PERIPHERAL_MASK(reg_item->proc) &
965 info->peripheral_mask)
966 write_len = diag_send_data(reg_item, buf, len);
968 if (MD_PERIPHERAL_MASK(reg_item->proc) &
969 driver->logging_mask)
970 diag_send_error_rsp(buf, len, info);
972 write_len = diag_send_data(reg_item, buf, len);
974 mutex_unlock(&driver->cmd_reg_mutex);
977 mutex_unlock(&driver->cmd_reg_mutex);
979 #if defined(CONFIG_DIAG_OVER_USB)
980 /* Check for the command/respond msg for the maximum packet length */
981 if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
982 (*(uint16_t *)(buf+2) == 0x0055)) {
983 for (i = 0; i < 4; i++)
984 *(driver->apps_rsp_buf+i) = *(buf+i);
985 *(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE;
986 diag_send_rsp(driver->apps_rsp_buf, 8, info);
988 } else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
989 (*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
990 len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
992 diag_send_rsp(driver->apps_rsp_buf, len, info);
997 /* Check for time sync query command */
998 else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
999 (*(buf+1) == DIAG_SS_DIAG) &&
1000 (*(uint16_t *)(buf+2) == DIAG_GET_TIME_API)) {
1001 write_len = diag_process_time_sync_query_cmd(buf, len,
1002 driver->apps_rsp_buf,
1005 diag_send_rsp(driver->apps_rsp_buf, write_len, info);
1008 /* Check for time sync switch command */
1009 else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
1010 (*(buf+1) == DIAG_SS_DIAG) &&
1011 (*(uint16_t *)(buf+2) == DIAG_SET_TIME_API)) {
1012 write_len = diag_process_time_sync_switch_cmd(buf, len,
1013 driver->apps_rsp_buf,
1016 diag_send_rsp(driver->apps_rsp_buf, write_len, info);
1019 /* Check for download command */
1020 else if ((chk_apps_master()) && (*buf == 0x3A)) {
1021 /* send response back */
1022 driver->apps_rsp_buf[0] = *buf;
1023 diag_send_rsp(driver->apps_rsp_buf, 1, info);
1025 /* call download API */
1026 msm_set_restart_mode(RESTART_DLOAD);
1027 printk(KERN_CRIT "diag: download mode set, Rebooting SoC..\n");
1028 kernel_restart(NULL);
1029 /* Not required, represents that command isnt sent to modem */
1032 /* Check for polling for Apps only DIAG */
1033 else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
1034 (*(buf+2) == 0x03)) {
1035 /* If no one has registered for polling */
1036 if (chk_polling_response()) {
1037 /* Respond to polling for Apps only DIAG */
1038 for (i = 0; i < 3; i++)
1039 driver->apps_rsp_buf[i] = *(buf+i);
1040 for (i = 0; i < 13; i++)
1041 driver->apps_rsp_buf[i+3] = 0;
1043 diag_send_rsp(driver->apps_rsp_buf, 16, info);
1047 /* Return the Delayed Response Wrap Status */
1048 else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
1049 (*(buf+2) == 0x04) && (*(buf+3) == 0x0)) {
1050 memcpy(driver->apps_rsp_buf, buf, 4);
1051 driver->apps_rsp_buf[4] = wrap_enabled;
1052 diag_send_rsp(driver->apps_rsp_buf, 5, info);
1055 /* Wrap the Delayed Rsp ID */
1056 else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
1057 (*(buf+2) == 0x05) && (*(buf+3) == 0x0)) {
1058 wrap_enabled = true;
1059 memcpy(driver->apps_rsp_buf, buf, 4);
1060 driver->apps_rsp_buf[4] = wrap_count;
1061 diag_send_rsp(driver->apps_rsp_buf, 6, info);
1065 else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
1066 (*(buf+1) == DIAG_SS_PARAMS) &&
1067 (*(buf+2) == DIAG_EXT_MOBILE_ID) && (*(buf+3) == 0x0)) {
1068 write_len = diag_cmd_get_mobile_id(buf, len,
1069 driver->apps_rsp_buf,
1071 if (write_len > 0) {
1072 diag_send_rsp(driver->apps_rsp_buf, write_len, info);
1077 * If the apps processor is master and no other
1078 * processor has registered for polling command.
1079 * If modem is not up and we have not received feature
1080 * mask update from modem, in that case APPS should
1081 * respond for 0X7C command
1083 else if (chk_apps_master() &&
1084 !(driver->polling_reg_flag) &&
1085 !(driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
1086 !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
1087 /* respond to 0x0 command */
1089 for (i = 0; i < 55; i++)
1090 driver->apps_rsp_buf[i] = 0;
1092 diag_send_rsp(driver->apps_rsp_buf, 55, info);
1095 /* respond to 0x7c command */
1096 else if (*buf == 0x7c) {
1097 driver->apps_rsp_buf[0] = 0x7c;
1098 for (i = 1; i < 8; i++)
1099 driver->apps_rsp_buf[i] = 0;
1100 /* Tools ID for APQ 8060 */
1101 *(int *)(driver->apps_rsp_buf + 8) =
1102 chk_config_get_id();
1103 *(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
1104 *(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
1105 diag_send_rsp(driver->apps_rsp_buf, 14, info);
1109 write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf,
1111 if (write_len > 0) {
1112 diag_send_rsp(driver->apps_rsp_buf, write_len, info);
1115 write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf,
1117 if (write_len > 0) {
1119 * This mutex lock is necessary since we need to drain all the
1120 * pending buffers from peripherals which may be HDLC encoded
1121 * before disabling HDLC encoding on Apps processor.
1123 mutex_lock(&driver->hdlc_disable_mutex);
1124 diag_send_rsp(driver->apps_rsp_buf, write_len, info);
1126 * Set the value of hdlc_disabled after sending the response to
1127 * the tools. This is required since the tools is expecting a
1128 * HDLC encoded reponse for this request.
1130 pr_debug("diag: In %s, disabling HDLC encoding\n",
1133 info->hdlc_disabled = 1;
1135 driver->hdlc_disabled = 1;
1136 diag_update_md_clients(HDLC_SUPPORT_TYPE);
1137 mutex_unlock(&driver->hdlc_disable_mutex);
1142 /* We have now come to the end of the function. */
1143 if (chk_apps_only())
1144 diag_send_error_rsp(buf, len, info);
/*
 * NOTE(review): this block was damaged in extraction — original line
 * numbers are fused into the text and several lines are missing (braces,
 * declarations of ret/err, gotos to the fail path, else branches).
 * Content preserved byte-for-byte; restore from the original tree before
 * compiling. Purpose (from the visible code): incrementally HDLC-decode
 * incoming bytes into driver->hdlc_buf under diag_hdlc_mutex; on a
 * complete frame, CRC-check it, strip the footer, and dispatch via
 * diag_process_apps_pkt(); on malformed input, send an error response so
 * the host tools can run their recovery algorithm.
 */
1149 void diag_process_hdlc_pkt(void *data, unsigned len,
1150 struct diag_md_session_t *info)
1155 if (len > DIAG_MAX_HDLC_BUF_SIZE) {
1156 pr_err("diag: In %s, invalid length: %d\n", __func__, len);
1160 mutex_lock(&driver->diag_hdlc_mutex);
1161 pr_debug("diag: In %s, received packet of length: %d, req_buf_len: %d\n",
1162 __func__, len, driver->hdlc_buf_len);
1164 if (driver->hdlc_buf_len >= DIAG_MAX_REQ_SIZE) {
1165 pr_err("diag: In %s, request length is more than supported len. Dropping packet.\n",
/* Set up the incremental decoder to append after previously decoded bytes */
1170 hdlc_decode->dest_ptr = driver->hdlc_buf + driver->hdlc_buf_len;
1171 hdlc_decode->dest_size = DIAG_MAX_HDLC_BUF_SIZE - driver->hdlc_buf_len;
1172 hdlc_decode->src_ptr = data;
1173 hdlc_decode->src_size = len;
1174 hdlc_decode->src_idx = 0;
1175 hdlc_decode->dest_idx = 0;
1177 ret = diag_hdlc_decode(hdlc_decode);
1179 * driver->hdlc_buf is of size DIAG_MAX_HDLC_BUF_SIZE. But the decoded
1180 * packet should be within DIAG_MAX_REQ_SIZE.
1182 if (driver->hdlc_buf_len + hdlc_decode->dest_idx <= DIAG_MAX_REQ_SIZE) {
1183 driver->hdlc_buf_len += hdlc_decode->dest_idx;
1185 pr_err_ratelimited("diag: In %s, Dropping packet. pkt_size: %d, max: %d\n",
1187 driver->hdlc_buf_len + hdlc_decode->dest_idx,
1192 if (ret == HDLC_COMPLETE) {
1193 err = crc_check(driver->hdlc_buf, driver->hdlc_buf_len);
1195 /* CRC check failed. */
1196 pr_err_ratelimited("diag: In %s, bad CRC. Dropping packet\n",
1200 driver->hdlc_buf_len -= HDLC_FOOTER_LEN;
1202 if (driver->hdlc_buf_len < 1) {
1203 pr_err_ratelimited("diag: In %s, message is too short, len: %d, dest len: %d\n",
1204 __func__, driver->hdlc_buf_len,
1205 hdlc_decode->dest_idx);
1209 err = diag_process_apps_pkt(driver->hdlc_buf,
1210 driver->hdlc_buf_len, info);
1217 driver->hdlc_buf_len = 0;
1218 mutex_unlock(&driver->diag_hdlc_mutex);
1223 * Tools needs to get a response in order to start its
1224 * recovery algorithm. Send an error response if the
1225 * packet is not in expected format.
1227 diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len, info);
1228 driver->hdlc_buf_len = 0;
1230 mutex_unlock(&driver->diag_hdlc_mutex);
1233 static int diagfwd_mux_open(int id, int mode)
1236 unsigned long flags;
1240 driver->usb_connected = 1;
1242 case DIAG_MEMORY_DEVICE_MODE:
1248 if (driver->rsp_buf_busy) {
1250 * When a client switches from callback mode to USB mode
1251 * explicitly, there can be a situation when the last response
1252 * is not drained to the user space application. Reset the
1253 * in_busy flag in this case.
1255 spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
1256 driver->rsp_buf_busy = 0;
1257 spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
1259 for (i = 0; i < NUM_PERIPHERALS; i++) {
1260 diagfwd_open(i, TYPE_DATA);
1261 diagfwd_open(i, TYPE_CMD);
1263 queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
/*
 * NOTE(review): this block was damaged in extraction — original line
 * numbers are fused into the text and several lines are missing (braces,
 * the switch statement and its other case labels, local declarations,
 * the return statement). Content preserved byte-for-byte; restore from
 * the original tree before compiling. Purpose (from the visible code):
 * mux close callback — clear USB connection state; if a memory-device
 * client is still running in the background, either leave buffers alone
 * or close the peripheral DATA/CMD channels depending on the mask-clear
 * sysfs parameter, re-enable HDLC encoding when no session remains, and
 * schedule the user-client update and real-time workers.
 */
1267 static int diagfwd_mux_close(int id, int mode)
1273 driver->usb_connected = 0;
1275 case DIAG_MEMORY_DEVICE_MODE:
1281 if ((driver->logging_mode == DIAG_MULTI_MODE &&
1282 driver->md_session_mode == DIAG_MD_NONE) ||
1283 (driver->md_session_mode == DIAG_MD_PERIPHERAL)) {
1285 * This case indicates that the USB is removed
1286 * but there is a client running in background
1287 * with Memory Device mode.
1291 * With sysfs parameter to clear masks set,
1292 * peripheral masks are cleared on ODL exit and
1293 * USB disconnection and buffers are not marked busy.
1294 * This enables read and drop of stale packets.
1296 * With sysfs parameter to clear masks cleared,
1297 * masks are not cleared and buffers are to be marked
1298 * busy to ensure traffic generated by peripheral
1301 if (!(diag_mask_param())) {
1302 for (i = 0; i < NUM_PERIPHERALS; i++) {
1303 diagfwd_close(i, TYPE_DATA);
1304 diagfwd_close(i, TYPE_CMD);
1307 /* Re enable HDLC encoding */
1308 pr_debug("diag: In %s, re-enabling HDLC encoding\n",
1310 mutex_lock(&driver->hdlc_disable_mutex);
1311 if (driver->md_session_mode == DIAG_MD_NONE)
1312 driver->hdlc_disabled = 0;
1313 mutex_unlock(&driver->hdlc_disable_mutex);
1314 queue_work(driver->diag_wq,
1315 &(driver->update_user_clients));
1317 queue_work(driver->diag_real_time_wq,
1318 &driver->diag_real_time_work);
/*
 * NOTE(review): presumably set while HDLC frame-sync recovery is in
 * progress (see diag_hdlc_start_recovery()) — the assignment site is
 * not visible here; confirm against the full file.
 */
static uint8_t hdlc_reset;
/*
 * Arm a one-shot 200 ms timer that re-enables HDLC encoding after a
 * framing error. Uses the per-session timer when a session (@info) is
 * available, otherwise the driver-global timer; hdlc_timer_in_progress
 * guards against re-arming while a timer is already pending. Serialized
 * by md_session_lock.
 */
static void hdlc_reset_timer_start(struct diag_md_session_t *info)
	mutex_lock(&driver->md_session_lock);
	if (!hdlc_timer_in_progress) {
		hdlc_timer_in_progress = 1;
			/* Session-scoped reset: fires diag_md_hdlc_reset_timer_func(). */
			mod_timer(&info->hdlc_reset_timer,
				jiffies + msecs_to_jiffies(200));
			/* Global reset: fires hdlc_reset_timer_func(). */
			mod_timer(&driver->hdlc_reset_timer,
				jiffies + msecs_to_jiffies(200));
	mutex_unlock(&driver->md_session_lock);
/*
 * Timer callback for the driver-global HDLC reset timer: re-enables
 * HDLC encoding, notifies user-space clients via the update work item,
 * and clears the timer-in-progress guard.
 */
static void hdlc_reset_timer_func(unsigned long data)
	pr_debug("diag: In %s, re-enabling HDLC encoding\n",
	driver->hdlc_disabled = 0;
	queue_work(driver->diag_wq,
		&(driver->update_user_clients));
	hdlc_timer_in_progress = 0;
/*
 * Timer callback for a memory-device session's HDLC reset timer. @pid
 * identifies the owning session; if the session still exists its
 * hdlc_disabled flag is cleared and md clients are notified. Clears the
 * shared timer-in-progress guard in all cases.
 */
void diag_md_hdlc_reset_timer_func(unsigned long pid)
	struct diag_md_session_t *session_info = NULL;
	pr_debug("diag: In %s, re-enabling HDLC encoding\n",
	session_info = diag_md_session_get_pid(pid);
		/* Session found: re-enable HDLC for it and wake md clients. */
		session_info->hdlc_disabled = 0;
		queue_work(driver->diag_wq,
			&(driver->update_md_clients));
	hdlc_timer_in_progress = 0;
/*
 * Frame-sync recovery for the non-HDLC (raw framing) path. Scans @buf
 * for the start of a valid frame (CONTROL_CHAR start byte, version 1,
 * in-bounds length, CONTROL_CHAR terminator). Counts bytes that fail
 * the check in a static bad_byte_counter; once more than a full
 * request's worth of garbage has been seen, falls back to re-enabling
 * HDLC encoding (per-session if @info is set, globally otherwise).
 * When a frame start is found, any partial packet in progress is
 * discarded and parsing restarts from that byte.
 */
static void diag_hdlc_start_recovery(unsigned char *buf, int len,
				     struct diag_md_session_t *info)
	static uint32_t bad_byte_counter;
	unsigned char *start_ptr = NULL;
	struct diag_pkt_frame_t *actual_pkt = NULL;
	/* Arm the 200 ms timer that re-enables HDLC if recovery stalls. */
	hdlc_reset_timer_start(info);
	actual_pkt = (struct diag_pkt_frame_t *)buf;
	for (i = 0; i < len; i++) {
		/* A valid frame: start marker, version, terminator all check out. */
		if (actual_pkt->start == CONTROL_CHAR &&
				actual_pkt->version == 1 &&
				actual_pkt->length < len &&
				(*(uint8_t *)(buf + sizeof(struct diag_pkt_frame_t) +
				actual_pkt->length) == CONTROL_CHAR)) {
			start_ptr = &buf[i];
		/* Too much garbage seen across calls: give up on raw framing. */
		if (bad_byte_counter > (DIAG_MAX_REQ_SIZE +
				sizeof(struct diag_pkt_frame_t) + 1)) {
			bad_byte_counter = 0;
			pr_err("diag: In %s, re-enabling HDLC encoding\n",
			mutex_lock(&driver->hdlc_disable_mutex);
				info->hdlc_disabled = 0;
				driver->hdlc_disabled = 0;
			mutex_unlock(&driver->hdlc_disable_mutex);
			diag_update_md_clients(HDLC_SUPPORT_TYPE);
	/* Discard any partial packet reads */
	mutex_lock(&driver->hdlc_recovery_mutex);
	driver->incoming_pkt.processing = 0;
	mutex_unlock(&driver->hdlc_recovery_mutex);
	/* Resume parsing from the recovered frame boundary. */
	diag_process_non_hdlc_pkt(start_ptr, len - i, info);
/*
 * Parse one incoming buffer in the raw (non-HDLC) framing:
 *   [start=CONTROL_CHAR][version][length(le16)][payload][CONTROL_CHAR]
 *
 * Two phases, both under hdlc_recovery_mutex:
 *  1. If a partial packet from a previous call is in progress
 *     (driver->incoming_pkt.processing), append @buf to it, bounded by
 *     the partial buffer's capacity. When the packet completes, verify
 *     the trailing CONTROL_CHAR (entering recovery if absent) and hand
 *     the payload to diag_process_apps_pkt().
 *  2. Walk the remaining bytes packet-by-packet: validate the start
 *     byte and size against the request-buffer capacity, stash a
 *     trailing fragment into the partial-packet state, verify the
 *     terminator, and dispatch each complete payload.
 *
 * Any framing violation routes through diag_hdlc_start_recovery().
 */
void diag_process_non_hdlc_pkt(unsigned char *buf, int len,
			       struct diag_md_session_t *info)
	uint16_t pkt_len = 0;
	uint32_t read_bytes = 0;
	const uint32_t header_len = sizeof(struct diag_pkt_frame_t);
	struct diag_pkt_frame_t *actual_pkt = NULL;
	unsigned char *data_ptr = NULL;
	struct diag_partial_pkt_t *partial_pkt = NULL;
	mutex_lock(&driver->hdlc_recovery_mutex);
	if (!buf || len <= 0) {
		mutex_unlock(&driver->hdlc_recovery_mutex);
	partial_pkt = &driver->incoming_pkt;
	if (!partial_pkt->processing) {
		/* No carry-over packet; go straight to the framing loop. */
		mutex_unlock(&driver->hdlc_recovery_mutex);
	if (partial_pkt->remaining > len) {
		/* This buffer still doesn't complete the packet: append all of it. */
		if ((partial_pkt->read_len + len) > partial_pkt->capacity) {
			pr_err("diag: Invalid length %d, %d received in %s\n",
			       partial_pkt->read_len, len, __func__);
			mutex_unlock(&driver->hdlc_recovery_mutex);
		memcpy(partial_pkt->data + partial_pkt->read_len, buf, len);
		partial_pkt->read_len += len;
		partial_pkt->remaining -= len;
		/* The packet completes within this buffer: copy just the tail. */
		if ((partial_pkt->read_len + partial_pkt->remaining) >
			partial_pkt->capacity) {
			pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
			       partial_pkt->read_len,
			       partial_pkt->remaining, __func__);
			mutex_unlock(&driver->hdlc_recovery_mutex);
		memcpy(partial_pkt->data + partial_pkt->read_len, buf,
		       partial_pkt->remaining);
		read_bytes += partial_pkt->remaining;
		partial_pkt->read_len += partial_pkt->remaining;
		partial_pkt->remaining = 0;
	if (partial_pkt->remaining == 0) {
		actual_pkt = (struct diag_pkt_frame_t *)(partial_pkt->data);
		data_ptr = partial_pkt->data + header_len;
		/* Terminator missing: drop the mutex and resynchronize. */
		if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
			mutex_unlock(&driver->hdlc_recovery_mutex);
			diag_hdlc_start_recovery(buf, len, info);
			mutex_lock(&driver->hdlc_recovery_mutex);
		err = diag_process_apps_pkt(data_ptr,
					    actual_pkt->length, info);
			pr_err("diag: In %s, unable to process incoming data packet, err: %d\n",
		mutex_unlock(&driver->hdlc_recovery_mutex);
		/* Reassembly finished: reset the partial-packet state. */
		partial_pkt->read_len = 0;
		partial_pkt->total_len = 0;
		partial_pkt->processing = 0;
		mutex_unlock(&driver->hdlc_recovery_mutex);
	mutex_unlock(&driver->hdlc_recovery_mutex);
	/* Phase 2: iterate over the complete frames left in this buffer. */
	while (read_bytes < len) {
		actual_pkt = (struct diag_pkt_frame_t *)buf;
		pkt_len = actual_pkt->length;
		if (actual_pkt->start != CONTROL_CHAR) {
			/* Lost sync: recover, then answer with an error response. */
			diag_hdlc_start_recovery(buf, len, info);
			diag_send_error_rsp(buf, len, info);
		mutex_lock(&driver->hdlc_recovery_mutex);
		if (pkt_len + header_len > partial_pkt->capacity) {
			pr_err("diag: In %s, incoming data is too large for the request buffer %d\n",
			mutex_unlock(&driver->hdlc_recovery_mutex);
			diag_hdlc_start_recovery(buf, len, info);
		if ((pkt_len + header_len) > (len - read_bytes)) {
			/* Frame extends past this buffer: stash it and wait for more. */
			partial_pkt->read_len = len - read_bytes;
			partial_pkt->total_len = pkt_len + header_len;
			partial_pkt->remaining = partial_pkt->total_len -
						 partial_pkt->read_len;
			partial_pkt->processing = 1;
			memcpy(partial_pkt->data, buf, partial_pkt->read_len);
			mutex_unlock(&driver->hdlc_recovery_mutex);
		data_ptr = buf + header_len;
		/* Terminator check for the in-place frame. */
		if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
			mutex_unlock(&driver->hdlc_recovery_mutex);
			diag_hdlc_start_recovery(buf, len, info);
			mutex_lock(&driver->hdlc_recovery_mutex);
		err = diag_process_apps_pkt(data_ptr,
					    actual_pkt->length, info);
			mutex_unlock(&driver->hdlc_recovery_mutex);
		/* +1 accounts for the trailing CONTROL_CHAR terminator. */
		read_bytes += header_len + pkt_len + 1;
		buf += header_len + pkt_len + 1; /* advance to next pkt */
		mutex_unlock(&driver->hdlc_recovery_mutex);
/*
 * MUX read-done callback: a buffer of data arrived from the active
 * transport. Routes it to the HDLC decoder or the raw-framing parser
 * depending on driver->hdlc_disabled, then queues the next read.
 */
static int diagfwd_mux_read_done(unsigned char *buf, int len, int ctxt)
	/* Reject NULL buffers and non-positive lengths. */
	if (!buf || len <= 0)
	if (!driver->hdlc_disabled)
		diag_process_hdlc_pkt(buf, len, NULL);
		/* HDLC disabled: use the raw framing parser. */
		diag_process_non_hdlc_pkt(buf, len, NULL);
	/* Keep the transport pipeline full. */
	diag_mux_queue_read(ctxt);
/*
 * MUX write-done callback: the transport finished sending @buf. The
 * buffer context encodes which peripheral, buffer type, and buffer
 * number the data came from; that buffer is returned to its owner
 * (peripheral forwarding layer, apps-data mempool, or the response
 * buffer, whose busy flag is cleared under rsp_buf_busy_lock).
 */
static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
	unsigned long flags;
	int peripheral = -1;
	if (!buf || len < 0)
	/* Decompose the context into owner / type / buffer index. */
	peripheral = GET_BUF_PERIPHERAL(buf_ctxt);
	type = GET_BUF_TYPE(buf_ctxt);
	num = GET_BUF_NUM(buf_ctxt);
	if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
			 "Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
			 peripheral, type, num);
		diagfwd_write_done(peripheral, type, num);
		diag_ws_on_copy(DIAG_WS_MUX);
	} else if (peripheral == APPS_DATA) {
		/* Apps-generated data: return the buffer to the mempool. */
		diagmem_free(driver, (unsigned char *)buf,
		pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
				   peripheral, __func__, type);
	if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
		diagfwd_write_done(peripheral, type, num);
	if (peripheral == APPS_DATA ||
	    ctxt == DIAG_MEMORY_DEVICE_MODE) {
		/* The command response buffer drained: allow the next response. */
		spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
		driver->rsp_buf_busy = 0;
		driver->encoded_rsp_len = 0;
		spin_unlock_irqrestore(&driver->rsp_buf_busy_lock,
	pr_err_ratelimited("diag: Incorrect data type %d, buf_ctxt: %d in %s\n",
			   type, buf_ctxt, __func__);
/* Callbacks registered with the diag mux layer for the local processor. */
static struct diag_mux_ops diagfwd_mux_ops = {
	.open = diagfwd_mux_open,
	.close = diagfwd_mux_close,
	.read_done = diagfwd_mux_read_done,
	.write_done = diagfwd_mux_write_done
/*
 * One-time initialization of the diag forwarding core: capability
 * flags, locks, the HDLC decode state, per-peripheral feature and
 * buffering-mode defaults, the working buffers (response, HDLC,
 * user-space, client map, request/DCI/response scratch buffers), the
 * diag workqueue, and registration with the mux for the local
 * processor. On any failure the error path frees everything that was
 * allocated. Returns 0 on success.
 */
int diagfwd_init(void)
	driver->use_device_tree = has_device_tree();
	/* All processors start in real-time logging mode. */
	for (i = 0; i < DIAG_NUM_PROC; i++)
		driver->real_time_mode[i] = 1;
	/* Capabilities advertised to peripherals in the feature mask. */
	driver->supports_separate_cmdrsp = 1;
	driver->supports_apps_hdlc_encoding = 1;
	driver->supports_apps_header_untagging = 1;
	driver->supports_pd_buffering = 1;
	for (i = 0; i < NUM_PERIPHERALS; i++)
		driver->peripheral_untag[i] = 0;
	mutex_init(&driver->diag_hdlc_mutex);
	mutex_init(&driver->diag_cntl_mutex);
	mutex_init(&driver->mode_lock);
	/* Buffer holding the HDLC-encoded command response. */
	driver->encoded_rsp_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE +
					  APF_DIAG_PADDING, GFP_KERNEL);
	if (!driver->encoded_rsp_buf)
	kmemleak_not_leak(driver->encoded_rsp_buf);
	hdlc_decode = kzalloc(sizeof(struct diag_hdlc_decode_type),
	/* Global HDLC reset timer; sessions carry their own (see above). */
	setup_timer(&driver->hdlc_reset_timer, hdlc_reset_timer_func, 0);
	kmemleak_not_leak(hdlc_decode);
	driver->encoded_rsp_len = 0;
	driver->rsp_buf_busy = 0;
	spin_lock_init(&driver->rsp_buf_busy_lock);
	driver->user_space_data_busy = 0;
	driver->hdlc_buf_len = 0;
	INIT_LIST_HEAD(&driver->cmd_reg_list);
	driver->cmd_reg_count = 0;
	mutex_init(&driver->cmd_reg_mutex);
	/* Per-peripheral feature state: all off until a feature mask arrives. */
	for (i = 0; i < NUM_PERIPHERALS; i++) {
		driver->feature[i].separate_cmd_rsp = 0;
		driver->feature[i].stm_support = DISABLE_STM;
		driver->feature[i].rcvd_feature_mask = 0;
		driver->feature[i].peripheral_buffering = 0;
		driver->feature[i].pd_buffering = 0;
		driver->feature[i].encode_hdlc = 0;
		driver->feature[i].untag_header =
			DISABLE_PKT_HEADER_UNTAGGING;
		driver->feature[i].mask_centralization = 0;
		driver->feature[i].log_on_demand = 0;
		driver->feature[i].sent_feature_mask = 0;
	/* Default buffering mode: streaming with default watermarks. */
	for (i = 0; i < NUM_MD_SESSIONS; i++) {
		driver->buffering_mode[i].peripheral = i;
		driver->buffering_mode[i].mode = DIAG_BUFFERING_MODE_STREAMING;
		driver->buffering_mode[i].high_wm_val = DEFAULT_HIGH_WM_VAL;
		driver->buffering_mode[i].low_wm_val = DEFAULT_LOW_WM_VAL;
	for (i = 0; i < NUM_STM_PROCESSORS; i++) {
		driver->stm_state_requested[i] = DISABLE_STM;
		driver->stm_state[i] = DISABLE_STM;
	/* Working buffers; each guarded so re-init does not leak. */
	if (driver->hdlc_buf == NULL) {
		driver->hdlc_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
		if (!driver->hdlc_buf)
		kmemleak_not_leak(driver->hdlc_buf);
	if (driver->user_space_data_buf == NULL)
		driver->user_space_data_buf = kzalloc(USER_SPACE_DATA,
	if (driver->user_space_data_buf == NULL)
	kmemleak_not_leak(driver->user_space_data_buf);
	if (driver->client_map == NULL &&
	    (driver->client_map = kzalloc
	     ((driver->num_clients) * sizeof(struct diag_client_map),
	      GFP_KERNEL)) == NULL)
	kmemleak_not_leak(driver->client_map);
	if (driver->data_ready == NULL &&
	    (driver->data_ready = kzalloc(driver->num_clients * sizeof(int)
	     , GFP_KERNEL)) == NULL)
	kmemleak_not_leak(driver->data_ready);
	if (driver->apps_req_buf == NULL) {
		driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
		if (!driver->apps_req_buf)
		kmemleak_not_leak(driver->apps_req_buf);
	if (driver->dci_pkt_buf == NULL) {
		driver->dci_pkt_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL);
		if (!driver->dci_pkt_buf)
		kmemleak_not_leak(driver->dci_pkt_buf);
	if (driver->apps_rsp_buf == NULL) {
		driver->apps_rsp_buf = kzalloc(DIAG_MAX_RSP_SIZE, GFP_KERNEL);
		if (driver->apps_rsp_buf == NULL)
		kmemleak_not_leak(driver->apps_rsp_buf);
	driver->diag_wq = create_singlethread_workqueue("diag_wq");
	if (!driver->diag_wq)
	ret = diag_mux_register(DIAG_LOCAL_PROC, DIAG_LOCAL_PROC,
		pr_err("diag: Unable to register with USB, err: %d\n", ret);
	/* Error path: undo everything allocated above. */
	pr_err("diag: In %s, couldn't initialize diag\n", __func__);
	diag_usb_exit(DIAG_USB_LOCAL);
	kfree(driver->encoded_rsp_buf);
	kfree(driver->hdlc_buf);
	kfree(driver->client_map);
	kfree(driver->data_ready);
	kfree(driver->apps_req_buf);
	kfree(driver->dci_pkt_buf);
	kfree(driver->apps_rsp_buf);
	kfree(driver->user_space_data_buf);
	if (driver->diag_wq)
		destroy_workqueue(driver->diag_wq);
/*
 * Teardown counterpart of diagfwd_init(): frees every buffer that init
 * allocated and destroys the diag workqueue. kfree(NULL) is a no-op,
 * so unallocated members are safe to pass.
 */
void diagfwd_exit(void)
	kfree(driver->encoded_rsp_buf);
	kfree(driver->hdlc_buf);
	kfree(driver->client_map);
	kfree(driver->data_ready);
	kfree(driver->apps_req_buf);
	kfree(driver->dci_pkt_buf);
	kfree(driver->apps_rsp_buf);
	kfree(driver->user_space_data_buf);
	destroy_workqueue(driver->diag_wq);