OSDN Git Service

Merge android-4.4.179 (aab9adb) into msm-4.4
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers / char / diag / diag_dci.c
1 /* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/uaccess.h>
16 #include <linux/diagchar.h>
17 #include <linux/sched.h>
18 #include <linux/err.h>
19 #include <linux/delay.h>
20 #include <linux/workqueue.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_wakeup.h>
24 #include <linux/spinlock.h>
25 #include <linux/ratelimit.h>
26 #include <linux/reboot.h>
27 #include <asm/current.h>
28 #include <soc/qcom/restart.h>
29 #include <linux/vmalloc.h>
30 #ifdef CONFIG_DIAG_OVER_USB
31 #include <linux/usb/usbdiag.h>
32 #endif
33 #include "diagchar_hdlc.h"
34 #include "diagmem.h"
35 #include "diagchar.h"
36 #include "diagfwd.h"
37 #include "diagfwd_cntl.h"
38 #include "diag_dci.h"
39 #include "diag_masks.h"
40 #include "diagfwd_bridge.h"
41 #include "diagfwd_peripheral.h"
42 #include "diag_ipc_logging.h"
43
/* One-shot timer that batches DCI buffer draining (see dci_check_drain_timer) */
static struct timer_list dci_drain_timer;
/* Non-zero while a drain is pending; cleared by dci_data_drain_work_fn */
static int dci_timer_in_progress;
static struct work_struct dci_data_drain_work;

/* Carries a DCI packet that spans multiple bridge reads between calls */
struct diag_dci_partial_pkt_t partial_pkt;

unsigned int dci_max_reg = 100;
unsigned int dci_max_clients = 10;
struct mutex dci_log_mask_mutex;
struct mutex dci_event_mask_mutex;

/*
 * DCI_HANDSHAKE_RETRY_TIME: Time to wait (in microseconds) before checking the
 * connection status again.
 *
 * DCI_HANDSHAKE_WAIT_TIME: Timeout (in milliseconds) to check for dci
 * connection status
 */
#define DCI_HANDSHAKE_RETRY_TIME        500000
#define DCI_HANDSHAKE_WAIT_TIME         200

/* Protects the DCI wakeup-source bookkeeping */
spinlock_t ws_lock;
unsigned long ws_lock_flags;
67
/*
 * Per-DCI-processor operations table. Entry 0 is the local (MSM)
 * processor; the optional second entry covers the remote MDM reached
 * over the diag bridge.
 */
struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC] = {
	{
		.ctx = 0,
		.send_log_mask = diag_send_dci_log_mask,
		.send_event_mask = diag_send_dci_event_mask,
		.peripheral_status = 0,
		.mempool = 0,
	},
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	{
		.ctx = DIAGFWD_MDM_DCI,
		.send_log_mask = diag_send_dci_log_mask_remote,
		.send_event_mask = diag_send_dci_event_mask_remote,
		.peripheral_status = 0,
		.mempool = POOL_TYPE_MDM_DCI_WRITE,
	}
#endif
};
86
/*
 * Connection/handshake state for each DCI channel; retry_count is used
 * by dci_handshake_work_fn to bound remote handshake attempts.
 */
struct dci_channel_status_t dci_channel_status[NUM_DCI_PROC] = {
	{
		.id = 0,
		.open = 0,
		.retry_count = 0
	},
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	{
		.id = DIAGFWD_MDM_DCI,
		.open = 0,
		.retry_count = 0
	}
#endif
};
101
/* Number of milliseconds anticipated to process the DCI data */
#define DCI_WAKEUP_TIMEOUT 1

/*
 * True when @buf can be queued for draining: the buffer exists, has
 * backing storage, is not busy being written to user space, and holds
 * unread data.
 *
 * Fix: the macro argument is now parenthesized at every use so the
 * expansion is safe for non-trivial arguments, and the stray trailing
 * line continuation (which silently glued the following blank line to
 * the definition) is removed.
 */
#define DCI_CAN_ADD_BUF_TO_LIST(buf)                                    \
	((buf) && (buf)->data && !(buf)->in_busy && (buf)->data_len > 0)
#ifdef CONFIG_DEBUG_FS
/* Circular trace of recent DCI reads, exposed through debugfs */
struct diag_dci_data_info *dci_traffic;
struct mutex dci_stat_mutex;
/*
 * Record one DCI read into the debugfs traffic trace.
 * @read_bytes: number of bytes read in this transaction
 * @ch_type: channel type the data arrived on
 * @peripheral: source peripheral id
 * @proc: DCI processor token (local or remote)
 */
void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
                             uint8_t peripheral, uint8_t proc)
{
        static int curr_dci_data;
        static unsigned long iteration;
        struct diag_dci_data_info *temp_data = dci_traffic;
        if (!temp_data)
                return;
        mutex_lock(&dci_stat_mutex);
        /* The trace is circular: wrap after DIAG_DCI_DEBUG_CNT entries */
        if (curr_dci_data == DIAG_DCI_DEBUG_CNT)
                curr_dci_data = 0;
        temp_data += curr_dci_data;
        temp_data->iteration = iteration + 1;
        temp_data->data_size = read_bytes;
        temp_data->peripheral = peripheral;
        temp_data->ch_type = ch_type;
        temp_data->proc = proc;
        diag_get_timestamp(temp_data->time_stamp);
        curr_dci_data++;
        iteration++;
        mutex_unlock(&dci_stat_mutex);
}
#else
/* Stub when debugfs support is compiled out */
void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
                             uint8_t peripheral, uint8_t proc) { }
#endif
137
138 static int check_peripheral_dci_support(int peripheral_id, int dci_proc_id)
139 {
140         int dci_peripheral_list = 0;
141
142         if (dci_proc_id < 0 || dci_proc_id >= NUM_DCI_PROC) {
143                 pr_err("diag:In %s,not a supported DCI proc id\n", __func__);
144                 return 0;
145         }
146         if (peripheral_id < 0 || peripheral_id >= NUM_PERIPHERALS) {
147                 pr_err("diag:In %s,not a valid peripheral id\n", __func__);
148                 return 0;
149         }
150         dci_peripheral_list = dci_ops_tbl[dci_proc_id].peripheral_status;
151
152         if (dci_peripheral_list <= 0 || dci_peripheral_list > DIAG_CON_ALL) {
153                 pr_err("diag:In %s,not a valid dci peripheral mask\n",
154                          __func__);
155                 return 0;
156         }
157         /* Remove APSS bit mask information */
158         dci_peripheral_list = dci_peripheral_list >> 1;
159
160         if ((1 << peripheral_id) & (dci_peripheral_list))
161                 return 1;
162         else
163                 return 0;
164 }
165
166 static void create_dci_log_mask_tbl(unsigned char *mask, uint8_t dirty)
167 {
168         unsigned char *temp = mask;
169         uint8_t i;
170
171         if (!mask)
172                 return;
173
174         /* create hard coded table for log mask with 16 categories */
175         for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
176                 *temp = i;
177                 temp++;
178                 *temp = dirty ? 1 : 0;
179                 temp++;
180                 memset(temp, 0, DCI_MAX_ITEMS_PER_LOG_CODE);
181                 temp += DCI_MAX_ITEMS_PER_LOG_CODE;
182         }
183 }
184
185 static void create_dci_event_mask_tbl(unsigned char *tbl_buf)
186 {
187         if (tbl_buf)
188                 memset(tbl_buf, 0, DCI_EVENT_MASK_SIZE);
189 }
190
/*
 * Drain-timer callback. Timers fire in atomic context, so defer the
 * actual buffer drain to process context on the DCI workqueue.
 */
void dci_drain_data(unsigned long data)
{
        queue_work(driver->diag_dci_wq, &dci_data_drain_work);
}
195
static void dci_check_drain_timer(void)
{
        /*
         * Arm a one-shot drain in 200 ms unless one is already pending,
         * batching many small reads into a single drain pass.
         */
        if (!dci_timer_in_progress) {
                dci_timer_in_progress = 1;
                mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(200));
        }
}
203
204 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Work function driving the remote DCI connection handshake. Re-checks
 * the channel open status up to max_retries times; once the retries are
 * exhausted the bridge channel is closed. Each pass re-arms the wait
 * timer, which queues this work again on expiry.
 */
static void dci_handshake_work_fn(struct work_struct *work)
{
        int err = 0;
        int max_retries = 5;

        struct dci_channel_status_t *status = container_of(work,
                                                struct dci_channel_status_t,
                                                handshake_work);

        if (status->open) {
                pr_debug("diag: In %s, remote dci channel is open, index: %d\n",
                         __func__, status->id);
                return;
        }

        if (status->retry_count == max_retries) {
                status->retry_count = 0;
                pr_info("diag: dci channel connection handshake timed out, id: %d\n",
                        status->id);
                err = diagfwd_bridge_close(TOKEN_TO_BRIDGE(status->id));
                if (err) {
                        pr_err("diag: In %s, unable to close dci channel id: %d, err: %d\n",
                               __func__, status->id, err);
                }
                return;
        }
        status->retry_count++;
        /*
         * Sleep for sometime to check for the connection status again. The
         * value should be optimum to include a roundabout time for a small
         * packet to the remote processor.
         */
        usleep_range(DCI_HANDSHAKE_RETRY_TIME, DCI_HANDSHAKE_RETRY_TIME + 100);
        mod_timer(&status->wait_time,
                  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
}
241
242 static void dci_chk_handshake(unsigned long data)
243 {
244         int index = (int)data;
245
246         if (index < 0 || index >= NUM_DCI_PROC)
247                 return;
248
249         queue_work(driver->diag_dci_wq,
250                    &dci_channel_status[index].handshake_work);
251 }
252 #endif
253
254 static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
255 {
256         if (!buffer || buffer->data)
257                 return -EINVAL;
258
259         switch (type) {
260         case DCI_BUF_PRIMARY:
261                 buffer->capacity = IN_BUF_SIZE;
262                 buffer->data = vzalloc(buffer->capacity);
263                 if (!buffer->data)
264                         return -ENOMEM;
265                 break;
266         case DCI_BUF_SECONDARY:
267                 buffer->data = NULL;
268                 buffer->capacity = IN_BUF_SIZE;
269                 break;
270         case DCI_BUF_CMD:
271                 buffer->capacity = DIAG_MAX_REQ_SIZE + DCI_BUF_SIZE;
272                 buffer->data = vzalloc(buffer->capacity);
273                 if (!buffer->data)
274                         return -ENOMEM;
275                 break;
276         default:
277                 pr_err("diag: In %s, unknown type %d", __func__, type);
278                 return -EINVAL;
279         }
280
281         buffer->data_len = 0;
282         buffer->in_busy = 0;
283         buffer->buf_type = type;
284         mutex_init(&buffer->data_mutex);
285
286         return 0;
287 }
288
289 static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
290 {
291         if (!buf)
292                 return -EINVAL;
293
294         /* Return 1 if the buffer is not busy and can hold new data */
295         if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
296                 return 1;
297
298         return 0;
299 }
300
/*
 * Queue a filled DCI buffer on the client's write list so it can be
 * drained to user space. No-op when the buffer is empty, has no
 * backing storage, or is already on the list. Takes write_buf_mutex
 * before data_mutex; keep this order everywhere to avoid deadlock.
 */
static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
                                   struct diag_dci_buffer_t *buf)
{
        if (!buf || !client || !buf->data)
                return;

        if (buf->in_list || buf->data_len == 0)
                return;

        mutex_lock(&client->write_buf_mutex);
        list_add_tail(&buf->buf_track, &client->list_write_buf);
        /*
         * In the case of DCI, there can be multiple packets in one read. To
         * calculate the wakeup source reference count, we must account for each
         * packet in a single read.
         */
        diag_ws_on_read(DIAG_WS_DCI, buf->data_len);
        mutex_lock(&buf->data_mutex);
        buf->in_busy = 1;
        buf->in_list = 1;
        mutex_unlock(&buf->data_mutex);
        mutex_unlock(&client->write_buf_mutex);
}
324
325 static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
326                                int data_source, int len)
327 {
328         struct diag_dci_buffer_t *buf_primary = NULL;
329         struct diag_dci_buffer_t *buf_temp = NULL;
330         struct diag_dci_buffer_t *curr = NULL;
331
332         if (!client)
333                 return -EINVAL;
334         if (len < 0 || len > IN_BUF_SIZE)
335                 return -EINVAL;
336
337         curr = client->buffers[data_source].buf_curr;
338         buf_primary = client->buffers[data_source].buf_primary;
339
340         if (curr && diag_dci_check_buffer(curr, len) == 1)
341                 return 0;
342
343         dci_add_buffer_to_list(client, curr);
344         client->buffers[data_source].buf_curr = NULL;
345
346         if (diag_dci_check_buffer(buf_primary, len) == 1) {
347                 client->buffers[data_source].buf_curr = buf_primary;
348                 return 0;
349         }
350
351         buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
352         if (!buf_temp)
353                 return -EIO;
354
355         if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
356                 buf_temp->data = diagmem_alloc(driver, IN_BUF_SIZE,
357                                                POOL_TYPE_DCI);
358                 if (!buf_temp->data) {
359                         kfree(buf_temp);
360                         buf_temp = NULL;
361                         return -ENOMEM;
362                 }
363                 client->buffers[data_source].buf_curr = buf_temp;
364                 return 0;
365         }
366
367         kfree(buf_temp);
368         buf_temp = NULL;
369         return -EIO;
370 }
371
372 void diag_dci_wakeup_clients()
373 {
374         struct list_head *start, *temp;
375         struct diag_dci_client_tbl *entry = NULL;
376
377         mutex_lock(&driver->dci_mutex);
378         list_for_each_safe(start, temp, &driver->dci_client_list) {
379                 entry = list_entry(start, struct diag_dci_client_tbl, track);
380
381                 /*
382                  * Don't wake up the client when there is no pending buffer to
383                  * write or when it is writing to user space
384                  */
385                 if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
386                         mutex_lock(&entry->write_buf_mutex);
387                         entry->in_service = 1;
388                         mutex_unlock(&entry->write_buf_mutex);
389                         diag_update_sleeping_process(entry->client->tgid,
390                                                      DCI_DATA_TYPE);
391                 }
392         }
393         mutex_unlock(&driver->dci_mutex);
394 }
395
/*
 * Workqueue function that drains all clients' DCI buffers: every
 * primary, command, and current buffer with pending data is moved to
 * the owning client's write list, and sleeping clients are woken.
 * Runs from the drain timer (via dci_drain_data) and direct queueing.
 */
void dci_data_drain_work_fn(struct work_struct *work)
{
        int i;
        struct list_head *start, *temp;
        struct diag_dci_client_tbl *entry = NULL;
        struct diag_dci_buf_peripheral_t *proc_buf = NULL;
        struct diag_dci_buffer_t *buf_temp = NULL;

        mutex_lock(&driver->dci_mutex);
        list_for_each_safe(start, temp, &driver->dci_client_list) {
                entry = list_entry(start, struct diag_dci_client_tbl, track);
                for (i = 0; i < entry->num_buffers; i++) {
                        proc_buf = &entry->buffers[i];

                        mutex_lock(&proc_buf->buf_mutex);
                        buf_temp = proc_buf->buf_primary;
                        if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
                                dci_add_buffer_to_list(entry, buf_temp);

                        buf_temp = proc_buf->buf_cmd;
                        if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
                                dci_add_buffer_to_list(entry, buf_temp);

                        /* The current buffer is detached once queued */
                        buf_temp = proc_buf->buf_curr;
                        if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
                                dci_add_buffer_to_list(entry, buf_temp);
                                proc_buf->buf_curr = NULL;
                        }
                        mutex_unlock(&proc_buf->buf_mutex);
                }
                /* Wake the client unless it is already reading its data */
                if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
                        mutex_lock(&entry->write_buf_mutex);
                        entry->in_service = 1;
                        mutex_unlock(&entry->write_buf_mutex);
                        diag_update_sleeping_process(entry->client->tgid,
                                                     DCI_DATA_TYPE);
                }
        }
        mutex_unlock(&driver->dci_mutex);
        /* Allow the drain timer to be re-armed */
        dci_timer_in_progress = 0;
}
437
438 static int diag_process_single_dci_pkt(unsigned char *buf, int len,
439                                        int data_source, int token)
440 {
441         uint8_t cmd_code = 0;
442
443         if (!buf || len < 0) {
444                 pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
445                         __func__, buf, len);
446                 return -EIO;
447         }
448
449         cmd_code = *(uint8_t *)buf;
450
451         switch (cmd_code) {
452         case LOG_CMD_CODE:
453                 extract_dci_log(buf, len, data_source, token, NULL);
454                 break;
455         case EVENT_CMD_CODE:
456                 extract_dci_events(buf, len, data_source, token, NULL);
457                 break;
458         case EXT_HDR_CMD_CODE:
459                 extract_dci_ext_pkt(buf, len, data_source, token);
460                 break;
461         case DCI_PKT_RSP_CODE:
462         case DCI_DELAYED_RSP_CODE:
463                 extract_dci_pkt_rsp(buf, len, data_source, token);
464                 break;
465         case DCI_CONTROL_PKT_CODE:
466                 extract_dci_ctrl_pkt(buf, len, token);
467                 break;
468         default:
469                 pr_err("diag: Unable to process single DCI packet, cmd_code: %d, data_source: %d",
470                         cmd_code, data_source);
471                 return -EINVAL;
472         }
473
474         return 0;
475 }
476
477 /* Process the data read from apps userspace client */
478 void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
479 {
480         int err = 0;
481
482         if (!buf) {
483                 pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
484                 return;
485         }
486
487         if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
488                                                 && data_type != DCI_PKT_TYPE) {
489                 pr_err("diag: In %s, unsupported data_type: 0x%x\n",
490                                 __func__, (unsigned int)data_type);
491                 return;
492         }
493
494         err = diag_process_single_dci_pkt(buf, recd_bytes, APPS_DATA,
495                                           DCI_LOCAL_PROC);
496         if (err)
497                 return;
498
499         /* wake up all sleeping DCI clients which have some data */
500         diag_dci_wakeup_clients();
501         dci_check_drain_timer();
502 }
503
/*
 * diag_process_remote_dci_read_data - process DCI bytes read from a
 * remote processor over the bridge.
 *
 * The stream may contain several concatenated DCI packets, and a packet
 * may span two reads; the module-global partial_pkt carries the
 * remainder between calls. Each packet is framed by
 * struct diag_dci_header_t whose length field covers the payload.
 */
void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes)
{
        int read_bytes = 0, err = 0;
        uint16_t dci_pkt_len;
        struct diag_dci_header_t *header = NULL;
        int header_len = sizeof(struct diag_dci_header_t);
        int token = BRIDGE_TO_TOKEN(index);

        if (!buf)
                return;

        diag_dci_record_traffic(recd_bytes, 0, 0, token);

        /* No partial packet pending: parse the buffer from the start */
        if (!partial_pkt.processing)
                goto start;

        if (partial_pkt.remaining > recd_bytes) {
                /* Whole read still belongs to the pending partial packet */
                if ((partial_pkt.read_len + recd_bytes) >
                                                        (MAX_DCI_PACKET_SZ)) {
                        pr_err("diag: Invalid length %d, %d received in %s\n",
                               partial_pkt.read_len, recd_bytes, __func__);
                        goto end;
                }
                memcpy(partial_pkt.data + partial_pkt.read_len, buf,
                                                                recd_bytes);
                read_bytes += recd_bytes;
                buf += read_bytes;
                partial_pkt.read_len += recd_bytes;
                partial_pkt.remaining -= recd_bytes;
        } else {
                /* This read completes the pending partial packet */
                if ((partial_pkt.read_len + partial_pkt.remaining) >
                                                        (MAX_DCI_PACKET_SZ)) {
                        pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
                               partial_pkt.read_len,
                               partial_pkt.remaining, __func__);
                        goto end;
                }
                memcpy(partial_pkt.data + partial_pkt.read_len, buf,
                                                partial_pkt.remaining);
                read_bytes += partial_pkt.remaining;
                buf += read_bytes;
                partial_pkt.read_len += partial_pkt.remaining;
                partial_pkt.remaining = 0;
        }

        if (partial_pkt.remaining == 0) {
                /*
                 * Retrieve from the DCI control packet after the header = start
                 * (1 byte) + version (1 byte) + length (2 bytes)
                 */
                diag_process_single_dci_pkt(partial_pkt.data + 4,
                                partial_pkt.read_len - header_len,
                                DCI_REMOTE_DATA, token);
                partial_pkt.read_len = 0;
                partial_pkt.total_len = 0;
                partial_pkt.processing = 0;
                goto start;
        }
        goto end;

start:
        while (read_bytes < recd_bytes) {
                header = (struct diag_dci_header_t *)buf;
                dci_pkt_len = header->length;

                /* Without clients, skip data packets but keep control packets */
                if (header->cmd_code != DCI_CONTROL_PKT_CODE &&
                        driver->num_dci_client == 0) {
                        read_bytes += header_len + dci_pkt_len;
                        buf += header_len + dci_pkt_len;
                        continue;
                }

                if (dci_pkt_len + header_len > MAX_DCI_PACKET_SZ) {
                        pr_err("diag: Invalid length in the dci packet field %d\n",
                                                                dci_pkt_len);
                        break;
                }

                /* Packet spills past this read: stash it in partial_pkt */
                if ((dci_pkt_len + header_len) > (recd_bytes - read_bytes)) {
                        partial_pkt.read_len = recd_bytes - read_bytes;
                        partial_pkt.total_len = dci_pkt_len + header_len;
                        partial_pkt.remaining = partial_pkt.total_len -
                                                partial_pkt.read_len;
                        partial_pkt.processing = 1;
                        memcpy(partial_pkt.data, buf, partial_pkt.read_len);
                        break;
                }
                /*
                 * Retrieve from the DCI control packet after the header = start
                 * (1 byte) + version (1 byte) + length (2 bytes).
                 * NOTE(review): token is derived from index above, yet
                 * DCI_MDM_PROC is hard-coded here — presumably only one
                 * remote proc is supported; confirm before adding more.
                 */
                err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
                                                 DCI_REMOTE_DATA, DCI_MDM_PROC);
                if (err)
                        break;
                read_bytes += header_len + dci_pkt_len;
                buf += header_len + dci_pkt_len; /* advance to next DCI pkt */
        }
end:
        if (err)
                return;
        /* wake up all sleeping DCI clients which have some data */
        diag_dci_wakeup_clients();
        dci_check_drain_timer();
        return;
}
610
611 /* Process the data read from the peripheral dci channels */
612 void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
613                                       int recd_bytes)
614 {
615         int read_bytes = 0, err = 0;
616         uint16_t dci_pkt_len;
617         struct diag_dci_pkt_header_t *header = NULL;
618         uint8_t recv_pkt_cmd_code;
619
620         if (!buf || !p_info)
621                 return;
622
623         /*
624          * Release wakeup source when there are no more clients to
625          * process DCI data
626          */
627         if (driver->num_dci_client == 0) {
628                 diag_ws_reset(DIAG_WS_DCI);
629                 return;
630         }
631
632         diag_dci_record_traffic(recd_bytes, p_info->type, p_info->peripheral,
633                                 DCI_LOCAL_PROC);
634         while (read_bytes < recd_bytes) {
635                 header = (struct diag_dci_pkt_header_t *)buf;
636                 recv_pkt_cmd_code = header->pkt_code;
637                 dci_pkt_len = header->len;
638
639                 /*
640                  * Check if the length of the current packet is lesser than the
641                  * remaining bytes in the received buffer. This includes space
642                  * for the Start byte (1), Version byte (1), length bytes (2)
643                  * and End byte (1)
644                  */
645                 if ((dci_pkt_len + 5) > (recd_bytes - read_bytes)) {
646                         pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
647                                 __func__, recd_bytes, dci_pkt_len);
648                         diag_ws_release();
649                         return;
650                 }
651                 /*
652                  * Retrieve from the DCI control packet after the header = start
653                  * (1 byte) + version (1 byte) + length (2 bytes)
654                  */
655                 err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
656                                                   (int)p_info->peripheral,
657                                                   DCI_LOCAL_PROC);
658                 if (err) {
659                         diag_ws_release();
660                         break;
661                 }
662                 read_bytes += 5 + dci_pkt_len;
663                 buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
664         }
665
666         if (err)
667                 return;
668         /* wake up all sleeping DCI clients which have some data */
669         diag_dci_wakeup_clients();
670         dci_check_drain_timer();
671         return;
672 }
673
/*
 * diag_dci_query_log_mask - test whether @entry has @log_code enabled
 * in its DCI log mask.
 *
 * Returns 1 when the bit for the log code is set, 0 otherwise
 * (including for an invalid entry or an out-of-range code).
 */
int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
                            uint16_t log_code)
{
        uint16_t item_num;
        uint8_t equip_id, *log_mask_ptr, byte_mask;
        int byte_index, offset;

        if (!entry) {
                pr_err("diag: In %s, invalid client entry\n", __func__);
                return 0;
        }

        equip_id = LOG_GET_EQUIP_ID(log_code);
        item_num = LOG_GET_ITEM_NUM(log_code);
        /* +2 skips the equip-id and dirty-flag bytes of each table row */
        byte_index = item_num/8 + 2;
        byte_mask = 0x01 << (item_num % 8);
        /*
         * 514 bytes per equip-id row: 2 header bytes plus the item mask —
         * presumably DCI_MAX_ITEMS_PER_LOG_CODE == 512; TODO confirm
         * against the table layout in create_dci_log_mask_tbl()
         */
        offset = equip_id * 514;

        if (offset + byte_index >= DCI_LOG_MASK_SIZE) {
                pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
                                __func__, offset, log_code, byte_index);
                return 0;
        }

        log_mask_ptr = entry->dci_log_mask;
        log_mask_ptr = log_mask_ptr + offset + byte_index;
        return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;

}
703
704 int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
705                               uint16_t event_id)
706 {
707         uint8_t *event_mask_ptr, byte_mask;
708         int byte_index, bit_index;
709
710         if (!entry) {
711                 pr_err("diag: In %s, invalid client entry\n", __func__);
712                 return 0;
713         }
714
715         byte_index = event_id/8;
716         bit_index = event_id % 8;
717         byte_mask = 0x1 << bit_index;
718
719         if (byte_index >= DCI_EVENT_MASK_SIZE) {
720                 pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
721                                 __func__, event_id, byte_index);
722                 return 0;
723         }
724
725         event_mask_ptr = entry->dci_event_mask;
726         event_mask_ptr = event_mask_ptr + byte_index;
727         return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
728 }
729
730 static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
731 {
732         if (!header)
733                 return -ENOMEM;
734
735         switch (header->cmd_code) {
736         case 0x7d: /* Msg Mask Configuration */
737         case 0x73: /* Log Mask Configuration */
738         case 0x81: /* Event Mask Configuration */
739         case 0x82: /* Event Mask Change */
740         case 0x60: /* Event Mask Toggle */
741                 return 1;
742         }
743
744         if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
745                 switch (header->subsys_cmd_code) {
746                 case 0x60: /* Extended Event Mask Config */
747                 case 0x61: /* Extended Msg Mask Config */
748                 case 0x62: /* Extended Log Mask Config */
749                 case 0x20C: /* Set current Preset ID */
750                 case 0x20D: /* Get current Preset ID */
751                 case 0x218: /* HDLC Disabled Command */
752                         return 1;
753                 }
754         }
755
756         return 0;
757 }
758
759 static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid,
760                                                                  int client_id)
761 {
762         struct dci_pkt_req_entry_t *entry = NULL;
763         entry = kzalloc(sizeof(struct dci_pkt_req_entry_t), GFP_KERNEL);
764         if (!entry)
765                 return NULL;
766
767         driver->dci_tag++;
768         entry->client_id = client_id;
769         entry->uid = uid;
770         entry->tag = driver->dci_tag;
771         pr_debug("diag: Registering DCI cmd req, client_id: %d, uid: %d, tag:%d\n",
772                                 entry->client_id, entry->uid, entry->tag);
773         list_add_tail(&entry->track, &driver->dci_req_list);
774
775         return entry;
776 }
777
778 static struct dci_pkt_req_entry_t *diag_dci_get_request_entry(int tag)
779 {
780         struct list_head *start, *temp;
781         struct dci_pkt_req_entry_t *entry = NULL;
782         list_for_each_safe(start, temp, &driver->dci_req_list) {
783                 entry = list_entry(start, struct dci_pkt_req_entry_t, track);
784                 if (entry->tag == tag)
785                         return entry;
786         }
787         return NULL;
788 }
789
/*
 * diag_dci_remove_req_entry - retire a DCI request entry if @buf holds
 * the final response for it.
 *
 * Returns 1 when the entry was removed and freed, 0 when further
 * delayed responses are still expected, -EIO/-EINVAL on invalid input.
 */
static int diag_dci_remove_req_entry(unsigned char *buf, int len,
                                     struct dci_pkt_req_entry_t *entry)
{
        uint16_t rsp_count = 0, delayed_rsp_id = 0;
        if (!buf || len <= 0 || !entry) {
                pr_err("diag: In %s, invalid input buf: %pK, len: %d, entry: %pK\n",
                        __func__, buf, len, entry);
                return -EIO;
        }

        /* It is an immediate response, delete it from the table */
        if (*buf != 0x80) {
                list_del(&entry->track);
                kfree(entry);
                entry = NULL;
                return 1;
        }

        /* It is a delayed response. Check if the length is valid */
        if (len < MIN_DELAYED_RSP_LEN) {
                pr_err("diag: Invalid delayed rsp packet length %d\n", len);
                return -EINVAL;
        }

        /*
         * If the delayed response id field (uint16_t at byte 8) is 0 then
         * there is only one response and we can remove the request entry.
         */
        delayed_rsp_id = *(uint16_t *)(buf + 8);
        if (delayed_rsp_id == 0) {
                list_del(&entry->track);
                kfree(entry);
                entry = NULL;
                return 1;
        }

        /*
         * Check the response count field (uint16 at byte 10). The request
         * entry can be deleted if it is the last response in the sequence.
         * It is the last response in the sequence if the response count
         * is 1 or if the signed bit gets dropped.
         */
        rsp_count = *(uint16_t *)(buf + 10);
        if (rsp_count > 0 && rsp_count < 0x1000) {
                list_del(&entry->track);
                kfree(entry);
                entry = NULL;
                return 1;
        }

        return 0;
}
842
843 static void dci_process_ctrl_status(unsigned char *buf, int len, int token)
844 {
845         struct diag_ctrl_dci_status *header = NULL;
846         unsigned char *temp = buf;
847         uint32_t read_len = 0;
848         uint8_t i;
849         int peripheral_mask, status;
850
851         if (!buf || (len < sizeof(struct diag_ctrl_dci_status))) {
852                 pr_err("diag: In %s, invalid buf %pK or length: %d\n",
853                        __func__, buf, len);
854                 return;
855         }
856
857         if (!VALID_DCI_TOKEN(token)) {
858                 pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
859                 return;
860         }
861
862         header = (struct diag_ctrl_dci_status *)temp;
863         temp += sizeof(struct diag_ctrl_dci_status);
864         read_len += sizeof(struct diag_ctrl_dci_status);
865
866         for (i = 0; i < header->count; i++) {
867                 if (read_len > (len - 2)) {
868                         pr_err("diag: In %s, Invalid length len: %d\n",
869                                __func__, len);
870                         return;
871                 }
872
873                 switch (*(uint8_t *)temp) {
874                 case PERIPHERAL_MODEM:
875                         peripheral_mask = DIAG_CON_MPSS;
876                         break;
877                 case PERIPHERAL_LPASS:
878                         peripheral_mask = DIAG_CON_LPASS;
879                         break;
880                 case PERIPHERAL_WCNSS:
881                         peripheral_mask = DIAG_CON_WCNSS;
882                         break;
883                 case PERIPHERAL_SENSORS:
884                         peripheral_mask = DIAG_CON_SENSORS;
885                         break;
886                 default:
887                         pr_err("diag: In %s, unknown peripheral, peripheral: %d\n",
888                                 __func__, *(uint8_t *)temp);
889                         return;
890                 }
891                 temp += sizeof(uint8_t);
892                 read_len += sizeof(uint8_t);
893
894                 status = (*(uint8_t *)temp) ? DIAG_STATUS_OPEN :
895                                                         DIAG_STATUS_CLOSED;
896                 temp += sizeof(uint8_t);
897                 read_len += sizeof(uint8_t);
898                 diag_dci_notify_client(peripheral_mask, status, token);
899         }
900 }
901
902 static void dci_process_ctrl_handshake_pkt(unsigned char *buf, int len,
903                                            int token)
904 {
905         struct diag_ctrl_dci_handshake_pkt *header = NULL;
906         unsigned char *temp = buf;
907         int err = 0;
908
909         if (!buf || (len < sizeof(struct diag_ctrl_dci_handshake_pkt)))
910                 return;
911
912         if (!VALID_DCI_TOKEN(token))
913                 return;
914
915         header = (struct diag_ctrl_dci_handshake_pkt *)temp;
916         if (header->magic == DCI_MAGIC) {
917                 dci_channel_status[token].open = 1;
918                 err = dci_ops_tbl[token].send_log_mask(token);
919                 if (err) {
920                         pr_err("diag: In %s, unable to send log mask to token: %d, err: %d\n",
921                                __func__, token, err);
922                 }
923                 err = dci_ops_tbl[token].send_event_mask(token);
924                 if (err) {
925                         pr_err("diag: In %s, unable to send event mask to token: %d, err: %d\n",
926                                __func__, token, err);
927                 }
928         }
929 }
930
931 void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token)
932 {
933         unsigned char *temp = buf;
934         uint32_t ctrl_pkt_id;
935
936         diag_ws_on_read(DIAG_WS_DCI, len);
937         if (!buf) {
938                 pr_err("diag: Invalid buffer in %s\n", __func__);
939                 goto err;
940         }
941
942         if (len < (sizeof(uint8_t) + sizeof(uint32_t))) {
943                 pr_err("diag: In %s, invalid length %d\n", __func__, len);
944                 goto err;
945         }
946
947         /* Skip the Control packet command code */
948         temp += sizeof(uint8_t);
949         len -= sizeof(uint8_t);
950         ctrl_pkt_id = *(uint32_t *)temp;
951         switch (ctrl_pkt_id) {
952         case DIAG_CTRL_MSG_DCI_CONNECTION_STATUS:
953                 dci_process_ctrl_status(temp, len, token);
954                 break;
955         case DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT:
956                 dci_process_ctrl_handshake_pkt(temp, len, token);
957                 break;
958         default:
959                 pr_debug("diag: In %s, unknown control pkt %d\n",
960                          __func__, ctrl_pkt_id);
961                 break;
962         }
963
964 err:
965         /*
966          * DCI control packets are not consumed by the clients. Mimic client
967          * consumption by setting and clearing the wakeup source copy_count
968          * explicitly.
969          */
970         diag_ws_on_copy_fail(DIAG_WS_DCI);
971 }
972
973 void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
974                          int token)
975 {
976         int tag;
977         struct diag_dci_client_tbl *entry = NULL;
978         void *temp_buf = NULL;
979         uint8_t dci_cmd_code, cmd_code_len, delete_flag = 0;
980         uint32_t rsp_len = 0;
981         struct diag_dci_buffer_t *rsp_buf = NULL;
982         struct dci_pkt_req_entry_t *req_entry = NULL;
983         unsigned char *temp = buf;
984         int save_req_uid = 0;
985         struct diag_dci_pkt_rsp_header_t pkt_rsp_header;
986
987         if (!buf || len <= 0) {
988                 pr_err("diag: Invalid pointer in %s\n", __func__);
989                 return;
990         }
991         dci_cmd_code = *(uint8_t *)(temp);
992         if (dci_cmd_code == DCI_PKT_RSP_CODE) {
993                 cmd_code_len = sizeof(uint8_t);
994         } else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
995                 cmd_code_len = sizeof(uint32_t);
996         } else {
997                 pr_err("diag: In %s, invalid command code %d\n", __func__,
998                                                                 dci_cmd_code);
999                 return;
1000         }
1001         if (len < (cmd_code_len + sizeof(int)))
1002                 return;
1003         temp += cmd_code_len;
1004         tag = *(int *)temp;
1005         temp += sizeof(int);
1006
1007         /*
1008          * The size of the response is (total length) - (length of the command
1009          * code, the tag (int)
1010          */
1011         if (len >= cmd_code_len + sizeof(int)) {
1012                 rsp_len = len - (cmd_code_len + sizeof(int));
1013                 if ((rsp_len == 0) || (rsp_len > (len - 5))) {
1014                         pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d\n",
1015                                         __func__, len, rsp_len);
1016                         return;
1017                 }
1018         } else {
1019                 pr_err("diag:%s: Invalid length(%d) for calculating rsp_len\n",
1020                         __func__, len);
1021                 return;
1022         }
1023
1024         mutex_lock(&driver->dci_mutex);
1025         req_entry = diag_dci_get_request_entry(tag);
1026         if (!req_entry) {
1027                 pr_err_ratelimited("diag: No matching client for DCI data\n");
1028                 mutex_unlock(&driver->dci_mutex);
1029                 return;
1030         }
1031
1032         entry = diag_dci_get_client_entry(req_entry->client_id);
1033         if (!entry) {
1034                 pr_err("diag: In %s, couldn't find client entry, id:%d\n",
1035                                                 __func__, req_entry->client_id);
1036                 mutex_unlock(&driver->dci_mutex);
1037                 return;
1038         }
1039
1040         save_req_uid = req_entry->uid;
1041         /* Remove the headers and send only the response to this function */
1042         delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
1043         if (delete_flag < 0) {
1044                 mutex_unlock(&driver->dci_mutex);
1045                 return;
1046         }
1047
1048         mutex_lock(&entry->buffers[data_source].buf_mutex);
1049         rsp_buf = entry->buffers[data_source].buf_cmd;
1050
1051         mutex_lock(&rsp_buf->data_mutex);
1052         /*
1053          * Check if we can fit the data in the rsp buffer. The total length of
1054          * the rsp is the rsp length (write_len) + DCI_PKT_RSP_TYPE header (int)
1055          * + field for length (int) + delete_flag (uint8_t)
1056          */
1057         if ((rsp_buf->data_len + 9 + rsp_len) > rsp_buf->capacity) {
1058                 pr_alert("diag: create capacity for pkt rsp\n");
1059                 rsp_buf->capacity += 9 + rsp_len;
1060                 temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
1061                                     GFP_KERNEL);
1062                 if (!temp_buf) {
1063                         pr_err("diag: DCI realloc failed\n");
1064                         mutex_unlock(&rsp_buf->data_mutex);
1065                         mutex_unlock(&entry->buffers[data_source].buf_mutex);
1066                         mutex_unlock(&driver->dci_mutex);
1067                         return;
1068                 } else {
1069                         rsp_buf->data = temp_buf;
1070                 }
1071         }
1072
1073         /* Fill in packet response header information */
1074         pkt_rsp_header.type = DCI_PKT_RSP_TYPE;
1075         /* Packet Length = Response Length + Length of uid field (int) */
1076         pkt_rsp_header.length = rsp_len + sizeof(int);
1077         pkt_rsp_header.delete_flag = delete_flag;
1078         pkt_rsp_header.uid = save_req_uid;
1079         memcpy(rsp_buf->data + rsp_buf->data_len, &pkt_rsp_header,
1080                 sizeof(struct diag_dci_pkt_rsp_header_t));
1081         rsp_buf->data_len += sizeof(struct diag_dci_pkt_rsp_header_t);
1082         memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
1083         rsp_buf->data_len += rsp_len;
1084         rsp_buf->data_source = data_source;
1085
1086         mutex_unlock(&rsp_buf->data_mutex);
1087
1088         /*
1089          * Add directly to the list for writing responses to the
1090          * userspace as these shouldn't be buffered and shouldn't wait
1091          * for log and event buffers to be full
1092          */
1093         dci_add_buffer_to_list(entry, rsp_buf);
1094         mutex_unlock(&entry->buffers[data_source].buf_mutex);
1095         mutex_unlock(&driver->dci_mutex);
1096 }
1097
1098 static void copy_ext_hdr(struct diag_dci_buffer_t *data_buffer, void *ext_hdr)
1099 {
1100         if (!data_buffer) {
1101                 pr_err("diag: In %s, data buffer is NULL", __func__);
1102                 return;
1103         }
1104
1105         *(int *)(data_buffer->data + data_buffer->data_len) =
1106                         DCI_EXT_HDR_TYPE;
1107         data_buffer->data_len += sizeof(int);
1108         memcpy(data_buffer->data + data_buffer->data_len, ext_hdr,
1109                         EXT_HDR_LEN);
1110         data_buffer->data_len += EXT_HDR_LEN;
1111 }
1112
1113 static void copy_dci_event(unsigned char *buf, int len,
1114                         struct diag_dci_client_tbl *client, int data_source,
1115                         void *ext_hdr)
1116 {
1117         struct diag_dci_buffer_t *data_buffer = NULL;
1118         struct diag_dci_buf_peripheral_t *proc_buf = NULL;
1119         int err = 0, total_len = 0;
1120
1121         if (!buf || !client) {
1122                 pr_err("diag: Invalid pointers in %s", __func__);
1123                 return;
1124         }
1125
1126         total_len = sizeof(int) + len;
1127         if (ext_hdr)
1128                 total_len += sizeof(int) + EXT_HDR_LEN;
1129
1130         proc_buf = &client->buffers[data_source];
1131         mutex_lock(&proc_buf->buf_mutex);
1132         mutex_lock(&proc_buf->health_mutex);
1133         err = diag_dci_get_buffer(client, data_source, total_len);
1134         if (err) {
1135                 if (err == -ENOMEM)
1136                         proc_buf->health.dropped_events++;
1137                 else
1138                         pr_err("diag: In %s, invalid packet\n", __func__);
1139                 mutex_unlock(&proc_buf->health_mutex);
1140                 mutex_unlock(&proc_buf->buf_mutex);
1141                 return;
1142         }
1143
1144         data_buffer = proc_buf->buf_curr;
1145
1146         proc_buf->health.received_events++;
1147         mutex_unlock(&proc_buf->health_mutex);
1148         mutex_unlock(&proc_buf->buf_mutex);
1149
1150         mutex_lock(&data_buffer->data_mutex);
1151         if (ext_hdr)
1152                 copy_ext_hdr(data_buffer, ext_hdr);
1153
1154         *(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
1155         data_buffer->data_len += sizeof(int);
1156         memcpy(data_buffer->data + data_buffer->data_len, buf, len);
1157         data_buffer->data_len += len;
1158         data_buffer->data_source = data_source;
1159         mutex_unlock(&data_buffer->data_mutex);
1160
1161 }
1162
/*
 * extract_dci_events() - parse an incoming event report and fan each event
 * out to every DCI client whose event mask matches.
 * @buf:         raw event packet: 1-byte cmd code, 2-byte series length,
 *               then a packed series of events
 * @len:         total length of @buf in bytes
 * @data_source: peripheral index the data arrived from
 * @token:       DCI processor token; only clients with this token receive
 * @ext_hdr:     optional extended header forwarded to copy_dci_event()
 *
 * Each packed event is: 2-byte event id word (bit 15 = short timestamp,
 * bits 13-14 = payload length encoding, bits 0-11 = event id), a 2- or
 * 8-byte timestamp, an optional 1-byte payload length, then the payload.
 * Events are re-serialized into event_data with a full 8-byte timestamp
 * before being copied to clients.
 */
void extract_dci_events(unsigned char *buf, int len, int data_source,
		int token, void *ext_hdr)
{
	uint16_t event_id, event_id_packet, length, temp_len;
	uint8_t payload_len, payload_len_field;
	uint8_t timestamp[8] = {0}, timestamp_len;
	unsigned char event_data[MAX_EVENT_SIZE];
	unsigned int total_event_len;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	if (!buf) {
		pr_err("diag: In %s buffer is NULL\n", __func__);
		return;
	}
	/*
	 * 1 byte for event code and 2 bytes for the length field.
	 * The length field indicates the total length removing the cmd_code
	 * and the length field. The event parsing in that case should happen
	 * till the end.
	 */
	if (len < 3) {
		pr_err("diag: In %s invalid len: %d\n", __func__, len);
		return;
	}
	length = *(uint16_t *)(buf + 1); /* total length of event series */
	if ((length == 0) || (len != (length + 3))) {
		pr_err("diag: Incoming dci event length: %d is invalid\n",
			length);
		return;
	}
	/*
	 * Move directly to the start of the event series.
	 * The event parsing should happen from start of event
	 * series till the end.
	 */
	temp_len = 3;
	while (temp_len < length) {
		event_id_packet = *(uint16_t *)(buf + temp_len);
		event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
		if (event_id_packet & 0x8000) {
			/* The packet has the two smallest byte of the
			 * timestamp
			 */
			timestamp_len = 2;
		} else {
			/* The packet has the full timestamp. The first event
			 * will always have full timestamp. Save it in the
			 * timestamp buffer and use it for subsequent events if
			 * necessary.
			 */
			timestamp_len = 8;
			if ((temp_len + timestamp_len + 2) <= len)
				memcpy(timestamp, buf + temp_len + 2,
					timestamp_len);
			else {
				pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
						__func__, len, temp_len);
				return;
			}
		}
		/* 13th and 14th bit represent the payload length */
		if (((event_id_packet & 0x6000) >> 13) == 3) {
			/* Value 3 means an explicit 1-byte length follows. */
			payload_len_field = 1;
			if ((temp_len + timestamp_len + 3) <= len) {
				payload_len = *(uint8_t *)
					(buf + temp_len + 2 + timestamp_len);
			} else {
				pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
						__func__, len, temp_len);
				return;
			}
			if ((payload_len < (MAX_EVENT_SIZE - 13)) &&
			((temp_len + timestamp_len + payload_len + 3) <= len)) {
				/*
				 * Copy the payload length and the payload
				 * after skipping temp_len bytes for already
				 * parsed packet, timestamp_len for timestamp
				 * buffer, 2 bytes for event_id_packet.
				 */
				memcpy(event_data + 12, buf + temp_len + 2 +
							timestamp_len, 1);
				memcpy(event_data + 13, buf + temp_len + 2 +
					timestamp_len + 1, payload_len);
			} else {
				pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
				(MAX_EVENT_SIZE - 13), payload_len, temp_len);
				return;
			}
		} else {
			/* Values 0-2 encode the payload length directly. */
			payload_len_field = 0;
			payload_len = (event_id_packet & 0x6000) >> 13;
			/*
			 * Copy the payload after skipping temp_len bytes
			 * for already parsed packet, timestamp_len for
			 * timestamp buffer, 2 bytes for event_id_packet.
			 */
			if ((payload_len < (MAX_EVENT_SIZE - 12)) &&
			((temp_len + timestamp_len + payload_len + 2) <= len))
				memcpy(event_data + 12, buf + temp_len + 2 +
						timestamp_len, payload_len);
			else {
				pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
				(MAX_EVENT_SIZE - 12), payload_len, temp_len);
				return;
			}
		}

		/* Before copying the data to userspace, check if we are still
		 * within the buffer limit. This is an error case, don't count
		 * it towards the health statistics.
		 *
		 * Here, the offset of 2 bytes(uint16_t) is for the
		 * event_id_packet length
		 */
		temp_len += sizeof(uint16_t) + timestamp_len +
						payload_len_field + payload_len;
		if (temp_len > len) {
			pr_err("diag: Invalid length in %s, len: %d, read: %d",
						__func__, len, temp_len);
			return;
		}

		/* 2 bytes for the event id & timestamp len is hard coded to 8,
		   as individual events have full timestamp */
		*(uint16_t *)(event_data) = 10 +
					payload_len_field + payload_len;
		*(uint16_t *)(event_data + 2) = event_id_packet & 0x7FFF;
		memcpy(event_data + 4, timestamp, 8);
		/* 2 bytes for the event length field which is added to
		   the event data */
		total_event_len = 2 + 10 + payload_len_field + payload_len;
		/* parse through event mask tbl of each client and check mask */
		mutex_lock(&driver->dci_mutex);
		list_for_each_safe(start, temp, &driver->dci_client_list) {
			entry = list_entry(start, struct diag_dci_client_tbl,
									track);
			if (entry->client_info.token != token)
				continue;
			if (diag_dci_query_event_mask(entry, event_id)) {
				/* copy to client buffer */
				copy_dci_event(event_data, total_event_len,
					       entry, data_source, ext_hdr);
			}
		}
		mutex_unlock(&driver->dci_mutex);
	}
}
1311
1312 static void copy_dci_log(unsigned char *buf, int len,
1313                          struct diag_dci_client_tbl *client, int data_source,
1314                          void *ext_hdr)
1315 {
1316         uint16_t log_length = 0;
1317         struct diag_dci_buffer_t *data_buffer = NULL;
1318         struct diag_dci_buf_peripheral_t *proc_buf = NULL;
1319         int err = 0, total_len = 0;
1320
1321         if (!buf || !client) {
1322                 pr_err("diag: Invalid pointers in %s", __func__);
1323                 return;
1324         }
1325
1326         log_length = *(uint16_t *)(buf + 2);
1327         if (log_length > USHRT_MAX - 4) {
1328                 pr_err("diag: Integer overflow in %s, log_len: %d",
1329                                 __func__, log_length);
1330                 return;
1331         }
1332         total_len = sizeof(int) + log_length;
1333         if (ext_hdr)
1334                 total_len += sizeof(int) + EXT_HDR_LEN;
1335
1336         /* Check if we are within the len. The check should include the
1337          * first 4 bytes for the Log code(2) and the length bytes (2)
1338          */
1339         if ((log_length + sizeof(uint16_t) + 2) > len) {
1340                 pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
1341                                                 __func__, log_length, len);
1342                 return;
1343         }
1344
1345         proc_buf = &client->buffers[data_source];
1346         mutex_lock(&proc_buf->buf_mutex);
1347         mutex_lock(&proc_buf->health_mutex);
1348         err = diag_dci_get_buffer(client, data_source, total_len);
1349         if (err) {
1350                 if (err == -ENOMEM)
1351                         proc_buf->health.dropped_logs++;
1352                 else
1353                         pr_err("diag: In %s, invalid packet\n", __func__);
1354                 mutex_unlock(&proc_buf->health_mutex);
1355                 mutex_unlock(&proc_buf->buf_mutex);
1356                 return;
1357         }
1358
1359         data_buffer = proc_buf->buf_curr;
1360         proc_buf->health.received_logs++;
1361         mutex_unlock(&proc_buf->health_mutex);
1362         mutex_unlock(&proc_buf->buf_mutex);
1363
1364         mutex_lock(&data_buffer->data_mutex);
1365         if (!data_buffer->data) {
1366                 mutex_unlock(&data_buffer->data_mutex);
1367                 return;
1368         }
1369         if (ext_hdr)
1370                 copy_ext_hdr(data_buffer, ext_hdr);
1371
1372         *(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
1373         data_buffer->data_len += sizeof(int);
1374         memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
1375                log_length);
1376         data_buffer->data_len += log_length;
1377         data_buffer->data_source = data_source;
1378         mutex_unlock(&data_buffer->data_mutex);
1379 }
1380
1381 void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
1382                         void *ext_hdr)
1383 {
1384         uint16_t log_code, read_bytes = 0;
1385         struct list_head *start, *temp;
1386         struct diag_dci_client_tbl *entry = NULL;
1387
1388         if (!buf) {
1389                 pr_err("diag: In %s buffer is NULL\n", __func__);
1390                 return;
1391         }
1392         /*
1393          * The first eight bytes for the incoming log packet contains
1394          * Command code (2), the length of the packet (2), the length
1395          * of the log (2) and log code (2)
1396          */
1397         if (len < 8) {
1398                 pr_err("diag: In %s invalid len: %d\n", __func__, len);
1399                 return;
1400         }
1401
1402         log_code = *(uint16_t *)(buf + 6);
1403         read_bytes += sizeof(uint16_t) + 6;
1404
1405         /* parse through log mask table of each client and check mask */
1406         mutex_lock(&driver->dci_mutex);
1407         list_for_each_safe(start, temp, &driver->dci_client_list) {
1408                 entry = list_entry(start, struct diag_dci_client_tbl, track);
1409                 if (entry->client_info.token != token)
1410                         continue;
1411                 if (diag_dci_query_log_mask(entry, log_code)) {
1412                         pr_debug("\t log code %x needed by client %d",
1413                                  log_code, entry->client->tgid);
1414                         /* copy to client buffer */
1415                         copy_dci_log(buf, len, entry, data_source, ext_hdr);
1416                 }
1417         }
1418         mutex_unlock(&driver->dci_mutex);
1419 }
1420
1421 void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
1422                 int token)
1423 {
1424         uint8_t version, pkt_cmd_code = 0;
1425         unsigned char *pkt = NULL;
1426
1427         if (!buf) {
1428                 pr_err("diag: In %s buffer is NULL\n", __func__);
1429                 return;
1430         }
1431         if (len < (EXT_HDR_LEN + sizeof(uint8_t))) {
1432                 pr_err("diag: In %s invalid len: %d\n", __func__, len);
1433                 return;
1434         }
1435
1436         version = *(uint8_t *)buf + 1;
1437         if (version < EXT_HDR_VERSION)  {
1438                 pr_err("diag: %s, Extended header with invalid version: %d\n",
1439                         __func__, version);
1440                 return;
1441         }
1442
1443         pkt = buf + EXT_HDR_LEN;
1444         pkt_cmd_code = *(uint8_t *)pkt;
1445         len -= EXT_HDR_LEN;
1446
1447         switch (pkt_cmd_code) {
1448         case LOG_CMD_CODE:
1449                 extract_dci_log(pkt, len, data_source, token, buf);
1450                 break;
1451         case EVENT_CMD_CODE:
1452                 extract_dci_events(pkt, len, data_source, token, buf);
1453                 break;
1454         default:
1455                 pr_err("diag: %s unsupported cmd_code: %d, data_source: %d\n",
1456                         __func__, pkt_cmd_code, data_source);
1457                 return;
1458         }
1459 }
1460
1461 void diag_dci_channel_open_work(struct work_struct *work)
1462 {
1463         int i, j;
1464         char dirty_bits[16];
1465         uint8_t *client_log_mask_ptr;
1466         uint8_t *log_mask_ptr;
1467         int ret;
1468         struct list_head *start, *temp;
1469         struct diag_dci_client_tbl *entry = NULL;
1470
1471         /* Update apps and peripheral(s) with the dci log and event masks */
1472         memset(dirty_bits, 0, 16 * sizeof(uint8_t));
1473
1474         /*
1475          * From each log entry used by each client, determine
1476          * which log entries in the cumulative logs that need
1477          * to be updated on the peripheral.
1478          */
1479         mutex_lock(&driver->dci_mutex);
1480         list_for_each_safe(start, temp, &driver->dci_client_list) {
1481                 entry = list_entry(start, struct diag_dci_client_tbl, track);
1482                 if (entry->client_info.token != DCI_LOCAL_PROC)
1483                         continue;
1484                 client_log_mask_ptr = entry->dci_log_mask;
1485                 for (j = 0; j < 16; j++) {
1486                         if (*(client_log_mask_ptr+1))
1487                                 dirty_bits[j] = 1;
1488                         client_log_mask_ptr += 514;
1489                 }
1490         }
1491         mutex_unlock(&driver->dci_mutex);
1492
1493         mutex_lock(&dci_log_mask_mutex);
1494         /* Update the appropriate dirty bits in the cumulative mask */
1495         log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
1496         for (i = 0; i < 16; i++) {
1497                 if (dirty_bits[i])
1498                         *(log_mask_ptr+1) = dirty_bits[i];
1499
1500                 log_mask_ptr += 514;
1501         }
1502         mutex_unlock(&dci_log_mask_mutex);
1503
1504         /* Send updated mask to userspace clients */
1505         diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
1506         /* Send updated log mask to peripherals */
1507         ret = dci_ops_tbl[DCI_LOCAL_PROC].send_log_mask(DCI_LOCAL_PROC);
1508
1509         /* Send updated event mask to userspace clients */
1510         diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
1511         /* Send updated event mask to peripheral */
1512         ret = dci_ops_tbl[DCI_LOCAL_PROC].send_event_mask(DCI_LOCAL_PROC);
1513 }
1514
/*
 * diag_dci_notify_client() - notify registered DCI clients that a
 * peripheral's DCI channel changed state, using each client's
 * registered signal.
 * @peripheral_mask: bitmask identifying the affected peripheral(s)
 * @data: channel state (e.g. DIAG_STATUS_OPEN); ORed with the mask
 *        into the signal payload (si_int)
 * @proc: DCI processor token this notification applies to
 *
 * Also updates the aggregate dci_ops_tbl[proc].peripheral_status
 * bookkeeping before walking the client list.
 */
void diag_dci_notify_client(int peripheral_mask, int data, int proc)
{
	int stat = 0;
	struct siginfo info;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	struct pid *pid_struct = NULL;
	struct task_struct *dci_task = NULL;

	/* Signal payload carries both the peripheral and the new state */
	memset(&info, 0, sizeof(struct siginfo));
	info.si_code = SI_QUEUE;
	info.si_int = (peripheral_mask | data);
	/* Keep the per-processor channel status bitmap in sync */
	if (data == DIAG_STATUS_OPEN)
		dci_ops_tbl[proc].peripheral_status |= peripheral_mask;
	else
		dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;

	/* Notify the DCI process that the peripheral DCI Channel is up */
	mutex_lock(&driver->dci_mutex);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->client_info.token != proc)
			continue;
		if (entry->client_info.notification_list & peripheral_mask) {
			info.si_signo = entry->client_info.signal_type;
			/*
			 * Take pid and task references so the target task
			 * cannot disappear while the signal is delivered.
			 */
			pid_struct = find_get_pid(entry->tgid);
			if (pid_struct) {
				dci_task = get_pid_task(pid_struct,
						PIDTYPE_PID);
				if (!dci_task) {
					DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
						"diag: dci client with pid = %d Exited..\n",
						entry->tgid);
					put_pid(pid_struct);
					/*
					 * NOTE(review): bails out here
					 * without notifying the remaining
					 * clients in the list — confirm
					 * this early return is intentional.
					 */
					mutex_unlock(&driver->dci_mutex);
					return;
				}
				/* Guard against a recycled tgid: only
				 * signal if the task still matches the
				 * registered client entry.
				 */
				if (entry->client &&
					entry->tgid == dci_task->tgid) {
					DIAG_LOG(DIAG_DEBUG_DCI,
						"entry tgid = %d, dci client tgid = %d\n",
						entry->tgid, dci_task->tgid);
					stat = send_sig_info(
						entry->client_info.signal_type,
						&info, dci_task);
					if (stat)
						pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
							info.si_int, stat);
				} else {
					pr_err("diag: client data is corrupted, signal data: 0x%x, stat: %d\n",
						info.si_int, stat);
				}
				put_task_struct(dci_task);
				put_pid(pid_struct);
			}
		}
	}
	mutex_unlock(&driver->dci_mutex);
}
1574
1575 static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
1576                              unsigned char *buf, int len, int tag)
1577 {
1578         int i, status = DIAG_DCI_NO_ERROR;
1579         uint32_t write_len = 0;
1580         struct diag_dci_pkt_header_t header;
1581
1582         if (!entry)
1583                 return -EIO;
1584
1585         if (len < 1 || len > DIAG_MAX_REQ_SIZE) {
1586                 pr_err("diag: dci: In %s, invalid length %d, max_length: %d\n",
1587                        __func__, len, (int)(DCI_REQ_BUF_SIZE - sizeof(header)));
1588                 return -EIO;
1589         }
1590
1591         if ((len + sizeof(header) + sizeof(uint8_t)) > DCI_BUF_SIZE) {
1592                 pr_err("diag: dci: In %s, invalid length %d for apps_dci_buf, max_length: %d\n",
1593                        __func__, len, DIAG_MAX_REQ_SIZE);
1594                 return -EIO;
1595         }
1596
1597         mutex_lock(&driver->dci_mutex);
1598         /* prepare DCI packet */
1599         header.start = CONTROL_CHAR;
1600         header.version = 1;
1601         header.len = len + sizeof(int) + sizeof(uint8_t);
1602         header.pkt_code = DCI_PKT_RSP_CODE;
1603         header.tag = tag;
1604         memcpy(driver->apps_dci_buf, &header, sizeof(header));
1605         write_len += sizeof(header);
1606         memcpy(driver->apps_dci_buf + write_len , buf, len);
1607         write_len += len;
1608         *(uint8_t *)(driver->apps_dci_buf + write_len) = CONTROL_CHAR;
1609         write_len += sizeof(uint8_t);
1610
1611         /* This command is registered locally on the Apps */
1612         if (entry->proc == APPS_DATA) {
1613                 diag_update_pkt_buffer(driver->apps_dci_buf, write_len,
1614                                        DCI_PKT_TYPE);
1615                 diag_update_sleeping_process(entry->pid, DCI_PKT_TYPE);
1616                 mutex_unlock(&driver->dci_mutex);
1617                 return DIAG_DCI_NO_ERROR;
1618         }
1619
1620         for (i = 0; i < NUM_PERIPHERALS; i++)
1621                 if (entry->proc == i) {
1622                         status = 1;
1623                         break;
1624                 }
1625
1626         if (status) {
1627                 status = diag_dci_write_proc(entry->proc,
1628                                              DIAG_DATA_TYPE,
1629                                              driver->apps_dci_buf,
1630                                              write_len);
1631         } else {
1632                 pr_err("diag: Cannot send packet to peripheral %d",
1633                        entry->proc);
1634                 status = DIAG_DCI_SEND_DATA_FAIL;
1635         }
1636         mutex_unlock(&driver->dci_mutex);
1637         return status;
1638 }
1639
1640 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
1641 unsigned char *dci_get_buffer_from_bridge(int token)
1642 {
1643         uint8_t retries = 0, max_retries = 3;
1644         unsigned char *buf = NULL;
1645
1646         do {
1647                 buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
1648                                     dci_ops_tbl[token].mempool);
1649                 if (!buf) {
1650                         usleep_range(5000, 5100);
1651                         retries++;
1652                 } else
1653                         break;
1654         } while (retries < max_retries);
1655
1656         return buf;
1657 }
1658
/*
 * Forward a fully framed DCI packet to the bridge device backing the
 * given remote-processor token. Returns the bridge write's result.
 */
int diag_dci_write_bridge(int token, unsigned char *buf, int len)
{
	int bridge_index = TOKEN_TO_BRIDGE(token);

	return diagfwd_bridge_write(bridge_index, buf, len);
}
1663
1664 int diag_dci_write_done_bridge(int index, unsigned char *buf, int len)
1665 {
1666         int token = BRIDGE_TO_TOKEN(index);
1667         if (!VALID_DCI_TOKEN(token)) {
1668                 pr_err("diag: Invalid DCI token %d in %s\n", token, __func__);
1669                 return -EINVAL;
1670         }
1671         diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
1672         return 0;
1673 }
1674 #endif
1675
1676 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
1677 static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
1678                                     int token)
1679 {
1680         unsigned char *buf = NULL;
1681         struct diag_dci_header_t dci_header;
1682         int dci_header_size = sizeof(struct diag_dci_header_t);
1683         int ret = DIAG_DCI_NO_ERROR;
1684         uint32_t write_len = 0;
1685
1686         if (!data)
1687                 return -EIO;
1688
1689         buf = dci_get_buffer_from_bridge(token);
1690         if (!buf) {
1691                 pr_err("diag: In %s, unable to get dci buffers to write data\n",
1692                         __func__);
1693                 return -EAGAIN;
1694         }
1695
1696         dci_header.start = CONTROL_CHAR;
1697         dci_header.version = 1;
1698         /*
1699          * The Length of the DCI packet = length of the command + tag (int) +
1700          * the command code size (uint8_t)
1701          */
1702         dci_header.length = len + sizeof(int) + sizeof(uint8_t);
1703         dci_header.cmd_code = DCI_PKT_RSP_CODE;
1704
1705         memcpy(buf + write_len, &dci_header, dci_header_size);
1706         write_len += dci_header_size;
1707         *(int *)(buf + write_len) = tag;
1708         write_len += sizeof(int);
1709         memcpy(buf + write_len, data, len);
1710         write_len += len;
1711         *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
1712         write_len += sizeof(uint8_t);
1713
1714         ret = diag_dci_write_bridge(token, buf, write_len);
1715         if (ret) {
1716                 pr_err("diag: error writing dci pkt to remote proc, token: %d, err: %d\n",
1717                         token, ret);
1718                 diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
1719         } else {
1720                 ret = DIAG_DCI_NO_ERROR;
1721         }
1722
1723         return ret;
1724 }
1725 #else
/* Bridge support compiled out: report success without sending. */
static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
                                    int token)
{
        return DIAG_DCI_NO_ERROR;
}
1731 #endif
1732
1733 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * diag_dci_send_handshake_pkt() - send a DCI handshake control packet
 * to the remote processor behind the given bridge index.
 * @index: bridge index; converted to a DCI token for validation
 *
 * Frames a DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT (version 1, DCI_MAGIC)
 * inside a CONTROL_CHAR-delimited DCI control packet, writes it over
 * the bridge, and arms the per-token handshake wait timer.
 *
 * Returns 0 on success, -EINVAL for an invalid token, -EAGAIN when no
 * bridge buffer is available, or the bridge write error code.
 */
int diag_dci_send_handshake_pkt(int index)
{
	int err = 0;
	int token = BRIDGE_TO_TOKEN(index);
	int write_len = 0;
	struct diag_ctrl_dci_handshake_pkt ctrl_pkt;
	unsigned char *buf = NULL;
	struct diag_dci_header_t dci_header;

	if (!VALID_DCI_TOKEN(token)) {
		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
		return -EINVAL;
	}

	buf = dci_get_buffer_from_bridge(token);
	if (!buf) {
		pr_err("diag: In %s, unable to get dci buffers to write data\n",
			__func__);
		return -EAGAIN;
	}

	/* Outer DCI header framing the control packet */
	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	/* Include the cmd code (uint8_t) in the length */
	dci_header.length = sizeof(ctrl_pkt) + sizeof(uint8_t);
	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
	memcpy(buf, &dci_header, sizeof(dci_header));
	write_len += sizeof(dci_header);

	ctrl_pkt.ctrl_pkt_id = DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT;
	/*
	 *  The control packet data length accounts for the version (uint32_t)
	 *  of the packet and the magic number (uint32_t).
	 */
	ctrl_pkt.ctrl_pkt_data_len = 2 * sizeof(uint32_t);
	ctrl_pkt.version = 1;
	ctrl_pkt.magic = DCI_MAGIC;
	memcpy(buf + write_len, &ctrl_pkt, sizeof(ctrl_pkt));
	write_len += sizeof(ctrl_pkt);

	/* Trailing packet terminator */
	*(uint8_t *)(buf + write_len) = CONTROL_CHAR;
	write_len += sizeof(uint8_t);

	err = diag_dci_write_bridge(token, buf, write_len);
	if (err) {
		pr_err("diag: error writing ack packet to remote proc, token: %d, err: %d\n",
		       token, err);
		/* Write failed, so the buffer was not consumed; free it */
		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
		return err;
	}

	/* Start the timeout window for the remote handshake response */
	mod_timer(&(dci_channel_status[token].wait_time),
		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));

	return 0;
}
1790 #else
/* Bridge support compiled out: handshake is a no-op. */
int diag_dci_send_handshake_pkt(int index)
{
        return 0;
}
1795 #endif
1796
/*
 * diag_dci_process_apps_pkt() - service a DCI request that the apps
 * processor answers itself (polling, version, build id, log-on-demand,
 * STM, mobile id, download-mode, etc.).
 * @pkt_header: parsed diag command header of the request
 * @req_buf: raw request bytes (starting at the command header)
 * @req_len: length of @req_buf in bytes
 * @tag: DCI transaction tag to echo in the response
 *
 * On a recognized command, composes the response payload after a DCI
 * packet header in driver->apps_dci_buf and hands it to
 * diag_process_apps_dci_read_data(). For DIAG_CMD_DOWNLOAD the device
 * is additionally rebooted into download mode.
 *
 * Returns DIAG_DCI_NO_ERROR when a response was generated,
 * DIAG_DCI_TABLE_ERR when the command is not handled here, -EIO on bad
 * arguments, or -ENOMEM if the response would overflow the buffer.
 */
static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
                                     unsigned char *req_buf, int req_len,
                                     int tag)
{
        uint8_t cmd_code, subsys_id, i, goto_download = 0;
        uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
        uint16_t ss_cmd_code;
        uint32_t write_len = 0;
        /* Response is assembled in-place: header at the front, payload after */
        unsigned char *dest_buf = driver->apps_dci_buf;
        unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
        struct diag_dci_pkt_header_t dci_header;

        if (!pkt_header || !req_buf || req_len <= 0 || tag < 0)
                return -EIO;

        cmd_code = pkt_header->cmd_code;
        subsys_id = pkt_header->subsys_id;
        ss_cmd_code = pkt_header->subsys_cmd_code;

        if (cmd_code == DIAG_CMD_DOWNLOAD) {
                /* Ack with the command code, then reboot below */
                *payload_ptr = DIAG_CMD_DOWNLOAD;
                write_len = sizeof(uint8_t);
                goto_download = 1;
                goto fill_buffer;
        } else if (cmd_code == DIAG_CMD_VERSION) {
                if (chk_polling_response()) {
                        /* 55 zero bytes — presumably the fixed size of the
                         * version response; TODO confirm against the diag
                         * protocol spec.
                         */
                        for (i = 0; i < 55; i++, write_len++, payload_ptr++)
                                *(payload_ptr) = 0;
                        goto fill_buffer;
                }
        } else if (cmd_code == DIAG_CMD_EXT_BUILD) {
                if (chk_polling_response()) {
                        *payload_ptr = DIAG_CMD_EXT_BUILD;
                        write_len = sizeof(uint8_t);
                        payload_ptr += sizeof(uint8_t);
                        /* 8 reserved/zero bytes precede the build id */
                        for (i = 0; i < 8; i++, write_len++, payload_ptr++)
                                *(payload_ptr) = 0;
                        *(int *)(payload_ptr) = chk_config_get_id();
                        write_len += sizeof(int);
                        goto fill_buffer;
                }
        } else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
                write_len = diag_cmd_log_on_demand(req_buf, req_len,
                                                   payload_ptr,
                                                   APPS_BUF_SIZE - header_len);
                goto fill_buffer;
        } else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
                /* Anything else without the subsys cmd code isn't ours */
                return DIAG_DCI_TABLE_ERR;
        }

        /* Subsystem commands handled on apps */
        if (subsys_id == DIAG_SS_DIAG) {
                if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
                        /* Echo the request header, then the max request size */
                        memcpy(payload_ptr, pkt_header,
                                        sizeof(struct diag_pkt_header_t));
                        write_len = sizeof(struct diag_pkt_header_t);
                        *(uint32_t *)(payload_ptr + write_len) =
                                                        DIAG_MAX_REQ_SIZE;
                        write_len += sizeof(uint32_t);
                } else if (ss_cmd_code == DIAG_DIAG_STM) {
                        write_len = diag_process_stm_cmd(req_buf, payload_ptr);
                }
        } else if (subsys_id == DIAG_SS_PARAMS) {
                if (ss_cmd_code == DIAG_DIAG_POLL) {
                        if (chk_polling_response()) {
                                /* Header echo followed by 12 zero bytes */
                                memcpy(payload_ptr, pkt_header,
                                        sizeof(struct diag_pkt_header_t));
                                write_len = sizeof(struct diag_pkt_header_t);
                                payload_ptr += write_len;
                                for (i = 0; i < 12; i++, write_len++) {
                                        *(payload_ptr) = 0;
                                        payload_ptr++;
                                }
                        }
                } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
                        /* Query current delayed-response wrap setting */
                        memcpy(payload_ptr, pkt_header,
                                        sizeof(struct diag_pkt_header_t));
                        write_len = sizeof(struct diag_pkt_header_t);
                        *(int *)(payload_ptr + write_len) = wrap_enabled;
                        write_len += sizeof(int);
                } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
                        /* Enable wrapping and report the wrap count */
                        wrap_enabled = true;
                        memcpy(payload_ptr, pkt_header,
                                        sizeof(struct diag_pkt_header_t));
                        write_len = sizeof(struct diag_pkt_header_t);
                        *(uint16_t *)(payload_ptr + write_len) = wrap_count;
                        write_len += sizeof(uint16_t);
                } else if (ss_cmd_code == DIAG_EXT_MOBILE_ID) {
                        write_len = diag_cmd_get_mobile_id(req_buf, req_len,
                                                   payload_ptr,
                                                   APPS_BUF_SIZE - header_len);
                }
        }

fill_buffer:
        if (write_len > 0) {
                /* Check if we are within the range of the buffer*/
                if (write_len + header_len > DIAG_MAX_REQ_SIZE) {
                        pr_err("diag: In %s, invalid length %d\n", __func__,
                                                write_len + header_len);
                        return -ENOMEM;
                }
                dci_header.start = CONTROL_CHAR;
                dci_header.version = 1;
                /*
                 * Length of the rsp pkt = actual data len + pkt rsp code
                 * (uint8_t) + tag (int)
                 */
                dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
                dci_header.pkt_code = DCI_PKT_RSP_CODE;
                dci_header.tag = tag;
                /* Busy flag keeps diag_process_dci_pkt_rsp from reusing
                 * apps_dci_buf while the response is being consumed.
                 */
                driver->in_busy_dcipktdata = 1;
                memcpy(dest_buf, &dci_header, header_len);
                /* dest_buf + 4 skips start/version/len so delivery begins
                 * at pkt_code — presumably 4 == offsetof(pkt_code) in
                 * struct diag_dci_pkt_header_t; TODO confirm the struct
                 * layout is packed.
                 */
                diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
                                                dci_header.len);
                driver->in_busy_dcipktdata = 0;

                if (goto_download) {
                        /*
                         * Sleep for sometime so that the response reaches the
                         * client. The value 5000 empirically as an optimum
                         * time for the response to reach the client.
                         */
                        usleep_range(5000, 5100);
                        /* call download API */
                        msm_set_restart_mode(RESTART_DLOAD);
                        pr_alert("diag: download mode set, Rebooting SoC..\n");
                        kernel_restart(NULL);
                }
                return DIAG_DCI_NO_ERROR;
        }

        return DIAG_DCI_TABLE_ERR;
}
1930
/*
 * diag_process_dci_pkt_rsp() - validate a client's DCI packet request
 * and route it to its destination.
 * @buf: request buffer: struct dci_pkt_req_t, then the diag command
 *       (struct diag_pkt_header_t + payload)
 * @len: total length of @buf in bytes
 *
 * Looks up the requesting client, filters disallowed commands,
 * registers a transaction tag, and then sends the request to either a
 * remote processor (bridge token), the apps handler, or a registered
 * peripheral.
 *
 * Returns DIAG_DCI_NO_ERROR on success, a DIAG_DCI_* error code,
 * -EAGAIN if the apps response buffer stayed busy, or -EIO on bad
 * input.
 */
static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
{
        int ret = DIAG_DCI_TABLE_ERR;
        int common_cmd = 0;
        struct diag_pkt_header_t *header = NULL;
        unsigned char *temp = buf;
        unsigned char *req_buf = NULL;
        uint8_t retry_count = 0, max_retries = 3;
        uint32_t read_len = 0, req_len = len;
        struct dci_pkt_req_entry_t *req_entry = NULL;
        struct diag_dci_client_tbl *dci_entry = NULL;
        struct dci_pkt_req_t req_hdr;
        struct diag_cmd_reg_t *reg_item;
        struct diag_cmd_reg_entry_t reg_entry;
        struct diag_cmd_reg_entry_t *temp_entry;

        if (!buf)
                return -EIO;

        /* Must hold at least the DCI request header plus a diag header */
        if (len <= (sizeof(struct dci_pkt_req_t) +
                sizeof(struct diag_pkt_header_t)) ||
                len > DCI_REQ_BUF_SIZE) {
                pr_err("diag: dci: Invalid length %d len in %s", len, __func__);
                return -EIO;
        }

        /* Split off the DCI request header; the rest is the raw command */
        req_hdr = *(struct dci_pkt_req_t *)temp;
        temp += sizeof(struct dci_pkt_req_t);
        read_len += sizeof(struct dci_pkt_req_t);
        req_len -= sizeof(struct dci_pkt_req_t);
        req_buf = temp; /* Start of the Request */
        header = (struct diag_pkt_header_t *)temp;
        temp += sizeof(struct diag_pkt_header_t);
        read_len += sizeof(struct diag_pkt_header_t);
        if (read_len >= DCI_REQ_BUF_SIZE) {
                pr_err("diag: dci: In %s, invalid read_len: %d\n", __func__,
                       read_len);
                return -EIO;
        }

        mutex_lock(&driver->dci_mutex);
        dci_entry = diag_dci_get_client_entry(req_hdr.client_id);
        if (!dci_entry) {
                pr_err("diag: Invalid client %d in %s\n",
                       req_hdr.client_id, __func__);
                mutex_unlock(&driver->dci_mutex);
                return DIAG_DCI_NO_REG;
        }

        /* Check if the command is allowed on DCI */
        if (diag_dci_filter_commands(header)) {
                pr_debug("diag: command not supported %d %d %d",
                         header->cmd_code, header->subsys_id,
                         header->subsys_cmd_code);
                mutex_unlock(&driver->dci_mutex);
                return DIAG_DCI_SEND_DATA_FAIL;
        }

        /* Common commands are forwarded to peripherals even when the
         * apps handler also answers them (see the check further down).
         */
        common_cmd = diag_check_common_cmd(header);
        if (common_cmd < 0) {
                pr_debug("diag: error in checking common command, %d\n",
                         common_cmd);
                mutex_unlock(&driver->dci_mutex);
                return DIAG_DCI_SEND_DATA_FAIL;
        }

        /*
         * Previous packet is yet to be consumed by the client. Wait
         * till the buffer is free.
         */
        while (retry_count < max_retries) {
                retry_count++;
                if (driver->in_busy_dcipktdata)
                        usleep_range(10000, 10100);
                else
                        break;
        }
        /* The buffer is still busy */
        if (driver->in_busy_dcipktdata) {
                pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
                                                                __func__);
                mutex_unlock(&driver->dci_mutex);
                return -EAGAIN;
        }

        /* Register this new DCI packet */
        req_entry = diag_register_dci_transaction(req_hdr.uid,
                                                  req_hdr.client_id);
        if (!req_entry) {
                pr_alert("diag: registering new DCI transaction failed\n");
                mutex_unlock(&driver->dci_mutex);
                return DIAG_DCI_NO_REG;
        }
        mutex_unlock(&driver->dci_mutex);

        /*
         * If the client has registered for remote data, route the packet to the
         * remote processor
         */
        if (dci_entry->client_info.token > 0) {
                /* NOTE(review): dci_entry and req_entry are used after
                 * dci_mutex is dropped, and req_entry is not unwound if
                 * the remote send fails — confirm whether the client can
                 * be removed concurrently here.
                 */
                ret = diag_send_dci_pkt_remote(req_buf, req_len, req_entry->tag,
                                               dci_entry->client_info.token);
                return ret;
        }

        /* Check if it is a dedicated Apps command */
        ret = diag_dci_process_apps_pkt(header, req_buf, req_len,
                                        req_entry->tag);
        if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
                return ret;

        /* Otherwise (or additionally, for common commands) look up the
         * peripheral that registered for this command code range.
         */
        reg_entry.cmd_code = header->cmd_code;
        reg_entry.subsys_id = header->subsys_id;
        reg_entry.cmd_code_hi = header->subsys_cmd_code;
        reg_entry.cmd_code_lo = header->subsys_cmd_code;

        mutex_lock(&driver->cmd_reg_mutex);
        temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
        if (temp_entry) {
                reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
                                                                entry);
                ret = diag_send_dci_pkt(reg_item, req_buf, req_len,
                                        req_entry->tag);
        } else {
                DIAG_LOG(DIAG_DEBUG_DCI, "Command not found: %02x %02x %02x\n",
                                reg_entry.cmd_code, reg_entry.subsys_id,
                                reg_entry.cmd_code_hi);
        }
        mutex_unlock(&driver->cmd_reg_mutex);

        return ret;
}
2063
2064 int diag_process_dci_transaction(unsigned char *buf, int len)
2065 {
2066         unsigned char *temp = buf;
2067         uint16_t log_code, item_num;
2068         int ret = -1, found = 0, client_id = 0, client_token = 0;
2069         int count, set_mask, num_codes, bit_index, event_id, offset = 0;
2070         unsigned int byte_index, read_len = 0;
2071         uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
2072         uint8_t *event_mask_ptr;
2073         struct diag_dci_client_tbl *dci_entry = NULL;
2074
2075         if (!temp) {
2076                 pr_err("diag: Invalid buffer in %s\n", __func__);
2077                 return -ENOMEM;
2078         }
2079
2080         /* This is Pkt request/response transaction */
2081         if (*(int *)temp > 0) {
2082                 return diag_process_dci_pkt_rsp(buf, len);
2083         } else if (*(int *)temp == DCI_LOG_TYPE) {
2084                 /* Minimum length of a log mask config is 12 + 2 bytes for
2085                    atleast one log code to be set or reset */
2086                 if (len < DCI_LOG_CON_MIN_LEN || len > USER_SPACE_DATA) {
2087                         pr_err("diag: dci: Invalid length in %s\n", __func__);
2088                         return -EIO;
2089                 }
2090
2091                 /* Extract each log code and put in client table */
2092                 temp += sizeof(int);
2093                 read_len += sizeof(int);
2094                 client_id = *(int *)temp;
2095                 temp += sizeof(int);
2096                 read_len += sizeof(int);
2097                 set_mask = *(int *)temp;
2098                 temp += sizeof(int);
2099                 read_len += sizeof(int);
2100                 num_codes = *(int *)temp;
2101                 temp += sizeof(int);
2102                 read_len += sizeof(int);
2103
2104                 /* find client table entry */
2105                 mutex_lock(&driver->dci_mutex);
2106                 dci_entry = diag_dci_get_client_entry(client_id);
2107                 if (!dci_entry) {
2108                         pr_err("diag: In %s, invalid client\n", __func__);
2109                         mutex_unlock(&driver->dci_mutex);
2110                         return ret;
2111                 }
2112                 client_token = dci_entry->client_info.token;
2113
2114                 if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
2115                         pr_err("diag: dci: Invalid number of log codes %d\n",
2116                                                                 num_codes);
2117                         mutex_unlock(&driver->dci_mutex);
2118                         return -EIO;
2119                 }
2120
2121                 head_log_mask_ptr = dci_entry->dci_log_mask;
2122                 if (!head_log_mask_ptr) {
2123                         pr_err("diag: dci: Invalid Log mask pointer in %s\n",
2124                                                                 __func__);
2125                         mutex_unlock(&driver->dci_mutex);
2126                         return -ENOMEM;
2127                 }
2128                 pr_debug("diag: head of dci log mask %pK\n", head_log_mask_ptr);
2129                 count = 0; /* iterator for extracting log codes */
2130
2131                 while (count < num_codes) {
2132                         if (read_len >= USER_SPACE_DATA) {
2133                                 pr_err("diag: dci: Invalid length for log type in %s",
2134                                                                 __func__);
2135                                 mutex_unlock(&driver->dci_mutex);
2136                                 return -EIO;
2137                         }
2138                         log_code = *(uint16_t *)temp;
2139                         equip_id = LOG_GET_EQUIP_ID(log_code);
2140                         item_num = LOG_GET_ITEM_NUM(log_code);
2141                         byte_index = item_num/8 + 2;
2142                         if (byte_index >= (DCI_MAX_ITEMS_PER_LOG_CODE+2)) {
2143                                 pr_err("diag: dci: Log type, invalid byte index\n");
2144                                 mutex_unlock(&driver->dci_mutex);
2145                                 return ret;
2146                         }
2147                         byte_mask = 0x01 << (item_num % 8);
2148                         /*
2149                          * Parse through log mask table and find
2150                          * relevant range
2151                          */
2152                         log_mask_ptr = head_log_mask_ptr;
2153                         found = 0;
2154                         offset = 0;
2155                         while (log_mask_ptr && (offset < DCI_LOG_MASK_SIZE)) {
2156                                 if (*log_mask_ptr == equip_id) {
2157                                         found = 1;
2158                                         pr_debug("diag: find equip id = %x at %pK\n",
2159                                                  equip_id, log_mask_ptr);
2160                                         break;
2161                                 } else {
2162                                         pr_debug("diag: did not find equip id = %x at %d\n",
2163                                                  equip_id, *log_mask_ptr);
2164                                         log_mask_ptr += 514;
2165                                         offset += 514;
2166                                 }
2167                         }
2168                         if (!found) {
2169                                 pr_err("diag: dci equip id not found\n");
2170                                 mutex_unlock(&driver->dci_mutex);
2171                                 return ret;
2172                         }
2173                         *(log_mask_ptr+1) = 1; /* set the dirty byte */
2174                         log_mask_ptr = log_mask_ptr + byte_index;
2175                         if (set_mask)
2176                                 *log_mask_ptr |= byte_mask;
2177                         else
2178                                 *log_mask_ptr &= ~byte_mask;
2179                         /* add to cumulative mask */
2180                         update_dci_cumulative_log_mask(
2181                                 offset, byte_index,
2182                                 byte_mask, client_token);
2183                         temp += 2;
2184                         read_len += 2;
2185                         count++;
2186                         ret = DIAG_DCI_NO_ERROR;
2187                 }
2188                 /* send updated mask to userspace clients */
2189                 if (client_token == DCI_LOCAL_PROC)
2190                         diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
2191                 /* send updated mask to peripherals */
2192                 ret = dci_ops_tbl[client_token].send_log_mask(client_token);
2193                 mutex_unlock(&driver->dci_mutex);
2194         } else if (*(int *)temp == DCI_EVENT_TYPE) {
2195                 /* Minimum length of a event mask config is 12 + 4 bytes for
2196                   atleast one event id to be set or reset. */
2197                 if (len < DCI_EVENT_CON_MIN_LEN || len > USER_SPACE_DATA) {
2198                         pr_err("diag: dci: Invalid length in %s\n", __func__);
2199                         return -EIO;
2200                 }
2201
2202                 /* Extract each event id and put in client table */
2203                 temp += sizeof(int);
2204                 read_len += sizeof(int);
2205                 client_id = *(int *)temp;
2206                 temp += sizeof(int);
2207                 read_len += sizeof(int);
2208                 set_mask = *(int *)temp;
2209                 temp += sizeof(int);
2210                 read_len += sizeof(int);
2211                 num_codes = *(int *)temp;
2212                 temp += sizeof(int);
2213                 read_len += sizeof(int);
2214
2215                 /* find client table entry */
2216                 mutex_lock(&driver->dci_mutex);
2217                 dci_entry = diag_dci_get_client_entry(client_id);
2218                 if (!dci_entry) {
2219                         pr_err("diag: In %s, invalid client\n", __func__);
2220                         mutex_unlock(&driver->dci_mutex);
2221                         return ret;
2222                 }
2223                 client_token = dci_entry->client_info.token;
2224
2225                 /* Check for positive number of event ids. Also, the number of
2226                    event ids should fit in the buffer along with set_mask and
2227                    num_codes which are 4 bytes each */
2228                 if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
2229                         pr_err("diag: dci: Invalid number of event ids %d\n",
2230                                                                 num_codes);
2231                         mutex_unlock(&driver->dci_mutex);
2232                         return -EIO;
2233                 }
2234
2235                 event_mask_ptr = dci_entry->dci_event_mask;
2236                 if (!event_mask_ptr) {
2237                         pr_err("diag: dci: Invalid event mask pointer in %s\n",
2238                                                                 __func__);
2239                         mutex_unlock(&driver->dci_mutex);
2240                         return -ENOMEM;
2241                 }
2242                 pr_debug("diag: head of dci event mask %pK\n", event_mask_ptr);
2243                 count = 0; /* iterator for extracting log codes */
2244                 while (count < num_codes) {
2245                         if (read_len >= USER_SPACE_DATA) {
2246                                 pr_err("diag: dci: Invalid length for event type in %s",
2247                                                                 __func__);
2248                                 mutex_unlock(&driver->dci_mutex);
2249                                 return -EIO;
2250                         }
2251                         event_id = *(int *)temp;
2252                         byte_index = event_id/8;
2253                         if (byte_index >= DCI_EVENT_MASK_SIZE) {
2254                                 pr_err("diag: dci: Event type, invalid byte index\n");
2255                                 mutex_unlock(&driver->dci_mutex);
2256                                 return ret;
2257                         }
2258                         bit_index = event_id % 8;
2259                         byte_mask = 0x1 << bit_index;
2260                         /*
2261                          * Parse through event mask table and set
2262                          * relevant byte & bit combination
2263                          */
2264                         if (set_mask)
2265                                 *(event_mask_ptr + byte_index) |= byte_mask;
2266                         else
2267                                 *(event_mask_ptr + byte_index) &= ~byte_mask;
2268                         /* add to cumulative mask */
2269                         update_dci_cumulative_event_mask(byte_index, byte_mask,
2270                                                          client_token);
2271                         temp += sizeof(int);
2272                         read_len += sizeof(int);
2273                         count++;
2274                         ret = DIAG_DCI_NO_ERROR;
2275                 }
2276                 /* send updated mask to userspace clients */
2277                 if (dci_entry->client_info.token == DCI_LOCAL_PROC)
2278                         diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
2279                 /* send updated mask to peripherals */
2280                 ret = dci_ops_tbl[client_token].send_event_mask(client_token);
2281                 mutex_unlock(&driver->dci_mutex);
2282         } else {
2283                 pr_alert("diag: Incorrect DCI transaction\n");
2284         }
2285         return ret;
2286 }
2287
2288
2289 struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id)
2290 {
2291         struct list_head *start, *temp;
2292         struct diag_dci_client_tbl *entry = NULL;
2293         list_for_each_safe(start, temp, &driver->dci_client_list) {
2294                 entry = list_entry(start, struct diag_dci_client_tbl, track);
2295                 if (entry->client_info.client_id == client_id)
2296                         return entry;
2297         }
2298         return NULL;
2299 }
2300
/*
 * Find the DCI client entry whose registered task belongs to thread
 * group @tgid. Each candidate is probed via pid/task references so
 * that clients whose process has already exited are skipped safely.
 * Returns the matching entry, or NULL if none matches.
 */
struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
{
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	struct pid *pid_struct = NULL;
	struct task_struct *task_s = NULL;

	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		/* Takes a reference on the pid; dropped before moving on */
		pid_struct = find_get_pid(entry->tgid);
		if (!pid_struct) {
			DIAG_LOG(DIAG_DEBUG_DCI,
			"diag: Exited pid (%d) doesn't match dci client of pid (%d)\n",
			tgid, entry->tgid);
			continue;
		}
		task_s = get_pid_task(pid_struct, PIDTYPE_PID);
		if (!task_s) {
			DIAG_LOG(DIAG_DEBUG_DCI,
				"diag: valid task doesn't exist for pid = %d\n",
				entry->tgid);
			put_pid(pid_struct);
			continue;
		}
		/*
		 * Compare both the task pointer and its tgid so that a
		 * recycled pid belonging to a different process does not
		 * match a stale client entry.
		 */
		if (task_s == entry->client) {
			if (entry->client->tgid == tgid) {
				put_task_struct(task_s);
				put_pid(pid_struct);
				return entry;
			}
		}
		put_task_struct(task_s);
		put_pid(pid_struct);
	}
	return NULL;
}
2337
2338 void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token)
2339 {
2340         uint8_t *event_mask_ptr, *update_ptr = NULL;
2341         struct list_head *start, *temp;
2342         struct diag_dci_client_tbl *entry = NULL;
2343         bool is_set = false;
2344
2345         mutex_lock(&dci_event_mask_mutex);
2346         update_ptr = dci_ops_tbl[token].event_mask_composite;
2347         if (!update_ptr) {
2348                 mutex_unlock(&dci_event_mask_mutex);
2349                 return;
2350         }
2351         update_ptr += offset;
2352         list_for_each_safe(start, temp, &driver->dci_client_list) {
2353                 entry = list_entry(start, struct diag_dci_client_tbl, track);
2354                 if (entry->client_info.token != token)
2355                         continue;
2356                 event_mask_ptr = entry->dci_event_mask;
2357                 event_mask_ptr += offset;
2358                 if ((*event_mask_ptr & byte_mask) == byte_mask) {
2359                         is_set = true;
2360                         /* break even if one client has the event mask set */
2361                         break;
2362                 }
2363         }
2364         if (is_set == false)
2365                 *update_ptr &= ~byte_mask;
2366         else
2367                 *update_ptr |= byte_mask;
2368         mutex_unlock(&dci_event_mask_mutex);
2369 }
2370
2371 void diag_dci_invalidate_cumulative_event_mask(int token)
2372 {
2373         int i = 0;
2374         struct list_head *start, *temp;
2375         struct diag_dci_client_tbl *entry = NULL;
2376         uint8_t *event_mask_ptr, *update_ptr = NULL;
2377
2378         mutex_lock(&dci_event_mask_mutex);
2379         update_ptr = dci_ops_tbl[token].event_mask_composite;
2380         if (!update_ptr) {
2381                 mutex_unlock(&dci_event_mask_mutex);
2382                 return;
2383         }
2384
2385         create_dci_event_mask_tbl(update_ptr);
2386         list_for_each_safe(start, temp, &driver->dci_client_list) {
2387                 entry = list_entry(start, struct diag_dci_client_tbl, track);
2388                 if (entry->client_info.token != token)
2389                         continue;
2390                 event_mask_ptr = entry->dci_event_mask;
2391                 for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
2392                         *(update_ptr+i) |= *(event_mask_ptr+i);
2393         }
2394         mutex_unlock(&dci_event_mask_mutex);
2395 }
2396
2397 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Send the composite DCI event mask for @token to its remote processor
 * over the bridge. Returns DIAG_DCI_NO_ERROR on success, -EINVAL if no
 * composite mask exists for the token, -EAGAIN if no bridge buffer is
 * available, or the bridge write error code.
 */
int diag_send_dci_event_mask_remote(int token)
{
	unsigned char *buf = NULL;
	struct diag_dci_header_t dci_header;
	struct diag_ctrl_event_mask event_mask;
	int dci_header_size = sizeof(struct diag_dci_header_t);
	int event_header_size = sizeof(struct diag_ctrl_event_mask);
	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
	unsigned char *event_mask_ptr = NULL;
	uint32_t write_len = 0;

	mutex_lock(&dci_event_mask_mutex);
	event_mask_ptr = dci_ops_tbl[token].event_mask_composite;
	if (!event_mask_ptr) {
		mutex_unlock(&dci_event_mask_mutex);
		return -EINVAL;
	}
	buf = dci_get_buffer_from_bridge(token);
	if (!buf) {
		pr_err("diag: In %s, unable to get dci buffers to write data\n",
			__func__);
		mutex_unlock(&dci_event_mask_mutex);
		return -EAGAIN;
	}

	/* Frame the DCI header */
	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	/* Payload = event mask ctrl header + mask bytes + end terminator */
	dci_header.length = event_header_size + DCI_EVENT_MASK_SIZE + 1;
	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;

	event_mask.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
	event_mask.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
	event_mask.stream_id = DCI_MASK_STREAM;
	event_mask.status = DIAG_CTRL_MASK_VALID;
	event_mask.event_config = 0; /* event config */
	event_mask.event_mask_size = DCI_EVENT_MASK_SIZE;
	/* Enable event reporting iff any bit in the composite mask is set */
	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
		if (event_mask_ptr[i] != 0) {
			event_mask.event_config = 1;
			break;
		}
	}
	/* Pack DCI header, control header and mask contiguously */
	memcpy(buf + write_len, &dci_header, dci_header_size);
	write_len += dci_header_size;
	memcpy(buf + write_len, &event_mask, event_header_size);
	write_len += event_header_size;
	memcpy(buf + write_len, event_mask_ptr, DCI_EVENT_MASK_SIZE);
	write_len += DCI_EVENT_MASK_SIZE;
	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
	write_len += sizeof(uint8_t);
	err = diag_dci_write_bridge(token, buf, write_len);
	if (err) {
		pr_err("diag: error writing event mask to remote proc, token: %d, err: %d\n",
		       token, err);
		/* Write failed: return the buffer to its mempool here */
		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
		ret = err;
	} else {
		ret = DIAG_DCI_NO_ERROR;
	}
	mutex_unlock(&dci_event_mask_mutex);
	return ret;
}
2461 #endif
2462
/*
 * Send the local composite DCI event mask to every DCI-capable
 * peripheral over the control channel. @token is unused; this ops-table
 * callback always serves DCI_LOCAL_PROC. Returns DIAG_DCI_NO_ERROR on
 * success, DIAG_DCI_SEND_DATA_FAIL if any peripheral write failed, or
 * -EINVAL if the local composite mask is missing.
 */
int diag_send_dci_event_mask(int token)
{
	void *buf = event_mask.update_buf;
	struct diag_ctrl_event_mask header;
	int header_size = sizeof(struct diag_ctrl_event_mask);
	int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;
	unsigned char *event_mask_ptr = NULL;

	mutex_lock(&dci_event_mask_mutex);
	event_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].event_mask_composite;
	if (!event_mask_ptr) {
		mutex_unlock(&dci_event_mask_mutex);
		return -EINVAL;
	}

	mutex_lock(&event_mask.lock);
	/* send event mask update */
	header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
	header.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
	header.stream_id = DCI_MASK_STREAM;
	header.status = DIAG_CTRL_MASK_VALID;
	header.event_config = 0; /* event config */
	header.event_mask_size = DCI_EVENT_MASK_SIZE;
	/* Enable event reporting iff any bit in the composite mask is set */
	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
		if (event_mask_ptr[i] != 0) {
			header.event_config = 1;
			break;
		}
	}
	memcpy(buf, &header, header_size);
	memcpy(buf+header_size, event_mask_ptr, DCI_EVENT_MASK_SIZE);
	for (i = 0; i < NUM_PERIPHERALS; i++) {
		/*
		 * Don't send to peripheral if its regular channel
		 * is down. It may also mean that the peripheral doesn't
		 * support DCI.
		 */
		if (check_peripheral_dci_support(i, DCI_LOCAL_PROC)) {
			err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
				  header_size + DCI_EVENT_MASK_SIZE);
			if (err != DIAG_DCI_NO_ERROR)
				ret = DIAG_DCI_SEND_DATA_FAIL;
		}
	}

	mutex_unlock(&event_mask.lock);
	mutex_unlock(&dci_event_mask_mutex);

	return ret;
}
2513
/*
 * Recompute one byte of the composite DCI log mask for @token after a
 * client changed the @byte_mask bits at (@offset + @byte_index), where
 * @offset selects the equip-id entry. The composite bit stays set iff
 * at least one client of this token still has it set. Also marks the
 * entry's dirty byte (entry byte 1) so the mask is re-sent.
 */
void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
						uint8_t byte_mask, int token)
{
	uint8_t *log_mask_ptr, *update_ptr = NULL;
	bool is_set = false;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	mutex_lock(&dci_log_mask_mutex);
	update_ptr = dci_ops_tbl[token].log_mask_composite;
	if (!update_ptr) {
		mutex_unlock(&dci_log_mask_mutex);
		return;
	}

	update_ptr += offset;
	/* update the dirty bit */
	*(update_ptr+1) = 1;
	update_ptr = update_ptr + byte_index;
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->client_info.token != token)
			continue;
		log_mask_ptr = entry->dci_log_mask;
		log_mask_ptr = log_mask_ptr + offset + byte_index;
		if ((*log_mask_ptr & byte_mask) == byte_mask) {
			is_set = true;
			/* break even if one client has the log mask set */
			break;
		}
	}

	if (is_set == false)
		*update_ptr &= ~byte_mask;
	else
		*update_ptr |= byte_mask;
	mutex_unlock(&dci_log_mask_mutex);
}
2552
2553 void diag_dci_invalidate_cumulative_log_mask(int token)
2554 {
2555         int i = 0;
2556         struct list_head *start, *temp;
2557         struct diag_dci_client_tbl *entry = NULL;
2558         uint8_t *log_mask_ptr, *update_ptr = NULL;
2559
2560         /* Clear the composite mask and redo all the masks */
2561         mutex_lock(&dci_log_mask_mutex);
2562         update_ptr = dci_ops_tbl[token].log_mask_composite;
2563         if (!update_ptr) {
2564                 mutex_unlock(&dci_log_mask_mutex);
2565                 return;
2566         }
2567
2568         create_dci_log_mask_tbl(update_ptr, DCI_LOG_MASK_DIRTY);
2569         list_for_each_safe(start, temp, &driver->dci_client_list) {
2570                 entry = list_entry(start, struct diag_dci_client_tbl, track);
2571                 if (entry->client_info.token != token)
2572                         continue;
2573                 log_mask_ptr = entry->dci_log_mask;
2574                 for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
2575                         *(update_ptr+i) |= *(log_mask_ptr+i);
2576         }
2577         mutex_unlock(&dci_log_mask_mutex);
2578 }
2579
/*
 * Serialize one equip-id's DCI log mask into @dest_ptr as a control
 * message: a diag_ctrl_log_mask header followed by the raw mask bytes.
 * @src_ptr points at the entry's equip-id byte; src_ptr[1] is the dirty
 * byte and the mask payload starts at src_ptr + 2.
 * Returns the number of bytes written to @dest_ptr.
 */
static int dci_fill_log_mask(unsigned char *dest_ptr, unsigned char *src_ptr)
{
	struct diag_ctrl_log_mask header;
	int header_len = sizeof(struct diag_ctrl_log_mask);

	header.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
	header.num_items = DCI_MAX_ITEMS_PER_LOG_CODE;
	/* 11 is presumably the ctrl header length after data_len - TODO
	 * confirm against the diag control packet spec
	 */
	header.data_len = 11 + DCI_MAX_ITEMS_PER_LOG_CODE;
	header.stream_id = DCI_MASK_STREAM;
	header.status = 3; /* NOTE(review): 3 looks like "valid mask" - confirm */
	header.equip_id = *src_ptr;
	header.log_mask_size = DCI_MAX_ITEMS_PER_LOG_CODE;
	memcpy(dest_ptr, &header, header_len);
	memcpy(dest_ptr + header_len, src_ptr + 2, DCI_MAX_ITEMS_PER_LOG_CODE);

	return header_len + DCI_MAX_ITEMS_PER_LOG_CODE;
}
2597
2598 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Send the composite DCI log mask for @token to its remote processor
 * over the bridge. Only equip-id entries whose dirty byte is set are
 * transmitted; the dirty byte is cleared after a successful write.
 * Returns DIAG_DCI_NO_ERROR, -EINVAL if no composite mask exists, or
 * -EAGAIN if no bridge buffer is available.
 */
int diag_send_dci_log_mask_remote(int token)
{

	unsigned char *buf = NULL;
	struct diag_dci_header_t dci_header;
	int dci_header_size = sizeof(struct diag_dci_header_t);
	int log_header_size = sizeof(struct diag_ctrl_log_mask);
	uint8_t *log_mask_ptr = NULL;
	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
	int updated;
	uint32_t write_len = 0;

	mutex_lock(&dci_log_mask_mutex);
	log_mask_ptr = dci_ops_tbl[token].log_mask_composite;
	if (!log_mask_ptr) {
		mutex_unlock(&dci_log_mask_mutex);
		return -EINVAL;
	}

	/* DCI header is common to all equipment IDs */
	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	dci_header.length = log_header_size + DCI_MAX_ITEMS_PER_LOG_CODE + 1;
	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;

	for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
		updated = 1;
		write_len = 0;
		/*
		 * Entry stride 514 = equip-id byte + dirty byte + mask
		 * payload (presumably 2 + DCI_MAX_ITEMS_PER_LOG_CODE -
		 * TODO confirm against the mask table layout).
		 * Dirty byte clear means this equip id is unchanged.
		 */
		if (!*(log_mask_ptr + 1)) {
			log_mask_ptr += 514;
			continue;
		}

		buf = dci_get_buffer_from_bridge(token);
		if (!buf) {
			pr_err("diag: In %s, unable to get dci buffers to write data\n",
				__func__);
			mutex_unlock(&dci_log_mask_mutex);
			return -EAGAIN;
		}

		memcpy(buf + write_len, &dci_header, dci_header_size);
		write_len += dci_header_size;
		write_len += dci_fill_log_mask(buf + write_len, log_mask_ptr);
		*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
		write_len += sizeof(uint8_t);
		err = diag_dci_write_bridge(token, buf, write_len);
		if (err) {
			pr_err("diag: error writing log mask to remote processor, equip_id: %d, token: %d, err: %d\n",
			       i, token, err);
			/* Write failed: return buffer and keep entry dirty */
			diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
			updated = 0;
		}
		if (updated)
			*(log_mask_ptr + 1) = 0; /* clear dirty byte */
		log_mask_ptr += 514;
	}
	mutex_unlock(&dci_log_mask_mutex);
	return ret;
}
2659 #endif
2660
/*
 * Send the local composite DCI log mask to every DCI-capable peripheral
 * over the control channel. Only equip ids whose dirty byte is set are
 * sent; the dirty byte is cleared once all peripheral writes succeed.
 * @token is unused; this ops-table callback always serves
 * DCI_LOCAL_PROC. Returns DIAG_DCI_NO_ERROR, DIAG_DCI_SEND_DATA_FAIL if
 * any write failed, or -EINVAL if the composite mask is missing.
 */
int diag_send_dci_log_mask(int token)
{
	void *buf = log_mask.update_buf;
	int write_len = 0;
	uint8_t *log_mask_ptr = NULL;
	int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
	int updated;


	mutex_lock(&dci_log_mask_mutex);
	log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
	if (!log_mask_ptr) {
		mutex_unlock(&dci_log_mask_mutex);
		return -EINVAL;
	}

	mutex_lock(&log_mask.lock);
	/* One iteration per equip id; each table entry is 514 bytes:
	 * equip-id byte + dirty byte + mask payload.
	 */
	for (i = 0; i < 16; i++) {
		updated = 1;
		/* Dirty byte is not set - skip this unchanged equip id */
		if (!(*(log_mask_ptr + 1))) {
			log_mask_ptr += 514;
			continue;
		}
		write_len = dci_fill_log_mask(buf, log_mask_ptr);
		for (j = 0; j < NUM_PERIPHERALS && write_len; j++) {
			if (check_peripheral_dci_support(j, DCI_LOCAL_PROC)) {
				err = diag_dci_write_proc(j, DIAG_CNTL_TYPE,
					buf, write_len);
				if (err != DIAG_DCI_NO_ERROR) {
					updated = 0;
					ret = DIAG_DCI_SEND_DATA_FAIL;
				}
			}
		}
		if (updated)
			*(log_mask_ptr+1) = 0; /* clear dirty byte */
		log_mask_ptr += 514;
	}
	mutex_unlock(&log_mask.lock);
	mutex_unlock(&dci_log_mask_mutex);
	return ret;
}
2704
2705 static int diag_dci_init_local(void)
2706 {
2707         struct dci_ops_tbl_t *temp = &dci_ops_tbl[DCI_LOCAL_PROC];
2708
2709         create_dci_log_mask_tbl(temp->log_mask_composite, DCI_LOG_MASK_CLEAN);
2710         create_dci_event_mask_tbl(temp->event_mask_composite);
2711         temp->peripheral_status |= DIAG_CON_APSS;
2712
2713         return 0;
2714 }
2715
2716 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
2717 static void diag_dci_init_handshake_remote(void)
2718 {
2719         int i;
2720         struct dci_channel_status_t *temp = NULL;
2721
2722         for (i = DCI_REMOTE_BASE; i < NUM_DCI_PROC; i++) {
2723                 temp = &dci_channel_status[i];
2724                 temp->id = i;
2725                 setup_timer(&temp->wait_time, dci_chk_handshake, i);
2726                 INIT_WORK(&temp->handshake_work, dci_handshake_work_fn);
2727         }
2728 }
2729
/*
 * Set up DCI state for all remote (bridged) processors: the MDM DCI
 * write mempool, clean composite masks per remote proc, the partial
 * packet buffer used to reassemble bridged DCI packets, and the
 * handshake timers/work items. Returns 0 on success or -ENOMEM.
 */
static int diag_dci_init_remote(void)
{
	int i;
	struct dci_ops_tbl_t *temp = NULL;

	diagmem_init(driver, POOL_TYPE_MDM_DCI_WRITE);

	for (i = DCI_REMOTE_BASE; i < DCI_REMOTE_LAST; i++) {
		temp = &dci_ops_tbl[i];
		create_dci_log_mask_tbl(temp->log_mask_composite,
					DCI_LOG_MASK_CLEAN);
		create_dci_event_mask_tbl(temp->event_mask_composite);
	}

	/* Staging buffer for DCI packets split across bridge reads */
	partial_pkt.data = vzalloc(MAX_DCI_PACKET_SZ);
	if (!partial_pkt.data) {
		pr_err("diag: Unable to create partial pkt data\n");
		return -ENOMEM;
	}

	partial_pkt.total_len = 0;
	partial_pkt.read_len = 0;
	partial_pkt.remaining = 0;
	partial_pkt.processing = 0;

	diag_dci_init_handshake_remote();

	return 0;
}
2759 #else
static int diag_dci_init_remote(void)
{
	/* No bridge support compiled in; remote DCI setup is a no-op */
	return 0;
}
2764 #endif
2765
2766 static int diag_dci_init_ops_tbl(void)
2767 {
2768         int err = 0;
2769
2770         err = diag_dci_init_local();
2771         if (err)
2772                 goto err;
2773         err = diag_dci_init_remote();
2774         if (err)
2775                 goto err;
2776
2777         return 0;
2778
2779 err:
2780         return -ENOMEM;
2781 }
2782
/*
 * One-time initialization of the DCI subsystem: locks, composite mask
 * tables, the apps DCI scratch buffer, client/request lists, the DCI
 * workqueue and the drain timer. Returns DIAG_DCI_NO_ERROR on success,
 * or DIAG_DCI_NO_REG after releasing any partially set up state.
 */
int diag_dci_init(void)
{
	int ret = 0;

	driver->dci_tag = 0;
	driver->dci_client_id = 0;
	driver->num_dci_client = 0;
	mutex_init(&driver->dci_mutex);
	mutex_init(&dci_log_mask_mutex);
	mutex_init(&dci_event_mask_mutex);
	spin_lock_init(&ws_lock);

	ret = diag_dci_init_ops_tbl();
	if (ret)
		goto err;

	if (driver->apps_dci_buf == NULL) {
		driver->apps_dci_buf = vzalloc(DCI_BUF_SIZE);
		if (driver->apps_dci_buf == NULL)
			goto err;
	}
	INIT_LIST_HEAD(&driver->dci_client_list);
	INIT_LIST_HEAD(&driver->dci_req_list);

	driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
	if (!driver->diag_dci_wq)
		goto err;

	INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);

	setup_timer(&dci_drain_timer, dci_drain_data, 0);
	return DIAG_DCI_NO_ERROR;
err:
	pr_err("diag: Could not initialize diag DCI buffers");
	/* vfree(NULL) is a no-op, so unconditional cleanup is safe */
	vfree(driver->apps_dci_buf);
	driver->apps_dci_buf = NULL;

	if (driver->diag_dci_wq)
		destroy_workqueue(driver->diag_dci_wq);
	vfree(partial_pkt.data);
	partial_pkt.data = NULL;
	mutex_destroy(&driver->dci_mutex);
	mutex_destroy(&dci_log_mask_mutex);
	mutex_destroy(&dci_event_mask_mutex);
	return DIAG_DCI_NO_REG;
}
2829
2830 void diag_dci_channel_init(void)
2831 {
2832         uint8_t peripheral;
2833
2834         for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
2835                 diagfwd_open(peripheral, TYPE_DCI);
2836                 diagfwd_open(peripheral, TYPE_DCI_CMD);
2837         }
2838 }
2839
/*
 * Tear down DCI state created by diag_dci_init()/diag_dci_init_remote():
 * free the partial-packet and apps buffers, destroy the DCI locks and
 * the DCI workqueue.
 */
void diag_dci_exit(void)
{
	vfree(partial_pkt.data);
	partial_pkt.data = NULL;
	vfree(driver->apps_dci_buf);
	driver->apps_dci_buf = NULL;
	mutex_destroy(&driver->dci_mutex);
	mutex_destroy(&dci_log_mask_mutex);
	mutex_destroy(&dci_event_mask_mutex);
	destroy_workqueue(driver->diag_dci_wq);
}
2851
2852 int diag_dci_clear_log_mask(int client_id)
2853 {
2854         int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
2855         uint8_t *update_ptr;
2856         struct diag_dci_client_tbl *entry = NULL;
2857
2858         entry = diag_dci_get_client_entry(client_id);
2859         if (!entry) {
2860                 pr_err("diag: In %s, invalid client entry\n", __func__);
2861                 return DIAG_DCI_TABLE_ERR;
2862         }
2863         token = entry->client_info.token;
2864         update_ptr = dci_ops_tbl[token].log_mask_composite;
2865
2866         create_dci_log_mask_tbl(entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
2867         diag_dci_invalidate_cumulative_log_mask(token);
2868
2869         /*
2870          * Send updated mask to userspace clients only if the client
2871          * is registered on the local processor
2872          */
2873         if (token == DCI_LOCAL_PROC)
2874                 diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
2875         /* Send updated mask to peripherals */
2876         err = dci_ops_tbl[token].send_log_mask(token);
2877         return err;
2878 }
2879
2880 int diag_dci_clear_event_mask(int client_id)
2881 {
2882         int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
2883         uint8_t *update_ptr;
2884         struct diag_dci_client_tbl *entry = NULL;
2885
2886         entry = diag_dci_get_client_entry(client_id);
2887         if (!entry) {
2888                 pr_err("diag: In %s, invalid client entry\n", __func__);
2889                 return DIAG_DCI_TABLE_ERR;
2890         }
2891         token = entry->client_info.token;
2892         update_ptr = dci_ops_tbl[token].event_mask_composite;
2893
2894         create_dci_event_mask_tbl(entry->dci_event_mask);
2895         diag_dci_invalidate_cumulative_event_mask(token);
2896
2897         /*
2898          * Send updated mask to userspace clients only if the client is
2899          * registerted on the local processor
2900          */
2901         if (token == DCI_LOCAL_PROC)
2902                 diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
2903         /* Send updated mask to peripherals */
2904         err = dci_ops_tbl[token].send_event_mask(token);
2905         return err;
2906 }
2907
2908 uint8_t diag_dci_get_cumulative_real_time(int token)
2909 {
2910         uint8_t real_time = MODE_NONREALTIME;
2911         struct list_head *start, *temp;
2912         struct diag_dci_client_tbl *entry = NULL;
2913
2914         list_for_each_safe(start, temp, &driver->dci_client_list) {
2915                 entry = list_entry(start, struct diag_dci_client_tbl, track);
2916                 if (entry->real_time == MODE_REALTIME &&
2917                                         entry->client_info.token == token) {
2918                         real_time = 1;
2919                         break;
2920                 }
2921         }
2922         return real_time;
2923 }
2924
2925 int diag_dci_set_real_time(struct diag_dci_client_tbl *entry, uint8_t real_time)
2926 {
2927         if (!entry) {
2928                 pr_err("diag: In %s, invalid client entry\n", __func__);
2929                 return 0;
2930         }
2931         entry->real_time = real_time;
2932         return 1;
2933 }
2934
/*
 * diag_dci_register_client() - register the current task as a DCI client
 * @reg_entry: caller-supplied registration info (token, notification list,
 *	       signal type); on success the new client_id is written back
 *	       into reg_entry->client_id.
 *
 * Allocates the per-client log and event masks and the per-peripheral
 * buffer bookkeeping, links the new entry on driver->dci_client_list and,
 * for the first client, votes the DCI processor up.
 *
 * Returns the newly assigned client id on success, DIAG_DCI_NO_REG on
 * any validation or allocation failure.
 */
int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
{
	int i, err = 0;
	struct diag_dci_client_tbl *new_entry = NULL;
	struct diag_dci_buf_peripheral_t *proc_buf = NULL;

	if (!reg_entry)
		return DIAG_DCI_NO_REG;
	if (!VALID_DCI_TOKEN(reg_entry->token)) {
		pr_alert("diag: Invalid DCI client token, %d\n",
						reg_entry->token);
		return DIAG_DCI_NO_REG;
	}

	/* Registration is refused while DCI itself is unregistered */
	if (driver->dci_state == DIAG_DCI_NO_REG)
		return DIAG_DCI_NO_REG;

	if (driver->num_dci_client >= MAX_DCI_CLIENTS)
		return DIAG_DCI_NO_REG;

	new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
	if (new_entry == NULL) {
		pr_err("diag: unable to alloc memory\n");
		return DIAG_DCI_NO_REG;
	}

	mutex_lock(&driver->dci_mutex);

	/*
	 * Pin the registering task so the task_struct stays valid for the
	 * lifetime of the client; released via put_task_struct() in
	 * diag_dci_deinit_client().
	 */
	get_task_struct(current);
	new_entry->client = current;
	new_entry->tgid = current->tgid;
	new_entry->client_info.notification_list =
				reg_entry->notification_list;
	new_entry->client_info.signal_type =
				reg_entry->signal_type;
	new_entry->client_info.token = reg_entry->token;
	/*
	 * Local clients get one buffer set per peripheral; remote (MDM)
	 * clients get a single buffer set.
	 */
	switch (reg_entry->token) {
	case DCI_LOCAL_PROC:
		new_entry->num_buffers = NUM_DCI_PERIPHERALS;
		break;
	case DCI_MDM_PROC:
		new_entry->num_buffers = 1;
		break;
	}

	new_entry->buffers = NULL;
	new_entry->real_time = MODE_REALTIME;
	new_entry->in_service = 0;
	INIT_LIST_HEAD(&new_entry->list_write_buf);
	mutex_init(&new_entry->write_buf_mutex);
	/* Per-client log mask; masks are large, hence vzalloc */
	new_entry->dci_log_mask =  vzalloc(DCI_LOG_MASK_SIZE);
	if (!new_entry->dci_log_mask) {
		pr_err("diag: Unable to create log mask for client, %d",
							driver->dci_client_id);
		goto fail_alloc;
	}
	create_dci_log_mask_tbl(new_entry->dci_log_mask, DCI_LOG_MASK_CLEAN);

	/* Per-client event mask */
	new_entry->dci_event_mask =  vzalloc(DCI_EVENT_MASK_SIZE);
	if (!new_entry->dci_event_mask) {
		pr_err("diag: Unable to create event mask for client, %d",
							driver->dci_client_id);
		goto fail_alloc;
	}
	create_dci_event_mask_tbl(new_entry->dci_event_mask);

	new_entry->buffers = kzalloc(new_entry->num_buffers *
				     sizeof(struct diag_dci_buf_peripheral_t),
					GFP_KERNEL);
	if (!new_entry->buffers) {
		pr_err("diag: Unable to allocate buffers for peripherals in %s\n",
								__func__);
		goto fail_alloc;
	}

	/* Set up primary and command buffers for each peripheral slot */
	for (i = 0; i < new_entry->num_buffers; i++) {
		proc_buf = &new_entry->buffers[i];
		/* NOTE(review): &array[i] can never be NULL; dead check */
		if (!proc_buf)
			goto fail_alloc;

		mutex_init(&proc_buf->health_mutex);
		mutex_init(&proc_buf->buf_mutex);
		proc_buf->health.dropped_events = 0;
		proc_buf->health.dropped_logs = 0;
		proc_buf->health.received_events = 0;
		proc_buf->health.received_logs = 0;
		proc_buf->buf_primary = kzalloc(
					sizeof(struct diag_dci_buffer_t),
					GFP_KERNEL);
		if (!proc_buf->buf_primary)
			goto fail_alloc;
		proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
					GFP_KERNEL);
		if (!proc_buf->buf_cmd)
			goto fail_alloc;
		err = diag_dci_init_buffer(proc_buf->buf_primary,
					   DCI_BUF_PRIMARY);
		if (err)
			goto fail_alloc;
		err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
		if (err)
			goto fail_alloc;
		/* Start writing into the primary buffer */
		proc_buf->buf_curr = proc_buf->buf_primary;
	}

	/* Publish the client and hand out the next client id */
	list_add_tail(&new_entry->track, &driver->dci_client_list);
	driver->dci_client_id++;
	new_entry->client_info.client_id = driver->dci_client_id;
	reg_entry->client_id = driver->dci_client_id;
	driver->num_dci_client++;
	/* First client brings the DCI processor vote up */
	if (driver->num_dci_client == 1)
		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP, reg_entry->token);
	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
	mutex_unlock(&driver->dci_mutex);

	return driver->dci_client_id;

fail_alloc:
	/*
	 * Unwind every allocation made so far: per-peripheral buffer data
	 * and mutexes, then the masks, the buffer array and the entry
	 * itself. Slots never reached in the loop above hold NULL
	 * pointers (kzalloc), so the frees below are safe no-ops there.
	 */
	if (new_entry) {
		for (i = 0; ((i < new_entry->num_buffers) &&
			new_entry->buffers); i++) {
			proc_buf = &new_entry->buffers[i];
			if (proc_buf) {
				mutex_destroy(&proc_buf->health_mutex);
				if (proc_buf->buf_primary) {
					vfree(proc_buf->buf_primary->data);
					proc_buf->buf_primary->data = NULL;
					mutex_destroy(
					   &proc_buf->buf_primary->data_mutex);
				}
				kfree(proc_buf->buf_primary);
				proc_buf->buf_primary = NULL;
				if (proc_buf->buf_cmd) {
					vfree(proc_buf->buf_cmd->data);
					proc_buf->buf_cmd->data = NULL;
					mutex_destroy(
					   &proc_buf->buf_cmd->data_mutex);
				}
				kfree(proc_buf->buf_cmd);
				proc_buf->buf_cmd = NULL;
			}
		}
		vfree(new_entry->dci_event_mask);
		new_entry->dci_event_mask = NULL;
		vfree(new_entry->dci_log_mask);
		new_entry->dci_log_mask = NULL;
		kfree(new_entry->buffers);
		new_entry->buffers = NULL;
		kfree(new_entry);
		new_entry = NULL;
	}
	mutex_unlock(&driver->dci_mutex);
	return DIAG_DCI_NO_REG;
}
3089
3090 int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
3091 {
3092         int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
3093         struct diag_dci_buf_peripheral_t *proc_buf = NULL;
3094         struct diag_dci_buffer_t *buf_entry, *temp;
3095         struct list_head *start, *req_temp;
3096         struct dci_pkt_req_entry_t *req_entry = NULL;
3097         int token = DCI_LOCAL_PROC;
3098
3099         if (!entry)
3100                 return DIAG_DCI_NOT_SUPPORTED;
3101
3102         token = entry->client_info.token;
3103         /*
3104          * Remove the entry from the list before freeing the buffers
3105          * to ensure that we don't have any invalid access.
3106          */
3107         if (!list_empty(&entry->track))
3108                 list_del(&entry->track);
3109         driver->num_dci_client--;
3110
3111         put_task_struct(entry->client);
3112         entry->client = NULL;
3113         /*
3114          * Clear the client's log and event masks, update the cumulative
3115          * masks and send the masks to peripherals
3116          */
3117         vfree(entry->dci_log_mask);
3118         entry->dci_log_mask = NULL;
3119         diag_dci_invalidate_cumulative_log_mask(token);
3120         if (token == DCI_LOCAL_PROC)
3121                 diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
3122         ret = dci_ops_tbl[token].send_log_mask(token);
3123         if (ret != DIAG_DCI_NO_ERROR) {
3124                 return ret;
3125         }
3126         vfree(entry->dci_event_mask);
3127         entry->dci_event_mask = NULL;
3128         diag_dci_invalidate_cumulative_event_mask(token);
3129         if (token == DCI_LOCAL_PROC)
3130                 diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
3131         ret = dci_ops_tbl[token].send_event_mask(token);
3132         if (ret != DIAG_DCI_NO_ERROR) {
3133                 return ret;
3134         }
3135
3136         list_for_each_safe(start, req_temp, &driver->dci_req_list) {
3137                 req_entry = list_entry(start, struct dci_pkt_req_entry_t,
3138                                        track);
3139                 if (req_entry->client_id == entry->client_info.client_id) {
3140                         if (!list_empty(&req_entry->track))
3141                                 list_del(&req_entry->track);
3142                         kfree(req_entry);
3143                         req_entry = NULL;
3144                 }
3145         }
3146
3147         /* Clean up any buffer that is pending write */
3148         mutex_lock(&entry->write_buf_mutex);
3149         list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
3150                                                         buf_track) {
3151                 if (!list_empty(&buf_entry->buf_track))
3152                         list_del(&buf_entry->buf_track);
3153                 if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
3154                         mutex_lock(&buf_entry->data_mutex);
3155                         diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
3156                         buf_entry->data = NULL;
3157                         mutex_unlock(&buf_entry->data_mutex);
3158                         kfree(buf_entry);
3159                         buf_entry = NULL;
3160                 } else if (buf_entry->buf_type == DCI_BUF_CMD) {
3161                         peripheral = buf_entry->data_source;
3162                         if (peripheral == APPS_DATA)
3163                                 continue;
3164                 }
3165                 /*
3166                  * These are buffers that can't be written to the client which
3167                  * means that the copy cannot be completed. Make sure that we
3168                  * remove those references in DCI wakeup source.
3169                  */
3170                 diag_ws_on_copy_fail(DIAG_WS_DCI);
3171         }
3172         mutex_unlock(&entry->write_buf_mutex);
3173
3174         for (i = 0; i < entry->num_buffers; i++) {
3175                 proc_buf = &entry->buffers[i];
3176                 buf_entry = proc_buf->buf_curr;
3177                 mutex_lock(&proc_buf->buf_mutex);
3178                 /* Clean up secondary buffer from mempool that is active */
3179                 if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
3180                         mutex_lock(&buf_entry->data_mutex);
3181                         diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
3182                         buf_entry->data = NULL;
3183                         mutex_unlock(&buf_entry->data_mutex);
3184                         mutex_destroy(&buf_entry->data_mutex);
3185                         kfree(buf_entry);
3186                         buf_entry = NULL;
3187                 }
3188
3189                 mutex_lock(&proc_buf->buf_primary->data_mutex);
3190                 vfree(proc_buf->buf_primary->data);
3191                 proc_buf->buf_primary->data = NULL;
3192                 mutex_unlock(&proc_buf->buf_primary->data_mutex);
3193
3194                 mutex_lock(&proc_buf->buf_cmd->data_mutex);
3195                 vfree(proc_buf->buf_cmd->data);
3196                 proc_buf->buf_cmd->data = NULL;
3197                 mutex_unlock(&proc_buf->buf_cmd->data_mutex);
3198
3199                 mutex_destroy(&proc_buf->health_mutex);
3200                 mutex_destroy(&proc_buf->buf_primary->data_mutex);
3201                 mutex_destroy(&proc_buf->buf_cmd->data_mutex);
3202
3203                 kfree(proc_buf->buf_primary);
3204                 proc_buf->buf_primary = NULL;
3205                 kfree(proc_buf->buf_cmd);
3206                 proc_buf->buf_cmd = NULL;
3207                 mutex_unlock(&proc_buf->buf_mutex);
3208         }
3209         mutex_destroy(&entry->write_buf_mutex);
3210
3211         kfree(entry->buffers);
3212         entry->buffers = NULL;
3213         kfree(entry);
3214         entry = NULL;
3215
3216         if (driver->num_dci_client == 0) {
3217                 diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN, token);
3218         } else {
3219                 real_time = diag_dci_get_cumulative_real_time(token);
3220                 diag_update_real_time_vote(DIAG_PROC_DCI, real_time, token);
3221         }
3222         queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
3223
3224         return DIAG_DCI_NO_ERROR;
3225 }
3226
3227 int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len)
3228 {
3229         uint8_t dest_channel = TYPE_DATA;
3230         int err = 0;
3231
3232         if (!buf || peripheral >= NUM_PERIPHERALS || len < 0 ||
3233             !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
3234                 DIAG_LOG(DIAG_DEBUG_DCI,
3235                         "buf: 0x%pK, p: %d, len: %d, f_mask: %d\n",
3236                         buf, peripheral, len,
3237                         driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask);
3238                 return -EINVAL;
3239         }
3240
3241         if (pkt_type == DIAG_DATA_TYPE) {
3242                 dest_channel = TYPE_DCI_CMD;
3243         } else if (pkt_type == DIAG_CNTL_TYPE) {
3244                 dest_channel = TYPE_CNTL;
3245         } else {
3246                 pr_err("diag: Invalid DCI pkt type in %s", __func__);
3247                 return -EINVAL;
3248         }
3249
3250         err = diagfwd_write(peripheral, dest_channel, buf, len);
3251         if (err && err != -ENODEV) {
3252                 pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
3253                        __func__, peripheral, dest_channel, len, err);
3254         } else {
3255                 err = DIAG_DCI_NO_ERROR;
3256         }
3257
3258         return err;
3259 }
3260
3261 int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc)
3262 {
3263         struct diag_dci_client_tbl *entry = NULL;
3264         struct diag_dci_health_t *health = NULL;
3265         struct diag_dci_health_stats *stats = NULL;
3266         int i, proc;
3267
3268         if (!stats_proc)
3269                 return -EINVAL;
3270
3271         stats = &stats_proc->health;
3272         proc = stats_proc->proc;
3273         if (proc < ALL_PROC || proc > APPS_DATA)
3274                 return -EINVAL;
3275
3276         entry = diag_dci_get_client_entry(stats_proc->client_id);
3277         if (!entry)
3278                 return DIAG_DCI_NOT_SUPPORTED;
3279
3280         /*
3281          * If the client has registered for remote processor, the
3282          * proc field doesn't have any effect as they have only one buffer.
3283          */
3284         if (entry->client_info.token)
3285                 proc = 0;
3286
3287         stats->stats.dropped_logs = 0;
3288         stats->stats.dropped_events = 0;
3289         stats->stats.received_logs = 0;
3290         stats->stats.received_events = 0;
3291
3292         if (proc != ALL_PROC) {
3293                 health = &entry->buffers[proc].health;
3294                 stats->stats.dropped_logs = health->dropped_logs;
3295                 stats->stats.dropped_events = health->dropped_events;
3296                 stats->stats.received_logs = health->received_logs;
3297                 stats->stats.received_events = health->received_events;
3298                 if (stats->reset_status) {
3299                         mutex_lock(&entry->buffers[proc].health_mutex);
3300                         health->dropped_logs = 0;
3301                         health->dropped_events = 0;
3302                         health->received_logs = 0;
3303                         health->received_events = 0;
3304                         mutex_unlock(&entry->buffers[proc].health_mutex);
3305                 }
3306                 return DIAG_DCI_NO_ERROR;
3307         }
3308
3309         for (i = 0; i < entry->num_buffers; i++) {
3310                 health = &entry->buffers[i].health;
3311                 stats->stats.dropped_logs += health->dropped_logs;
3312                 stats->stats.dropped_events += health->dropped_events;
3313                 stats->stats.received_logs += health->received_logs;
3314                 stats->stats.received_events += health->received_events;
3315                 if (stats->reset_status) {
3316                         mutex_lock(&entry->buffers[i].health_mutex);
3317                         health->dropped_logs = 0;
3318                         health->dropped_events = 0;
3319                         health->received_logs = 0;
3320                         health->received_events = 0;
3321                         mutex_unlock(&entry->buffers[i].health_mutex);
3322                 }
3323         }
3324         return DIAG_DCI_NO_ERROR;
3325 }
3326
3327 int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list)
3328 {
3329         if (!support_list)
3330                 return -ENOMEM;
3331
3332         if (!VALID_DCI_TOKEN(support_list->proc))
3333                 return -EIO;
3334
3335         support_list->list = dci_ops_tbl[support_list->proc].peripheral_status;
3336         return DIAG_DCI_NO_ERROR;
3337 }