/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/string.h>
16 #include <linux/types.h>
17 #include <linux/spinlock.h>
18 #include <linux/mutex.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/qdsp6v2/apr.h>
22 #include <soc/qcom/smd.h>
23 #include <sound/q6core.h>
24 #include <sound/audio_cal_utils.h>
25 #include <sound/adsp_err.h>
27 #define TIMEOUT_MS 1000
/*
 * AVS bring up in the modem is optimized for the new
 * Sub System Restart design and 100 milliseconds timeout
 * is sufficient to make sure the Q6 will be ready.
 */
33 #define Q6_READY_TIMEOUT_MS 100
/*
 * Tracks whether the AVCS framework version query has been issued and
 * what the DSP answered. VER_QUERY_SUPPORTED is required: the switch in
 * q6core_get_fwk_version_size() has a "case VER_QUERY_SUPPORTED" arm and
 * the GET_FWK_VERSION response handler stores it on a successful parse.
 */
enum ver_query_status {
	VER_QUERY_UNATTEMPTED,
	VER_QUERY_UNSUPPORTED,
	VER_QUERY_SUPPORTED
};
47 struct q6core_avcs_ver_info {
48 enum ver_query_status status;
49 struct avcs_fwk_ver_info *ver_info;
53 struct apr_svc *core_handle_q;
54 wait_queue_head_t bus_bw_req_wait;
55 wait_queue_head_t cmd_req_wait;
56 wait_queue_head_t avcs_fwk_ver_req_wait;
57 u32 bus_bw_resp_received;
60 FLAG_CMDRSP_LICENSE_RESULT
61 } cmd_resp_received_flag;
62 u32 avcs_fwk_ver_resp_received;
63 struct mutex cmd_lock;
64 struct mutex ver_lock;
66 struct avcs_cmdrsp_get_license_validation_result
67 cmdrsp_license_result;
70 struct cal_type_data *cal_data[CORE_MAX_CAL];
71 uint32_t mem_map_cal_handle;
73 struct q6core_avcs_ver_info q6core_avcs_ver_info;
76 static struct q6core_str q6core_lcl;
/*
 * Scratch buffer for unclassified APR responses. The default arm of the
 * callback switch writes .valid, .size_in_ints and copies the payload
 * into .ints[] (flexible array — caller must allocate enough space).
 */
struct generic_get_data_ {
	int valid;		/* nonzero once a response has been captured */
	int size_in_ints;	/* payload length in 32-bit words */
	int ints[];		/* raw payload copy */
};
static struct generic_get_data_ *generic_get_data;
85 static int parse_fwk_version_info(uint32_t *payload)
90 pr_debug("%s: Payload info num services %d\n",
91 __func__, payload[4]);
94 * payload1[4] is the number of services running on DSP
95 * Based on this info, we copy the payload into core
96 * avcs version info structure.
98 num_services = payload[4];
99 if (num_services > VSS_MAX_AVCS_NUM_SERVICES) {
100 pr_err("%s: num_services: %d greater than max services: %d\n",
101 __func__, num_services, VSS_MAX_AVCS_NUM_SERVICES);
106 * Dynamically allocate memory for all
107 * the services based on num_services
109 ver_size = sizeof(struct avcs_get_fwk_version) +
110 num_services * sizeof(struct avs_svc_api_info);
112 q6core_lcl.q6core_avcs_ver_info.ver_info =
113 kzalloc(ver_size, GFP_ATOMIC);
114 if (q6core_lcl.q6core_avcs_ver_info.ver_info == NULL)
117 memcpy(q6core_lcl.q6core_avcs_ver_info.ver_info, (uint8_t *) payload,
122 static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
128 pr_err("%s: data argument is null\n", __func__);
132 pr_debug("%s: core msg: payload len = %u, apr resp opcode = 0x%x\n",
134 data->payload_size, data->opcode);
136 switch (data->opcode) {
138 case APR_BASIC_RSP_RESULT:{
140 if (data->payload_size == 0) {
141 pr_err("%s: APR_BASIC_RSP_RESULT No Payload ",
146 payload1 = data->payload;
148 switch (payload1[0]) {
150 case AVCS_CMD_SHARED_MEM_UNMAP_REGIONS:
151 pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS status[0x%x]\n",
152 __func__, payload1[1]);
153 q6core_lcl.bus_bw_resp_received = 1;
154 wake_up(&q6core_lcl.bus_bw_req_wait);
156 case AVCS_CMD_SHARED_MEM_MAP_REGIONS:
157 pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_MAP_REGIONS status[0x%x]\n",
158 __func__, payload1[1]);
159 q6core_lcl.bus_bw_resp_received = 1;
160 wake_up(&q6core_lcl.bus_bw_req_wait);
162 case AVCS_CMD_REGISTER_TOPOLOGIES:
163 pr_debug("%s: Cmd = AVCS_CMD_REGISTER_TOPOLOGIES status[0x%x]\n",
164 __func__, payload1[1]);
165 /* -ADSP status to match Linux error standard */
166 q6core_lcl.adsp_status = -payload1[1];
167 q6core_lcl.bus_bw_resp_received = 1;
168 wake_up(&q6core_lcl.bus_bw_req_wait);
170 case AVCS_CMD_DEREGISTER_TOPOLOGIES:
171 pr_debug("%s: Cmd = AVCS_CMD_DEREGISTER_TOPOLOGIES status[0x%x]\n",
172 __func__, payload1[1]);
173 q6core_lcl.bus_bw_resp_received = 1;
174 wake_up(&q6core_lcl.bus_bw_req_wait);
176 case AVCS_CMD_ADD_POOL_PAGES:
177 pr_debug("%s: Cmd = AVCS_CMD_ADD_POOL_PAGES status[0x%x]\n",
178 __func__, payload1[1]);
179 q6core_lcl.bus_bw_resp_received = 1;
180 wake_up(&q6core_lcl.bus_bw_req_wait);
182 case AVCS_CMD_REMOVE_POOL_PAGES:
183 pr_debug("%s: Cmd = AVCS_CMD_REMOVE_POOL_PAGES status[0x%x]\n",
184 __func__, payload1[1]);
185 q6core_lcl.bus_bw_resp_received = 1;
186 wake_up(&q6core_lcl.bus_bw_req_wait);
188 case AVCS_CMD_GET_FWK_VERSION:
189 pr_debug("%s: Cmd = AVCS_CMD_GET_FWK_VERSION status[%s]\n",
190 __func__, adsp_err_get_err_str(payload1[1]));
191 /* ADSP status to match Linux error standard */
192 q6core_lcl.adsp_status = -payload1[1];
193 if (payload1[1] == ADSP_EUNSUPPORTED)
194 q6core_lcl.q6core_avcs_ver_info.status =
195 VER_QUERY_UNSUPPORTED;
196 q6core_lcl.avcs_fwk_ver_resp_received = 1;
197 wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
200 pr_err("%s: Invalid cmd rsp[0x%x][0x%x] opcode %d\n",
202 payload1[0], payload1[1], data->opcode);
209 pr_debug("%s: Reset event received in Core service\n",
211 apr_reset(q6core_lcl.core_handle_q);
212 q6core_lcl.core_handle_q = NULL;
215 case AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS:
216 payload1 = data->payload;
217 pr_debug("%s: AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS handle %d\n",
218 __func__, payload1[0]);
219 q6core_lcl.mem_map_cal_handle = payload1[0];
220 q6core_lcl.bus_bw_resp_received = 1;
221 wake_up(&q6core_lcl.bus_bw_req_wait);
223 case AVCS_CMDRSP_ADSP_EVENT_GET_STATE:
224 payload1 = data->payload;
225 q6core_lcl.param = payload1[0];
226 pr_debug("%s: Received ADSP get state response 0x%x\n",
227 __func__, q6core_lcl.param);
228 /* ensure .param is updated prior to .bus_bw_resp_received */
230 q6core_lcl.bus_bw_resp_received = 1;
231 wake_up(&q6core_lcl.bus_bw_req_wait);
233 case AVCS_CMDRSP_GET_LICENSE_VALIDATION_RESULT:
234 payload1 = data->payload;
235 pr_debug("%s: cmd = LICENSE_VALIDATION_RESULT, result = 0x%x\n",
236 __func__, payload1[0]);
237 q6core_lcl.cmd_resp_payload.cmdrsp_license_result.result
239 q6core_lcl.cmd_resp_received_flag = FLAG_CMDRSP_LICENSE_RESULT;
240 wake_up(&q6core_lcl.cmd_req_wait);
242 case AVCS_CMDRSP_GET_FWK_VERSION:
243 pr_debug("%s: Received AVCS_CMDRSP_GET_FWK_VERSION\n",
245 payload1 = data->payload;
246 ret = parse_fwk_version_info(payload1);
248 q6core_lcl.adsp_status = ret;
249 pr_err("%s: Failed to parse payload:%d\n",
252 q6core_lcl.q6core_avcs_ver_info.status =
255 q6core_lcl.avcs_fwk_ver_resp_received = 1;
256 wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
259 pr_err("%s: Message id from adsp core svc: 0x%x\n",
260 __func__, data->opcode);
261 if (generic_get_data) {
262 generic_get_data->valid = 1;
263 generic_get_data->size_in_ints =
264 data->payload_size/sizeof(int);
265 pr_debug("callback size = %i\n",
267 memcpy(generic_get_data->ints, data->payload,
269 q6core_lcl.bus_bw_resp_received = 1;
270 wake_up(&q6core_lcl.bus_bw_req_wait);
279 void ocm_core_open(void)
281 if (q6core_lcl.core_handle_q == NULL)
282 q6core_lcl.core_handle_q = apr_register("ADSP", "CORE",
283 aprv2_core_fn_q, 0xFFFFFFFF, NULL);
284 pr_debug("%s: Open_q %pK\n", __func__, q6core_lcl.core_handle_q);
285 if (q6core_lcl.core_handle_q == NULL)
286 pr_err("%s: Unable to register CORE\n", __func__);
289 struct cal_block_data *cal_utils_get_cal_block_by_key(
290 struct cal_type_data *cal_type, uint32_t key)
292 struct list_head *ptr, *next;
293 struct cal_block_data *cal_block = NULL;
294 struct audio_cal_info_metainfo *metainfo;
296 list_for_each_safe(ptr, next,
297 &cal_type->cal_blocks) {
299 cal_block = list_entry(ptr,
300 struct cal_block_data, list);
301 metainfo = (struct audio_cal_info_metainfo *)
303 if (metainfo->nKey != key) {
304 pr_debug("%s: metainfo key mismatch!!! found:%x, needed:%x\n",
305 __func__, metainfo->nKey, key);
307 pr_debug("%s: metainfo key match found", __func__);
314 static int q6core_send_get_avcs_fwk_ver_cmd(void)
316 struct apr_hdr avcs_ver_cmd;
319 avcs_ver_cmd.hdr_field =
320 APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
322 avcs_ver_cmd.pkt_size = sizeof(struct apr_hdr);
323 avcs_ver_cmd.src_port = 0;
324 avcs_ver_cmd.dest_port = 0;
325 avcs_ver_cmd.token = 0;
326 avcs_ver_cmd.opcode = AVCS_CMD_GET_FWK_VERSION;
328 q6core_lcl.adsp_status = 0;
329 q6core_lcl.avcs_fwk_ver_resp_received = 0;
331 ret = apr_send_pkt(q6core_lcl.core_handle_q,
332 (uint32_t *) &avcs_ver_cmd);
334 pr_err("%s: failed to send apr packet, ret=%d\n", __func__,
339 ret = wait_event_timeout(q6core_lcl.avcs_fwk_ver_req_wait,
340 (q6core_lcl.avcs_fwk_ver_resp_received == 1),
341 msecs_to_jiffies(TIMEOUT_MS));
343 pr_err("%s: wait_event timeout for AVCS fwk version info\n",
349 if (q6core_lcl.adsp_status < 0) {
351 * adsp_err_get_err_str expects a positive value but we store
352 * the DSP error as negative to match the Linux error standard.
353 * Pass in the negated value so adsp_err_get_err_str returns
354 * the correct string.
356 pr_err("%s: DSP returned error[%s]\n", __func__,
357 adsp_err_get_err_str(-q6core_lcl.adsp_status));
358 ret = adsp_err_get_lnx_err_code(q6core_lcl.adsp_status);
368 int q6core_get_service_version(uint32_t service_id,
369 struct avcs_fwk_ver_info *ver_info,
372 struct avcs_fwk_ver_info *cached_ver_info = NULL;
374 uint32_t num_services;
378 if (ver_info == NULL) {
379 pr_err("%s: ver_info is NULL\n", __func__);
383 ret = q6core_get_fwk_version_size(service_id);
385 pr_err("%s: Failed to get service size for service id %d with error %d\n",
386 __func__, service_id, ret);
391 if (ver_size != size) {
392 pr_err("%s: Expected size %zu and provided size %zu do not match\n",
393 __func__, ver_size, size);
397 cached_ver_info = q6core_lcl.q6core_avcs_ver_info.ver_info;
398 num_services = cached_ver_info->avcs_fwk_version.num_services;
400 if (service_id == AVCS_SERVICE_ID_ALL) {
401 memcpy(ver_info, cached_ver_info, ver_size);
405 ver_info->avcs_fwk_version = cached_ver_info->avcs_fwk_version;
406 for (i = 0; i < num_services; i++) {
407 if (cached_ver_info->services[i].service_id == service_id) {
408 ver_info->services[0] = cached_ver_info->services[i];
412 pr_err("%s: No service matching service ID %d\n", __func__, service_id);
415 EXPORT_SYMBOL(q6core_get_service_version);
417 size_t q6core_get_fwk_version_size(uint32_t service_id)
420 uint32_t num_services;
422 mutex_lock(&(q6core_lcl.ver_lock));
423 pr_debug("%s: q6core_avcs_ver_info.status(%d)\n", __func__,
424 q6core_lcl.q6core_avcs_ver_info.status);
426 switch (q6core_lcl.q6core_avcs_ver_info.status) {
427 case VER_QUERY_SUPPORTED:
428 pr_debug("%s: AVCS FWK version query already attempted\n",
431 case VER_QUERY_UNSUPPORTED:
434 case VER_QUERY_UNATTEMPTED:
435 pr_debug("%s: Attempting AVCS FWK version query\n", __func__);
436 if (q6core_is_adsp_ready()) {
437 ret = q6core_send_get_avcs_fwk_ver_cmd();
439 pr_err("%s: ADSP is not ready to query version\n",
445 pr_err("%s: Invalid version query status %d\n", __func__,
446 q6core_lcl.q6core_avcs_ver_info.status);
450 mutex_unlock(&(q6core_lcl.ver_lock));
455 if (q6core_lcl.q6core_avcs_ver_info.ver_info != NULL) {
456 num_services = q6core_lcl.q6core_avcs_ver_info.ver_info
457 ->avcs_fwk_version.num_services;
459 pr_err("%s: ver_info is NULL\n", __func__);
464 ret = sizeof(struct avcs_get_fwk_version);
465 if (service_id == AVCS_SERVICE_ID_ALL)
466 ret += num_services * sizeof(struct avs_svc_api_info);
468 ret += sizeof(struct avs_svc_api_info);
472 EXPORT_SYMBOL(q6core_get_fwk_version_size);
474 int32_t core_set_license(uint32_t key, uint32_t module_id)
476 struct avcs_cmd_set_license *cmd_setl = NULL;
477 struct cal_block_data *cal_block = NULL;
478 int rc = 0, packet_size = 0;
480 pr_debug("%s: key:0x%x, id:0x%x\n", __func__, key, module_id);
482 mutex_lock(&(q6core_lcl.cmd_lock));
483 if (q6core_lcl.cal_data[META_CAL] == NULL) {
484 pr_err("%s: cal_data not initialized yet!!\n", __func__);
489 mutex_lock(&((q6core_lcl.cal_data[META_CAL])->lock));
490 cal_block = cal_utils_get_cal_block_by_key(
491 q6core_lcl.cal_data[META_CAL], key);
492 if (cal_block == NULL ||
493 cal_block->cal_data.kvaddr == NULL ||
494 cal_block->cal_data.size <= 0) {
495 pr_err("%s: Invalid cal block to send", __func__);
497 goto cal_data_unlock;
500 packet_size = sizeof(struct avcs_cmd_set_license) +
501 cal_block->cal_data.size;
502 /*round up total packet_size to next 4 byte boundary*/
503 packet_size = ((packet_size + 0x3)>>2)<<2;
505 cmd_setl = kzalloc(packet_size, GFP_KERNEL);
506 if (cmd_setl == NULL) {
507 pr_err("%s: kzalloc for cmd_set_license failed for size %d\n",
508 __func__, packet_size);
510 goto cal_data_unlock;
514 if (q6core_lcl.core_handle_q == NULL) {
515 pr_err("%s: apr registration for CORE failed\n", __func__);
520 cmd_setl->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
521 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
522 cmd_setl->hdr.pkt_size = packet_size;
523 cmd_setl->hdr.src_port = 0;
524 cmd_setl->hdr.dest_port = 0;
525 cmd_setl->hdr.token = 0;
526 cmd_setl->hdr.opcode = AVCS_CMD_SET_LICENSE;
527 cmd_setl->id = module_id;
528 cmd_setl->overwrite = 1;
529 cmd_setl->size = cal_block->cal_data.size;
530 memcpy((uint8_t *)cmd_setl + sizeof(struct avcs_cmd_set_license),
531 cal_block->cal_data.kvaddr,
532 cal_block->cal_data.size);
533 pr_info("%s: Set license opcode=0x%x, id =0x%x, size = %d\n",
534 __func__, cmd_setl->hdr.opcode,
535 cmd_setl->id, cmd_setl->size);
536 rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)cmd_setl);
538 pr_err("%s: SET_LICENSE failed op[0x%x]rc[%d]\n",
539 __func__, cmd_setl->hdr.opcode, rc);
544 mutex_unlock(&((q6core_lcl.cal_data[META_CAL])->lock));
546 mutex_unlock(&(q6core_lcl.cmd_lock));
551 int32_t core_get_license_status(uint32_t module_id)
553 struct avcs_cmd_get_license_validation_result get_lvr_cmd;
556 pr_debug("%s: module_id 0x%x", __func__, module_id);
558 mutex_lock(&(q6core_lcl.cmd_lock));
560 if (q6core_lcl.core_handle_q == NULL) {
561 pr_err("%s: apr registration for CORE failed\n", __func__);
566 get_lvr_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
567 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
568 get_lvr_cmd.hdr.pkt_size =
569 sizeof(struct avcs_cmd_get_license_validation_result);
571 get_lvr_cmd.hdr.src_port = 0;
572 get_lvr_cmd.hdr.dest_port = 0;
573 get_lvr_cmd.hdr.token = 0;
574 get_lvr_cmd.hdr.opcode = AVCS_CMD_GET_LICENSE_VALIDATION_RESULT;
575 get_lvr_cmd.id = module_id;
578 ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &get_lvr_cmd);
580 pr_err("%s: license_validation request failed, err %d\n",
586 q6core_lcl.cmd_resp_received_flag &= ~(FLAG_CMDRSP_LICENSE_RESULT);
587 mutex_unlock(&(q6core_lcl.cmd_lock));
588 ret = wait_event_timeout(q6core_lcl.cmd_req_wait,
589 (q6core_lcl.cmd_resp_received_flag ==
590 FLAG_CMDRSP_LICENSE_RESULT),
591 msecs_to_jiffies(TIMEOUT_MS));
592 mutex_lock(&(q6core_lcl.cmd_lock));
594 pr_err("%s: wait_event timeout for CMDRSP_LICENSE_RESULT\n",
599 q6core_lcl.cmd_resp_received_flag &= ~(FLAG_CMDRSP_LICENSE_RESULT);
600 ret = q6core_lcl.cmd_resp_payload.cmdrsp_license_result.result;
603 mutex_unlock(&(q6core_lcl.cmd_lock));
604 pr_info("%s: cmdrsp_license_result.result = 0x%x for module 0x%x\n",
605 __func__, ret, module_id);
609 uint32_t core_set_dolby_manufacturer_id(int manufacturer_id)
611 struct adsp_dolby_manufacturer_id payload;
613 pr_debug("%s: manufacturer_id :%d\n", __func__, manufacturer_id);
614 mutex_lock(&(q6core_lcl.cmd_lock));
616 if (q6core_lcl.core_handle_q) {
617 payload.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
618 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
619 payload.hdr.pkt_size =
620 sizeof(struct adsp_dolby_manufacturer_id);
621 payload.hdr.src_port = 0;
622 payload.hdr.dest_port = 0;
623 payload.hdr.token = 0;
624 payload.hdr.opcode = ADSP_CMD_SET_DOLBY_MANUFACTURER_ID;
625 payload.manufacturer_id = manufacturer_id;
626 pr_debug("%s: Send Dolby security opcode=0x%x manufacturer ID = %d\n",
628 payload.hdr.opcode, payload.manufacturer_id);
629 rc = apr_send_pkt(q6core_lcl.core_handle_q,
630 (uint32_t *)&payload);
632 pr_err("%s: SET_DOLBY_MANUFACTURER_ID failed op[0x%x]rc[%d]\n",
633 __func__, payload.hdr.opcode, rc);
635 mutex_unlock(&(q6core_lcl.cmd_lock));
639 bool q6core_is_adsp_ready(void)
645 pr_debug("%s: enter\n", __func__);
646 memset(&hdr, 0, sizeof(hdr));
647 hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
648 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
649 hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, 0);
650 hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE;
652 mutex_lock(&(q6core_lcl.cmd_lock));
654 if (q6core_lcl.core_handle_q) {
655 q6core_lcl.bus_bw_resp_received = 0;
656 rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&hdr);
658 pr_err("%s: Get ADSP state APR packet send event %d\n",
663 rc = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
664 (q6core_lcl.bus_bw_resp_received == 1),
665 msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
666 if (rc > 0 && q6core_lcl.bus_bw_resp_received) {
667 /* ensure to read updated param by callback thread */
669 ret = !!q6core_lcl.param;
673 pr_debug("%s: leave, rc %d, adsp ready %d\n", __func__, rc, ret);
674 mutex_unlock(&(q6core_lcl.cmd_lock));
679 static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
680 uint32_t *bufsz, uint32_t bufcnt, uint32_t *map_handle)
682 struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
683 struct avs_shared_map_region_payload *mregions = NULL;
684 void *mmap_region_cmd = NULL;
685 void *payload = NULL;
690 cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
691 + sizeof(struct avs_shared_map_region_payload)
694 mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
695 if (mmap_region_cmd == NULL)
698 mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
699 mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
700 APR_HDR_LEN(APR_HDR_SIZE),
702 mmap_regions->hdr.pkt_size = cmd_size;
703 mmap_regions->hdr.src_port = 0;
704 mmap_regions->hdr.dest_port = 0;
705 mmap_regions->hdr.token = 0;
706 mmap_regions->hdr.opcode = AVCS_CMD_SHARED_MEM_MAP_REGIONS;
707 mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
708 mmap_regions->num_regions = bufcnt & 0x00ff;
709 mmap_regions->property_flag = 0x00;
711 payload = ((u8 *) mmap_region_cmd +
712 sizeof(struct avs_cmd_shared_mem_map_regions));
713 mregions = (struct avs_shared_map_region_payload *)payload;
715 for (i = 0; i < bufcnt; i++) {
716 mregions->shm_addr_lsw = lower_32_bits(buf_add[i]);
717 mregions->shm_addr_msw =
718 msm_audio_populate_upper_32_bits(buf_add[i]);
719 mregions->mem_size_bytes = bufsz[i];
723 pr_debug("%s: sending memory map, addr %pK, size %d, bufcnt = %d\n",
724 __func__, buf_add, bufsz[0], mmap_regions->num_regions);
727 q6core_lcl.bus_bw_resp_received = 0;
728 ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
731 pr_err("%s: mmap regions failed %d\n",
737 ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
738 (q6core_lcl.bus_bw_resp_received == 1),
739 msecs_to_jiffies(TIMEOUT_MS));
741 pr_err("%s: timeout. waited for memory map\n", __func__);
746 *map_handle = q6core_lcl.mem_map_cal_handle;
748 kfree(mmap_region_cmd);
752 static int q6core_memory_unmap_regions(uint32_t mem_map_handle)
754 struct avs_cmd_shared_mem_unmap_regions unmap_regions;
757 memset(&unmap_regions, 0, sizeof(unmap_regions));
758 unmap_regions.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
759 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
760 unmap_regions.hdr.pkt_size = sizeof(unmap_regions);
761 unmap_regions.hdr.src_svc = APR_SVC_ADSP_CORE;
762 unmap_regions.hdr.src_domain = APR_DOMAIN_APPS;
763 unmap_regions.hdr.src_port = 0;
764 unmap_regions.hdr.dest_svc = APR_SVC_ADSP_CORE;
765 unmap_regions.hdr.dest_domain = APR_DOMAIN_ADSP;
766 unmap_regions.hdr.dest_port = 0;
767 unmap_regions.hdr.token = 0;
768 unmap_regions.hdr.opcode = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS;
769 unmap_regions.mem_map_handle = mem_map_handle;
771 q6core_lcl.bus_bw_resp_received = 0;
773 pr_debug("%s: unmap regions map handle %d\n",
774 __func__, mem_map_handle);
776 ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
779 pr_err("%s: unmap regions failed %d\n",
785 ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
786 (q6core_lcl.bus_bw_resp_received == 1),
787 msecs_to_jiffies(TIMEOUT_MS));
789 pr_err("%s: timeout. waited for memory_unmap\n",
798 int q6core_add_remove_pool_pages(ion_phys_addr_t buf_add, uint32_t bufsz,
799 uint32_t mempool_id, bool add_pages)
801 struct avs_mem_assign_region mem_pool;
805 mem_pool.hdr.opcode = AVCS_CMD_ADD_POOL_PAGES;
807 mem_pool.hdr.opcode = AVCS_CMD_REMOVE_POOL_PAGES;
809 /* get payload length */
810 sz = sizeof(struct avs_mem_assign_region);
811 mem_pool.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
812 APR_HDR_LEN(sizeof(struct apr_hdr)),
814 mem_pool.hdr.src_port = 0;
815 mem_pool.hdr.dest_port = 0;
816 mem_pool.hdr.token = 0;
817 mem_pool.hdr.pkt_size = sz;
818 mem_pool.pool_id = mempool_id;
819 mem_pool.size = bufsz;
820 mem_pool.addr_lsw = lower_32_bits(buf_add);
821 mem_pool.addr_msw = msm_audio_populate_upper_32_bits(buf_add);
822 pr_debug("%s: sending memory map, size %d\n",
825 q6core_lcl.bus_bw_resp_received = 0;
826 ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&mem_pool);
828 pr_err("%s: library map region failed %d\n",
834 ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
835 (q6core_lcl.bus_bw_resp_received == 1),
836 msecs_to_jiffies(TIMEOUT_MS));
838 pr_err("%s: timeout. waited for library memory map\n",
848 static int q6core_dereg_all_custom_topologies(void)
851 struct avcs_cmd_deregister_topologies dereg_top;
853 memset(&dereg_top, 0, sizeof(dereg_top));
854 dereg_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
855 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
856 dereg_top.hdr.pkt_size = sizeof(dereg_top);
857 dereg_top.hdr.src_svc = APR_SVC_ADSP_CORE;
858 dereg_top.hdr.src_domain = APR_DOMAIN_APPS;
859 dereg_top.hdr.src_port = 0;
860 dereg_top.hdr.dest_svc = APR_SVC_ADSP_CORE;
861 dereg_top.hdr.dest_domain = APR_DOMAIN_ADSP;
862 dereg_top.hdr.dest_port = 0;
863 dereg_top.hdr.token = 0;
864 dereg_top.hdr.opcode = AVCS_CMD_DEREGISTER_TOPOLOGIES;
865 dereg_top.payload_addr_lsw = 0;
866 dereg_top.payload_addr_msw = 0;
867 dereg_top.mem_map_handle = 0;
868 dereg_top.payload_size = 0;
869 dereg_top.mode = AVCS_MODE_DEREGISTER_ALL_CUSTOM_TOPOLOGIES;
871 q6core_lcl.bus_bw_resp_received = 0;
873 pr_debug("%s: Deregister topologies mode %d\n",
874 __func__, dereg_top.mode);
876 ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &dereg_top);
878 pr_err("%s: Deregister topologies failed %d\n",
883 ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
884 (q6core_lcl.bus_bw_resp_received == 1),
885 msecs_to_jiffies(TIMEOUT_MS));
887 pr_err("%s: wait_event timeout for Deregister topologies\n",
895 static int q6core_send_custom_topologies(void)
899 struct cal_block_data *cal_block = NULL;
900 struct avcs_cmd_register_topologies reg_top;
902 if (!q6core_is_adsp_ready()) {
903 pr_err("%s: ADSP is not ready!\n", __func__);
907 memset(®_top, 0, sizeof(reg_top));
908 mutex_lock(&q6core_lcl.cal_data[CUST_TOP_CAL]->lock);
909 mutex_lock(&q6core_lcl.cmd_lock);
911 cal_block = cal_utils_get_only_cal_block(
912 q6core_lcl.cal_data[CUST_TOP_CAL]);
913 if (cal_block == NULL) {
914 pr_debug("%s: cal block is NULL!\n", __func__);
917 if (cal_block->cal_data.size <= 0) {
918 pr_debug("%s: cal size is %zd not sending\n",
919 __func__, cal_block->cal_data.size);
923 q6core_dereg_all_custom_topologies();
925 ret = q6core_map_memory_regions(&cal_block->cal_data.paddr, 0,
926 (uint32_t *)&cal_block->map_data.map_size, 1,
927 &cal_block->map_data.q6map_handle);
929 pr_err("%s: q6core_map_memory_regions failed\n", __func__);
933 reg_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
934 APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
935 reg_top.hdr.pkt_size = sizeof(reg_top);
936 reg_top.hdr.src_svc = APR_SVC_ADSP_CORE;
937 reg_top.hdr.src_domain = APR_DOMAIN_APPS;
938 reg_top.hdr.src_port = 0;
939 reg_top.hdr.dest_svc = APR_SVC_ADSP_CORE;
940 reg_top.hdr.dest_domain = APR_DOMAIN_ADSP;
941 reg_top.hdr.dest_port = 0;
942 reg_top.hdr.token = 0;
943 reg_top.hdr.opcode = AVCS_CMD_REGISTER_TOPOLOGIES;
944 reg_top.payload_addr_lsw =
945 lower_32_bits(cal_block->cal_data.paddr);
946 reg_top.payload_addr_msw =
947 msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
948 reg_top.mem_map_handle = cal_block->map_data.q6map_handle;
949 reg_top.payload_size = cal_block->cal_data.size;
951 q6core_lcl.adsp_status = 0;
952 q6core_lcl.bus_bw_resp_received = 0;
954 pr_debug("%s: Register topologies addr %pK, size %zd, map handle %d\n",
955 __func__, &cal_block->cal_data.paddr, cal_block->cal_data.size,
956 cal_block->map_data.q6map_handle);
958 ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) ®_top);
960 pr_err("%s: Register topologies failed %d\n",
965 ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
966 (q6core_lcl.bus_bw_resp_received == 1),
967 msecs_to_jiffies(TIMEOUT_MS));
969 pr_err("%s: wait_event timeout for Register topologies\n",
974 if (q6core_lcl.adsp_status < 0)
975 ret = q6core_lcl.adsp_status;
977 ret2 = q6core_memory_unmap_regions(cal_block->map_data.q6map_handle);
979 pr_err("%s: q6core_memory_unmap_regions failed for map handle %d\n",
980 __func__, cal_block->map_data.q6map_handle);
986 mutex_unlock(&q6core_lcl.cmd_lock);
987 mutex_unlock(&q6core_lcl.cal_data[CUST_TOP_CAL]->lock);
992 static int get_cal_type_index(int32_t cal_type)
997 case AUDIO_CORE_METAINFO_CAL_TYPE:
1000 case CORE_CUSTOM_TOPOLOGIES_CAL_TYPE:
1004 pr_err("%s: invalid cal type %d!\n", __func__, cal_type);
1009 static int q6core_alloc_cal(int32_t cal_type,
1010 size_t data_size, void *data)
1015 cal_index = get_cal_type_index(cal_type);
1016 if (cal_index < 0) {
1017 pr_err("%s: could not get cal index %d!\n",
1018 __func__, cal_index);
1024 ret = cal_utils_alloc_cal(data_size, data,
1025 q6core_lcl.cal_data[cal_index], 0, NULL);
1027 pr_err("%s: cal_utils_alloc_block failed, ret = %d, cal type = %d!\n",
1028 __func__, ret, cal_type);
1035 static int q6core_dealloc_cal(int32_t cal_type,
1036 size_t data_size, void *data)
1041 cal_index = get_cal_type_index(cal_type);
1042 if (cal_index < 0) {
1043 pr_err("%s: could not get cal index %d!\n",
1044 __func__, cal_index);
1050 ret = cal_utils_dealloc_cal(data_size, data,
1051 q6core_lcl.cal_data[cal_index]);
1053 pr_err("%s: cal_utils_dealloc_block failed, ret = %d, cal type = %d!\n",
1054 __func__, ret, cal_type);
1061 static int q6core_set_cal(int32_t cal_type,
1062 size_t data_size, void *data)
1067 cal_index = get_cal_type_index(cal_type);
1068 if (cal_index < 0) {
1069 pr_err("%s: could not get cal index %d!\n",
1070 __func__, cal_index);
1076 ret = cal_utils_set_cal(data_size, data,
1077 q6core_lcl.cal_data[cal_index], 0, NULL);
1079 pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
1080 __func__, ret, cal_type);
1084 if (cal_index == CUST_TOP_CAL)
1085 ret = q6core_send_custom_topologies();
1090 static void q6core_delete_cal_data(void)
1092 pr_debug("%s:\n", __func__);
1094 cal_utils_destroy_cal_types(CORE_MAX_CAL, q6core_lcl.cal_data);
1099 static int q6core_init_cal_data(void)
1102 struct cal_type_info cal_type_info[] = {
1103 {{AUDIO_CORE_METAINFO_CAL_TYPE,
1104 {q6core_alloc_cal, q6core_dealloc_cal, NULL,
1105 q6core_set_cal, NULL, NULL} },
1106 {NULL, NULL, cal_utils_match_buf_num} },
1108 {{CORE_CUSTOM_TOPOLOGIES_CAL_TYPE,
1109 {q6core_alloc_cal, q6core_dealloc_cal, NULL,
1110 q6core_set_cal, NULL, NULL} },
1111 {NULL, NULL, cal_utils_match_buf_num} }
1113 pr_debug("%s:\n", __func__);
1115 ret = cal_utils_create_cal_types(CORE_MAX_CAL,
1116 q6core_lcl.cal_data, cal_type_info);
1118 pr_err("%s: could not create cal type!\n",
1125 q6core_delete_cal_data();
1129 static int __init core_init(void)
1131 memset(&q6core_lcl, 0, sizeof(struct q6core_str));
1132 init_waitqueue_head(&q6core_lcl.bus_bw_req_wait);
1133 init_waitqueue_head(&q6core_lcl.cmd_req_wait);
1134 init_waitqueue_head(&q6core_lcl.avcs_fwk_ver_req_wait);
1135 q6core_lcl.cmd_resp_received_flag = FLAG_NONE;
1136 mutex_init(&q6core_lcl.cmd_lock);
1137 mutex_init(&q6core_lcl.ver_lock);
1139 q6core_init_cal_data();
1143 module_init(core_init);
1145 static void __exit core_exit(void)
1147 mutex_destroy(&q6core_lcl.cmd_lock);
1148 mutex_destroy(&q6core_lcl.ver_lock);
1149 q6core_delete_cal_data();
1151 module_exit(core_exit);
1152 MODULE_DESCRIPTION("ADSP core driver");
1153 MODULE_LICENSE("GPL v2");