From 3565c0340c40306ea973a264e9eaa27036ec9b49 Mon Sep 17 00:00:00 2001 From: Mohit Aggarwal Date: Fri, 17 Jul 2015 16:43:48 +0530 Subject: [PATCH] memshare: Port and add snapshot of changes from msm-3.10 Port the memshare driver and apply the following memshare driver changes taken from msm-3.10 kernel branch as of msm-3.10 commit 4493220f memshare: Boot time allocation and handling multiple clients 2ae4997a memshare: Donot re-allocate the memory for the clients 059dcd59 memshare: Do not overwrite the response for the failure case ed6d183f memshare: Change the compatible property field for child node b473fc4e2 memshare: Free the memory after XPU unlocking is done 95c114c39 memshare: Add query size api support for clients 60f310d4e memshare: Change dma attribute to DMA_ATTR_NO_KERNEL_MAPPING 73075545 memshare: Remove local connection status variable 98dd2908 memshare: Place error check to prevent out of bound access Change-Id: Iecf0a9828efd1d56c309a2af882c13ce36e7fc06 Signed-off-by: Katish Paran Signed-off-by: Mohit Aggarwal --- drivers/soc/qcom/Kconfig | 2 + drivers/soc/qcom/Makefile | 1 + drivers/soc/qcom/memshare/Kconfig | 9 + drivers/soc/qcom/memshare/Makefile | 1 + drivers/soc/qcom/memshare/heap_mem_ext_v01.c | 472 +++++++++++++++ drivers/soc/qcom/memshare/heap_mem_ext_v01.h | 356 +++++++++++ drivers/soc/qcom/memshare/msm_memshare.c | 847 +++++++++++++++++++++++++++ drivers/soc/qcom/memshare/msm_memshare.h | 57 ++ 8 files changed, 1745 insertions(+) create mode 100644 drivers/soc/qcom/memshare/Kconfig create mode 100644 drivers/soc/qcom/memshare/Makefile create mode 100644 drivers/soc/qcom/memshare/heap_mem_ext_v01.c create mode 100644 drivers/soc/qcom/memshare/heap_mem_ext_v01.h create mode 100644 drivers/soc/qcom/memshare/msm_memshare.c create mode 100644 drivers/soc/qcom/memshare/msm_memshare.h diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 64ceb0511bdb..613f0ade8fe8 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig 
@@ -390,3 +390,5 @@ config MSM_EVENT_TIMER need to be monitored by the PM. The enables the PM code to monitor events that require the core to be awake and ready to handle the event. + +source "drivers/soc/qcom/memshare/Kconfig" diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index a710a5d76421..bf67d9fa3621 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_MSM_IPC_ROUTER_HSIC_XPRT) += ipc_router_hsic_xprt.o obj-$(CONFIG_MSM_IPC_ROUTER_MHI_XPRT) += ipc_router_mhi_xprt.o obj-$(CONFIG_MSM_IPC_ROUTER_GLINK_XPRT) += ipc_router_glink_xprt.o +obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) += memshare/ obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o obj-$(CONFIG_MSM_PIL) += peripheral-loader.o diff --git a/drivers/soc/qcom/memshare/Kconfig b/drivers/soc/qcom/memshare/Kconfig new file mode 100644 index 000000000000..7eb1415b350b --- /dev/null +++ b/drivers/soc/qcom/memshare/Kconfig @@ -0,0 +1,9 @@ +config MEM_SHARE_QMI_SERVICE + depends on MSM_QMI_INTERFACE + bool "Shared Heap for external processors" + help + Memory Share Kernel Qualcomm Messaging Interface Service + receives requests from Modem Processor Sub System + for heap alloc/free from Application Processor + Sub System and send a response back to client with + proper handle/address. diff --git a/drivers/soc/qcom/memshare/Makefile b/drivers/soc/qcom/memshare/Makefile new file mode 100644 index 000000000000..cf49fbcfdb21 --- /dev/null +++ b/drivers/soc/qcom/memshare/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) := heap_mem_ext_v01.o msm_memshare.o \ No newline at end of file diff --git a/drivers/soc/qcom/memshare/heap_mem_ext_v01.c b/drivers/soc/qcom/memshare/heap_mem_ext_v01.c new file mode 100644 index 000000000000..717c4d80d999 --- /dev/null +++ b/drivers/soc/qcom/memshare/heap_mem_ext_v01.c @@ -0,0 +1,472 @@ +/* Copyright (c) 2013-2015, The Linux Foundation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include "heap_mem_ext_v01.h" + +struct elem_info mem_alloc_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_alloc_req_msg_v01, + num_bytes), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_req_msg_v01, + block_alignment_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_req_msg_v01, + block_alignment), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_alloc_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_SIGNED_2_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_alloc_resp_msg_v01, + resp), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_resp_msg_v01, + handle_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_resp_msg_v01, + handle), + }, + { + 
.data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_alloc_resp_msg_v01, + num_bytes_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_alloc_resp_msg_v01, + num_bytes), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_free_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_free_req_msg_v01, + handle), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_free_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_SIGNED_2_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_free_resp_msg_v01, + resp), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info dhms_mem_alloc_addr_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dhms_mem_alloc_addr_info_type_v01, + phy_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dhms_mem_alloc_addr_info_type_v01, + num_bytes), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_alloc_generic_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = 
NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + num_bytes), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + client_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + proc_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x04, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + sequence_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + alloc_contiguous_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + alloc_contiguous), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + block_alignment_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + block_alignment), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + resp), + 
.ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + sequence_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + sequence_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + dhms_mem_alloc_addr_info_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + dhms_mem_alloc_addr_info_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = MAX_ARR_CNT_V01, + .elem_size = sizeof(struct + dhms_mem_alloc_addr_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + dhms_mem_alloc_addr_info), + .ei_array = dhms_mem_alloc_addr_info_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_free_generic_req_msg_data_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_free_generic_req_msg_v01, + dhms_mem_alloc_addr_info_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = MAX_ARR_CNT_V01, + .elem_size = sizeof(struct + dhms_mem_alloc_addr_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_free_generic_req_msg_v01, + dhms_mem_alloc_addr_info), + .ei_array = dhms_mem_alloc_addr_info_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + 
.elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_free_generic_req_msg_v01, + client_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_free_generic_req_msg_v01, + client_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_free_generic_req_msg_v01, + proc_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_free_generic_req_msg_v01, + proc_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_free_generic_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + mem_free_generic_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_query_size_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_query_size_req_msg_v01, + client_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_query_size_req_msg_v01, + proc_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_query_size_req_msg_v01, + 
proc_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_query_size_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + mem_query_size_rsp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_query_size_rsp_msg_v01, + size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_query_size_rsp_msg_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; diff --git a/drivers/soc/qcom/memshare/heap_mem_ext_v01.h b/drivers/soc/qcom/memshare/heap_mem_ext_v01.h new file mode 100644 index 000000000000..b64ad1004aa2 --- /dev/null +++ b/drivers/soc/qcom/memshare/heap_mem_ext_v01.h @@ -0,0 +1,356 @@ +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef HEAP_MEM_EXT_SERVICE_01_H +#define HEAP_MEM_EXT_SERVICE_01_H + +#include + +#define MEM_ALLOC_REQ_MAX_MSG_LEN_V01 255 +#define MEM_FREE_REQ_MAX_MSG_LEN_V01 255 +#define MAX_ARR_CNT_V01 64 + +struct dhms_mem_alloc_addr_info_type_v01 { + uint64_t phy_addr; + uint32_t num_bytes; +}; + +enum dhms_mem_proc_id_v01 { + /* To force a 32 bit signed enum. Do not change or use */ + DHMS_MEM_PROC_ID_MIN_ENUM_VAL_V01 = -2147483647, + /* Request from MPSS processor */ + DHMS_MEM_PROC_MPSS_V01 = 0, + /* Request from ADSP processor */ + DHMS_MEM_PROC_ADSP_V01 = 1, + /* Request from WCNSS processor */ + DHMS_MEM_PROC_WCNSS_V01 = 2, + /* To force a 32 bit signed enum. Do not change or use */ + DHMS_MEM_PROC_ID_MAX_ENUM_VAL_V01 = 2147483647 +}; + +enum dhms_mem_client_id_v01 { + /*To force a 32 bit signed enum. Do not change or use*/ + DHMS_MEM_CLIENT_ID_MIN_ENUM_VAL_V01 = -2147483647, + /* Request from GPS Client */ + DHMS_MEM_CLIENT_GPS_V01 = 0, + /* Invalid Client */ + DHMS_MEM_CLIENT_INVALID = 1000, + /* To force a 32 bit signed enum. Do not change or use */ + DHMS_MEM_CLIENT_ID_MAX_ENUM_VAL_V01 = 2147483647 +}; + +enum dhms_mem_block_align_enum_v01 { + /* To force a 32 bit signed enum. 
Do not change or use + */ + DHMS_MEM_BLOCK_ALIGN_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + /* Align allocated memory by 2 bytes */ + DHMS_MEM_BLOCK_ALIGN_2_V01 = 0, + /* Align allocated memory by 4 bytes */ + DHMS_MEM_BLOCK_ALIGN_4_V01 = 1, + /**< Align allocated memory by 8 bytes */ + DHMS_MEM_BLOCK_ALIGN_8_V01 = 2, + /**< Align allocated memory by 16 bytes */ + DHMS_MEM_BLOCK_ALIGN_16_V01 = 3, + /**< Align allocated memory by 32 bytes */ + DHMS_MEM_BLOCK_ALIGN_32_V01 = 4, + /**< Align allocated memory by 64 bytes */ + DHMS_MEM_BLOCK_ALIGN_64_V01 = 5, + /**< Align allocated memory by 128 bytes */ + DHMS_MEM_BLOCK_ALIGN_128_V01 = 6, + /**< Align allocated memory by 256 bytes */ + DHMS_MEM_BLOCK_ALIGN_256_V01 = 7, + /**< Align allocated memory by 512 bytes */ + DHMS_MEM_BLOCK_ALIGN_512_V01 = 8, + /**< Align allocated memory by 1024 bytes */ + DHMS_MEM_BLOCK_ALIGN_1K_V01 = 9, + /**< Align allocated memory by 2048 bytes */ + DHMS_MEM_BLOCK_ALIGN_2K_V01 = 10, + /**< Align allocated memory by 4096 bytes */ + DHMS_MEM_BLOCK_ALIGN_4K_V01 = 11, + DHMS_MEM_BLOCK_ALIGN_ENUM_MAX_ENUM_VAL_V01 = 2147483647 + /* To force a 32 bit signed enum. 
Do not change or use + */ +}; + +/* Request Message; This command is used for getting + * the multiple physically contiguous + * memory blocks from the server memory subsystem + */ +struct mem_alloc_req_msg_v01 { + + /* Mandatory */ + /*requested size*/ + uint32_t num_bytes; + + /* Optional */ + /* Must be set to true if block_alignment + * is being passed + */ + uint8_t block_alignment_valid; + /* The block alignment for the memory block to be allocated + */ + enum dhms_mem_block_align_enum_v01 block_alignment; +}; /* Message */ + +/* Response Message; This command is used for getting + * the multiple physically contiguous memory blocks + * from the server memory subsystem + */ +struct mem_alloc_resp_msg_v01 { + + /* Mandatory */ + /* Result Code */ + /* The result of the requested memory operation + */ + enum qmi_result_type_v01 resp; + /* Optional */ + /* Memory Block Handle + */ + /* Must be set to true if handle is being passed + */ + uint8_t handle_valid; + /* The physical address of the memory allocated on the HLOS + */ + uint64_t handle; + /* Optional */ + /* Memory block size */ + /* Must be set to true if num_bytes is being passed + */ + uint8_t num_bytes_valid; + /* The number of bytes actually allocated for the request. + * This value can be smaller than the size requested in + * QMI_DHMS_MEM_ALLOC_REQ_MSG. 
+ */ + uint32_t num_bytes; +}; /* Message */ + +/* Request Message; This command is used for releasing + * the multiple physically contiguous + * memory blocks to the server memory subsystem + */ +struct mem_free_req_msg_v01 { + + /* Mandatory */ + /* Physical address of memory to be freed + */ + uint32_t handle; +}; /* Message */ + +/* Response Message; This command is used for releasing + * the multiple physically contiguous + * memory blocks to the server memory subsystem + */ +struct mem_free_resp_msg_v01 { + + /* Mandatory */ + /* Result of the requested memory operation, todo, + * need to check the async operation for free + */ + enum qmi_result_type_v01 resp; +}; /* Message */ + +/* Request Message; This command is used for getting + * the multiple physically contiguous + * memory blocks from the server memory subsystem + */ +struct mem_alloc_generic_req_msg_v01 { + + /* Mandatory */ + /*requested size*/ + uint32_t num_bytes; + + /* Mandatory */ + /* client id */ + enum dhms_mem_client_id_v01 client_id; + + /* Mandatory */ + /* Peripheral Id*/ + enum dhms_mem_proc_id_v01 proc_id; + + /* Mandatory */ + /* Sequence id */ + uint32_t sequence_id; + + /* Optional */ + /* alloc_contiguous */ + /* Must be set to true if alloc_contiguous is being passed */ + uint8_t alloc_contiguous_valid; + + /* Alloc_contiguous is used to identify that clients are requesting + * for contiguous or non contiguous memory, default is contiguous + * 0 = non contiguous else contiguous + */ + uint8_t alloc_contiguous; + + /* Optional */ + /* Must be set to true if block_alignment + * is being passed + */ + uint8_t block_alignment_valid; + + /* The block alignment for the memory block to be allocated + */ + enum dhms_mem_block_align_enum_v01 block_alignment; + +}; /* Message */ + +/* Response Message; This command is used for getting + * the multiple physically contiguous memory blocks + * from the server memory subsystem + */ +struct mem_alloc_generic_resp_msg_v01 { + + /* Mandatory */ + 
/* Result Code */ + /* The result of the requested memory operation + */ + struct qmi_response_type_v01 resp; + + /* Optional */ + /* Sequence ID */ + /* Must be set to true if sequence_id is being passed */ + uint8_t sequence_id_valid; + + + /* Mandatory */ + /* Sequence id */ + uint32_t sequence_id; + + /* Optional */ + /* Memory Block Handle + */ + /* Must be set to true if handle is being passed + */ + uint8_t dhms_mem_alloc_addr_info_valid; + + /* Optional */ + /* Handle Size */ + uint32_t dhms_mem_alloc_addr_info_len; + + /* Optional */ + /* The physical address of the memory allocated on the HLOS + */ + struct dhms_mem_alloc_addr_info_type_v01 + dhms_mem_alloc_addr_info[MAX_ARR_CNT_V01]; + +}; /* Message */ + +/* Request Message; This command is used for releasing + * the multiple physically contiguous + * memory blocks to the server memory subsystem + */ +struct mem_free_generic_req_msg_v01 { + + /* Mandatory */ + /* Must be set to # of elments in array*/ + uint32_t dhms_mem_alloc_addr_info_len; + + /* Mandatory */ + /* Physical address and size of the memory allocated + * on the HLOS to be freed. 
+ */ + struct dhms_mem_alloc_addr_info_type_v01 + dhms_mem_alloc_addr_info[MAX_ARR_CNT_V01]; + + /* Optional */ + /* Client ID */ + /* Must be set to true if client_id is being passed */ + uint8_t client_id_valid; + + /* Optional */ + /* Client Id */ + enum dhms_mem_client_id_v01 client_id; + + /* Optional */ + /* Proc ID */ + /* Must be set to true if proc_id is being passed */ + uint8_t proc_id_valid; + + /* Optional */ + /* Peripheral */ + enum dhms_mem_proc_id_v01 proc_id; + +}; /* Message */ + +/* Response Message; This command is used for releasing + * the multiple physically contiguous + * memory blocks to the server memory subsystem + */ +struct mem_free_generic_resp_msg_v01 { + + /* + * Mandatory + * Result of the requested memory operation, todo, + * need to check the async operation for free + */ + struct qmi_response_type_v01 resp; + +}; /* Message */ + +struct mem_query_size_req_msg_v01 { + + /* Mandatory */ + enum dhms_mem_client_id_v01 client_id; + + /* + * Optional + * Proc ID + * proc_id_valid must be set to true if proc_id is being passed + */ + uint8_t proc_id_valid; + + enum dhms_mem_proc_id_v01 proc_id; +}; /* Message */ + +struct mem_query_size_rsp_msg_v01 { + + /* + * Mandatory + * Result Code + */ + struct qmi_response_type_v01 resp; + + /* + * Optional + * size_valid must be set to true if size is being passed + */ + uint8_t size_valid; + + uint32_t size; +}; /* Message */ + + +extern struct elem_info mem_alloc_req_msg_data_v01_ei[]; +extern struct elem_info mem_alloc_resp_msg_data_v01_ei[]; +extern struct elem_info mem_free_req_msg_data_v01_ei[]; +extern struct elem_info mem_free_resp_msg_data_v01_ei[]; +extern struct elem_info mem_alloc_generic_req_msg_data_v01_ei[]; +extern struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[]; +extern struct elem_info mem_free_generic_req_msg_data_v01_ei[]; +extern struct elem_info mem_free_generic_resp_msg_data_v01_ei[]; +extern struct elem_info mem_query_size_req_msg_data_v01_ei[]; +extern struct 
elem_info mem_query_size_resp_msg_data_v01_ei[]; + +/*Service Message Definition*/ +#define MEM_ALLOC_REQ_MSG_V01 0x0020 +#define MEM_ALLOC_RESP_MSG_V01 0x0020 +#define MEM_FREE_REQ_MSG_V01 0x0021 +#define MEM_FREE_RESP_MSG_V01 0x0021 +#define MEM_ALLOC_GENERIC_REQ_MSG_V01 0x0022 +#define MEM_ALLOC_GENERIC_RESP_MSG_V01 0x0022 +#define MEM_FREE_GENERIC_REQ_MSG_V01 0x0023 +#define MEM_FREE_GENERIC_RESP_MSG_V01 0x0023 +#define MEM_QUERY_SIZE_REQ_MSG_V01 0x0024 +#define MEM_QUERY_SIZE_RESP_MSG_V01 0x0024 + +#endif diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c new file mode 100644 index 000000000000..50f3aeb0b4f0 --- /dev/null +++ b/drivers/soc/qcom/memshare/msm_memshare.c @@ -0,0 +1,847 @@ +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "msm_memshare.h" +#include "heap_mem_ext_v01.h" + +/* Macros */ +#define MEMSHARE_DEV_NAME "memshare" +#define MEMSHARE_CHILD_DEV_NAME "memshare_child" +static DEFINE_DMA_ATTRS(attrs); + +static struct qmi_handle *mem_share_svc_handle; +static void mem_share_svc_recv_msg(struct work_struct *work); +static DECLARE_DELAYED_WORK(work_recv_msg, mem_share_svc_recv_msg); +static struct workqueue_struct *mem_share_svc_workqueue; + +/* Memshare Driver Structure */ +struct memshare_driver { + struct device *dev; + struct mutex mem_share; + struct mutex mem_free; + struct work_struct memshare_init_work; +}; + +struct memshare_child { + struct device *dev; +}; + +static struct memshare_driver *memsh_drv; +static struct memshare_child *memsh_child; +static struct mem_blocks memblock[MAX_CLIENTS]; +static uint32_t num_clients; +static struct msg_desc mem_share_svc_alloc_req_desc = { + .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_ALLOC_REQ_MSG_V01, + .ei_array = mem_alloc_req_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_alloc_resp_desc = { + .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_ALLOC_RESP_MSG_V01, + .ei_array = mem_alloc_resp_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_free_req_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_FREE_REQ_MSG_V01, + .ei_array = mem_free_req_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_free_resp_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_FREE_RESP_MSG_V01, + .ei_array = mem_free_resp_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_alloc_generic_req_desc = { + .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_ALLOC_GENERIC_REQ_MSG_V01, + .ei_array = mem_alloc_generic_req_msg_data_v01_ei, +}; + +static struct msg_desc 
mem_share_svc_alloc_generic_resp_desc = { + .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_ALLOC_GENERIC_RESP_MSG_V01, + .ei_array = mem_alloc_generic_resp_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_free_generic_req_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_FREE_GENERIC_REQ_MSG_V01, + .ei_array = mem_free_generic_req_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_free_generic_resp_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_FREE_GENERIC_RESP_MSG_V01, + .ei_array = mem_free_generic_resp_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_size_query_req_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_QUERY_SIZE_REQ_MSG_V01, + .ei_array = mem_query_size_req_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_size_query_resp_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_QUERY_SIZE_RESP_MSG_V01, + .ei_array = mem_query_size_resp_msg_data_v01_ei, +}; + +static int check_client(int client_id, int proc, int request) +{ + int i = 0; + int found = DHMS_MEM_CLIENT_INVALID; + + for (i = 0; i < MAX_CLIENTS; i++) { + if (memblock[i].client_id == client_id && + memblock[i].peripheral == proc) { + found = i; + break; + } + } + if ((found == DHMS_MEM_CLIENT_INVALID) && !request) { + pr_debug("No registered client, adding a new client\n"); + /* Add a new client */ + for (i = 0; i < MAX_CLIENTS; i++) { + if (memblock[i].client_id == DHMS_MEM_CLIENT_INVALID) { + memblock[i].client_id = client_id; + memblock[i].alloted = 0; + memblock[i].guarantee = 0; + memblock[i].peripheral = proc; + found = i; + break; + } + } + } + + return found; +} + +void free_client(int id) +{ + + memblock[id].size = 0; + memblock[id].phy_addr = 0; + memblock[id].virtual_addr = 0; + memblock[id].alloted = 0; + memblock[id].client_id = DHMS_MEM_CLIENT_INVALID; + memblock[id].guarantee = 0; + memblock[id].peripheral = -1; + 
memblock[id].sequence_id = -1; + memblock[id].memory_type = MEMORY_CMA; + +} + +void free_mem_clients(int proc) +{ + int i; + + pr_debug("memshare: freeing clients\n"); + + for (i = 0; i < MAX_CLIENTS; i++) { + if (memblock[i].peripheral == proc && + !memblock[i].guarantee && memblock[i].alloted) { + pr_debug("Freeing memory for client id: %d\n", + memblock[i].client_id); + dma_free_attrs(memsh_drv->dev, memblock[i].size, + memblock[i].virtual_addr, memblock[i].phy_addr, + &attrs); + free_client(i); + } + } +} + +void fill_alloc_response(struct mem_alloc_generic_resp_msg_v01 *resp, + int id, int *flag) +{ + resp->sequence_id_valid = 1; + resp->sequence_id = memblock[id].sequence_id; + resp->dhms_mem_alloc_addr_info_valid = 1; + resp->dhms_mem_alloc_addr_info_len = 1; + resp->dhms_mem_alloc_addr_info[0].phy_addr = memblock[id].phy_addr; + resp->dhms_mem_alloc_addr_info[0].num_bytes = memblock[id].size; + if (!*flag) { + resp->resp.result = QMI_RESULT_SUCCESS_V01; + resp->resp.error = QMI_ERR_NONE_V01; + } else { + resp->resp.result = QMI_RESULT_FAILURE_V01; + resp->resp.error = QMI_ERR_NO_MEMORY_V01; + } + +} + +void initialize_client(void) +{ + int i; + + for (i = 0; i < MAX_CLIENTS; i++) { + memblock[i].alloted = 0; + memblock[i].size = 0; + memblock[i].guarantee = 0; + memblock[i].phy_addr = 0; + memblock[i].virtual_addr = 0; + memblock[i].client_id = DHMS_MEM_CLIENT_INVALID; + memblock[i].peripheral = -1; + memblock[i].sequence_id = -1; + memblock[i].memory_type = MEMORY_CMA; + } + dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); +} + +static int modem_notifier_cb(struct notifier_block *this, unsigned long code, + void *_cmd) +{ + pr_debug("memshare: Modem notification\n"); + + switch (code) { + + case SUBSYS_AFTER_POWERUP: + pr_err("memshare: Modem Restart has happened\n"); + free_mem_clients(DHMS_MEM_PROC_MPSS_V01); + break; + + default: + pr_debug("Memshare: code: %lu\n", code); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block nb = { + 
.notifier_call = modem_notifier_cb, +}; + +static int handle_alloc_req(void *req_h, void *req, void *conn_h) +{ + struct mem_alloc_req_msg_v01 *alloc_req; + struct mem_alloc_resp_msg_v01 alloc_resp; + int rc = 0; + + alloc_req = (struct mem_alloc_req_msg_v01 *)req; + pr_debug("%s: Received Alloc Request\n", __func__); + pr_debug("%s: req->num_bytes = %d\n", __func__, alloc_req->num_bytes); + mutex_lock(&memsh_drv->mem_share); + if (!memblock[GPS].size) { + memset(&alloc_resp, 0, sizeof(struct mem_alloc_resp_msg_v01)); + alloc_resp.resp = QMI_RESULT_FAILURE_V01; + rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes, + &memblock[GPS]); + } + alloc_resp.num_bytes_valid = 1; + alloc_resp.num_bytes = alloc_req->num_bytes; + alloc_resp.handle_valid = 1; + alloc_resp.handle = memblock[GPS].phy_addr; + if (rc) { + alloc_resp.resp = QMI_RESULT_FAILURE_V01; + memblock[GPS].size = 0; + } else { + alloc_resp.resp = QMI_RESULT_SUCCESS_V01; + } + + mutex_unlock(&memsh_drv->mem_share); + + pr_debug("alloc_resp.num_bytes :%d, alloc_resp.handle :%lx, alloc_resp.mem_req_result :%lx\n", + alloc_resp.num_bytes, + (unsigned long int)alloc_resp.handle, + (unsigned long int)alloc_resp.resp); + rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, + &mem_share_svc_alloc_resp_desc, &alloc_resp, + sizeof(alloc_resp)); + if (rc < 0) + pr_err("In %s, Error sending the alloc request: %d\n", + __func__, rc); + + return rc; +} + +static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) +{ + struct mem_alloc_generic_req_msg_v01 *alloc_req; + struct mem_alloc_generic_resp_msg_v01 *alloc_resp; + int rc, resp = 0; + int client_id; + + alloc_req = (struct mem_alloc_generic_req_msg_v01 *)req; + pr_debug("alloc request client id: %d proc _id: %d\n", + alloc_req->client_id, alloc_req->proc_id); + mutex_lock(&memsh_drv->mem_share); + alloc_resp = kzalloc(sizeof(struct mem_alloc_generic_resp_msg_v01), + GFP_KERNEL); + if (!alloc_resp) { + 
mutex_unlock(&memsh_drv->mem_share); + return -ENOMEM; + } + alloc_resp->resp.result = QMI_RESULT_FAILURE_V01; + alloc_resp->resp.error = QMI_ERR_NO_MEMORY_V01; + client_id = check_client(alloc_req->client_id, alloc_req->proc_id, + CHECK); + + if (client_id >= MAX_CLIENTS) { + pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n", + __func__, alloc_req->client_id, + alloc_req->proc_id); + return -EINVAL; + } + + if (!memblock[client_id].alloted) { + rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes, + &memblock[client_id]); + if (rc) { + pr_err("In %s,Unable to allocate memory for requested client\n", + __func__); + resp = 1; + } + if (!resp) { + memblock[client_id].alloted = 1; + memblock[client_id].size = alloc_req->num_bytes; + memblock[client_id].peripheral = alloc_req->proc_id; + } + } + memblock[client_id].sequence_id = alloc_req->sequence_id; + + fill_alloc_response(alloc_resp, client_id, &resp); + + mutex_unlock(&memsh_drv->mem_share); + pr_debug("alloc_resp.num_bytes :%d, alloc_resp.handle :%lx, alloc_resp.mem_req_result :%lx\n", + alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes, + (unsigned long int) + alloc_resp->dhms_mem_alloc_addr_info[0].phy_addr, + (unsigned long int)alloc_resp->resp.result); + rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, + &mem_share_svc_alloc_generic_resp_desc, alloc_resp, + sizeof(alloc_resp)); + + if (rc < 0) + pr_err("In %s, Error sending the alloc request: %d\n", + __func__, rc); + + return rc; +} + +static int handle_free_req(void *req_h, void *req, void *conn_h) +{ + struct mem_free_req_msg_v01 *free_req; + struct mem_free_resp_msg_v01 free_resp; + int rc; + + mutex_lock(&memsh_drv->mem_free); + if (!memblock[GPS].guarantee) { + free_req = (struct mem_free_req_msg_v01 *)req; + pr_debug("%s: Received Free Request\n", __func__); + memset(&free_resp, 0, sizeof(struct mem_free_resp_msg_v01)); + pr_debug("In %s: pblk->virtual_addr :%lx, pblk->phy_addr: %lx\n,size: %d", + __func__, 
+ (unsigned long int)memblock[GPS].virtual_addr, + (unsigned long int)free_req->handle, + memblock[GPS].size); + dma_free_coherent(memsh_drv->dev, memblock[GPS].size, + memblock[GPS].virtual_addr, + free_req->handle); + } + free_resp.resp = QMI_RESULT_SUCCESS_V01; + mutex_unlock(&memsh_drv->mem_free); + rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, + &mem_share_svc_free_resp_desc, &free_resp, + sizeof(free_resp)); + if (rc < 0) + pr_err("In %s, Error sending the free request: %d\n", + __func__, rc); + + return rc; +} + +static int handle_free_generic_req(void *req_h, void *req, void *conn_h) +{ + struct mem_free_generic_req_msg_v01 *free_req; + struct mem_free_generic_resp_msg_v01 free_resp; + int rc; + int flag = 0; + uint32_t client_id; + + free_req = (struct mem_free_generic_req_msg_v01 *)req; + pr_debug("%s: Received Free Request\n", __func__); + mutex_lock(&memsh_drv->mem_free); + memset(&free_resp, 0, sizeof(struct mem_free_generic_resp_msg_v01)); + free_resp.resp.error = QMI_ERR_INTERNAL_V01; + free_resp.resp.result = QMI_RESULT_FAILURE_V01; + pr_debug("Client id: %d proc id: %d\n", free_req->client_id, + free_req->proc_id); + client_id = check_client(free_req->client_id, free_req->proc_id, FREE); + if (client_id == DHMS_MEM_CLIENT_INVALID) { + pr_err("In %s, Invalid client request to free memory\n", + __func__); + flag = 1; + } else if (!memblock[client_id].guarantee && + memblock[client_id].alloted) { + pr_debug("In %s: pblk->virtual_addr :%lx, pblk->phy_addr: %lx\n,size: %d", + __func__, + (unsigned long int) + memblock[client_id].virtual_addr, + (unsigned long int)memblock[client_id].phy_addr, + memblock[client_id].size); + dma_free_attrs(memsh_drv->dev, memblock[client_id].size, + memblock[client_id].virtual_addr, + memblock[client_id].phy_addr, + &attrs); + free_client(client_id); + } else { + pr_err("In %s, Request came for a guaranteed client cannot free up the memory\n", + __func__); + } + + if (flag) { + free_resp.resp.result = 
QMI_RESULT_FAILURE_V01; + free_resp.resp.error = QMI_ERR_INVALID_ID_V01; + } else { + free_resp.resp.result = QMI_RESULT_SUCCESS_V01; + free_resp.resp.error = QMI_ERR_NONE_V01; + } + + mutex_unlock(&memsh_drv->mem_free); + rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, + &mem_share_svc_free_generic_resp_desc, &free_resp, + sizeof(free_resp)); + + if (rc < 0) + pr_err("In %s, Error sending the free request: %d\n", + __func__, rc); + + return rc; +} + +static int handle_query_size_req(void *req_h, void *req, void *conn_h) +{ + int rc, client_id; + struct mem_query_size_req_msg_v01 *query_req; + struct mem_query_size_rsp_msg_v01 *query_resp; + + query_req = (struct mem_query_size_req_msg_v01 *)req; + mutex_lock(&memsh_drv->mem_share); + query_resp = kzalloc(sizeof(struct mem_query_size_rsp_msg_v01), + GFP_KERNEL); + if (!query_resp) { + mutex_unlock(&memsh_drv->mem_share); + return -ENOMEM; + } + pr_debug("query request client id: %d proc _id: %d\n", + query_req->client_id, query_req->proc_id); + client_id = check_client(query_req->client_id, query_req->proc_id, + CHECK); + + if (client_id >= MAX_CLIENTS) { + pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n", + __func__, query_req->client_id, + query_req->proc_id); + return -EINVAL; + } + + if (memblock[client_id].size) { + query_resp->size_valid = 1; + query_resp->size = memblock[client_id].size; + } else { + query_resp->size_valid = 1; + query_resp->size = 0; + } + query_resp->resp.result = QMI_RESULT_SUCCESS_V01; + query_resp->resp.error = QMI_ERR_NONE_V01; + mutex_unlock(&memsh_drv->mem_share); + + pr_debug("query_resp.size :%d, alloc_resp.mem_req_result :%lx\n", + query_resp->size, + (unsigned long int)query_resp->resp.result); + rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, + &mem_share_svc_size_query_resp_desc, query_resp, + sizeof(query_resp)); + + if (rc < 0) + pr_err("In %s, Error sending the query request: %d\n", + __func__, rc); + + return rc; +} 
+ +static int mem_share_svc_connect_cb(struct qmi_handle *handle, + void *conn_h) +{ + if (mem_share_svc_handle != handle || !conn_h) + return -EINVAL; + + return 0; +} + +static int mem_share_svc_disconnect_cb(struct qmi_handle *handle, + void *conn_h) +{ + if (mem_share_svc_handle != handle || !conn_h) + return -EINVAL; + + return 0; +} + +static int mem_share_svc_req_desc_cb(unsigned int msg_id, + struct msg_desc **req_desc) +{ + int rc; + + pr_debug("memshare: In %s\n", __func__); + switch (msg_id) { + case MEM_ALLOC_REQ_MSG_V01: + *req_desc = &mem_share_svc_alloc_req_desc; + rc = sizeof(struct mem_alloc_req_msg_v01); + break; + + case MEM_FREE_REQ_MSG_V01: + *req_desc = &mem_share_svc_free_req_desc; + rc = sizeof(struct mem_free_req_msg_v01); + break; + + case MEM_ALLOC_GENERIC_REQ_MSG_V01: + *req_desc = &mem_share_svc_alloc_generic_req_desc; + rc = sizeof(struct mem_alloc_generic_req_msg_v01); + break; + + case MEM_FREE_GENERIC_REQ_MSG_V01: + *req_desc = &mem_share_svc_free_generic_req_desc; + rc = sizeof(struct mem_free_generic_req_msg_v01); + break; + + case MEM_QUERY_SIZE_REQ_MSG_V01: + *req_desc = &mem_share_svc_size_query_req_desc; + rc = sizeof(struct mem_query_size_req_msg_v01); + break; + + default: + rc = -ENOTSUPP; + break; + } + return rc; +} + +static int mem_share_svc_req_cb(struct qmi_handle *handle, void *conn_h, + void *req_h, unsigned int msg_id, void *req) +{ + int rc; + + pr_debug("memshare: In %s\n", __func__); + if (mem_share_svc_handle != handle || !conn_h) + return -EINVAL; + + switch (msg_id) { + case MEM_ALLOC_REQ_MSG_V01: + rc = handle_alloc_req(req_h, req, conn_h); + break; + + case MEM_FREE_REQ_MSG_V01: + rc = handle_free_req(req_h, req, conn_h); + break; + + case MEM_ALLOC_GENERIC_REQ_MSG_V01: + rc = handle_alloc_generic_req(req_h, req, conn_h); + break; + + case MEM_FREE_GENERIC_REQ_MSG_V01: + rc = handle_free_generic_req(req_h, req, conn_h); + break; + + case MEM_QUERY_SIZE_REQ_MSG_V01: + rc = handle_query_size_req(req_h, req, 
conn_h); + break; + + default: + rc = -ENOTSUPP; + break; + } + return rc; +} + +static void mem_share_svc_recv_msg(struct work_struct *work) +{ + int rc; + + pr_debug("memshare: In %s\n", __func__); + do { + pr_debug("%s: Notified about a Receive Event", __func__); + } while ((rc = qmi_recv_msg(mem_share_svc_handle)) == 0); + + if (rc != -ENOMSG) + pr_err("%s: Error receiving message\n", __func__); +} + +static void qmi_mem_share_svc_ntfy(struct qmi_handle *handle, + enum qmi_event_type event, void *priv) +{ + pr_debug("memshare: In %s\n", __func__); + switch (event) { + case QMI_RECV_MSG: + queue_delayed_work(mem_share_svc_workqueue, + &work_recv_msg, 0); + break; + default: + break; + } +} + +static struct qmi_svc_ops_options mem_share_svc_ops_options = { + .version = 1, + .service_id = MEM_SHARE_SERVICE_SVC_ID, + .service_vers = MEM_SHARE_SERVICE_VERS, + .service_ins = MEM_SHARE_SERVICE_INS_ID, + .connect_cb = mem_share_svc_connect_cb, + .disconnect_cb = mem_share_svc_disconnect_cb, + .req_desc_cb = mem_share_svc_req_desc_cb, + .req_cb = mem_share_svc_req_cb, +}; + +int memshare_alloc(struct device *dev, + unsigned int block_size, + struct mem_blocks *pblk) +{ + + int ret; + + pr_debug("%s: memshare_alloc called", __func__); + if (!pblk) { + pr_err("%s: Failed to alloc\n", __func__); + return -ENOMEM; + } + + pblk->virtual_addr = dma_alloc_attrs(dev, block_size, + &pblk->phy_addr, GFP_KERNEL, + &attrs); + if (pblk->virtual_addr == NULL) { + pr_err("allocation failed, %d\n", block_size); + ret = -ENOMEM; + return ret; + } + pr_debug("pblk->phy_addr :%lx, pblk->virtual_addr %lx\n", + (unsigned long int)pblk->phy_addr, + (unsigned long int)pblk->virtual_addr); + return 0; +} + +static void memshare_init_worker(struct work_struct *work) +{ + int rc; + + mem_share_svc_workqueue = + create_singlethread_workqueue("mem_share_svc"); + if (!mem_share_svc_workqueue) + return; + + mem_share_svc_handle = qmi_handle_create(qmi_mem_share_svc_ntfy, NULL); + if 
(!mem_share_svc_handle) { + pr_err("%s: Creating mem_share_svc qmi handle failed\n", + __func__); + destroy_workqueue(mem_share_svc_workqueue); + return; + } + rc = qmi_svc_register(mem_share_svc_handle, &mem_share_svc_ops_options); + if (rc < 0) { + pr_err("%s: Registering mem share svc failed %d\n", + __func__, rc); + qmi_handle_destroy(mem_share_svc_handle); + destroy_workqueue(mem_share_svc_workqueue); + return; + } + pr_debug("memshare: memshare_init successful\n"); +} + +static int memshare_child_probe(struct platform_device *pdev) +{ + int rc; + uint32_t size, client_id; + const char *name; + struct memshare_child *drv; + + drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_child), + GFP_KERNEL); + + if (!drv) { + pr_err("Unable to allocate memory to driver\n"); + return -ENOMEM; + } + + drv->dev = &pdev->dev; + memsh_child = drv; + platform_set_drvdata(pdev, memsh_child); + + rc = of_property_read_u32(pdev->dev.of_node, "qcom,peripheral-size", + &size); + if (rc) { + pr_err("In %s, Error reading size of clients, rc: %d\n", + __func__, rc); + return rc; + } + + rc = of_property_read_u32(pdev->dev.of_node, "qcom,client-id", + &client_id); + if (rc) { + pr_err("In %s, Error reading client id, rc: %d\n", + __func__, rc); + return rc; + } + + memblock[num_clients].guarantee = of_property_read_bool( + pdev->dev.of_node, + "qcom,allocate-boot-time"); + + rc = of_property_read_string(pdev->dev.of_node, "label", + &name); + if (rc) { + pr_err("In %s, Error reading peripheral info for client, rc: %d\n", + __func__, rc); + return rc; + } + + if (strcmp(name, "modem") == 0) + memblock[num_clients].peripheral = DHMS_MEM_PROC_MPSS_V01; + else if (strcmp(name, "adsp") == 0) + memblock[num_clients].peripheral = DHMS_MEM_PROC_ADSP_V01; + else if (strcmp(name, "wcnss") == 0) + memblock[num_clients].peripheral = DHMS_MEM_PROC_WCNSS_V01; + + memblock[num_clients].size = size; + memblock[num_clients].client_id = client_id; + + if (memblock[num_clients].guarantee) { + rc = 
memshare_alloc(memsh_child->dev, + memblock[num_clients].size, + &memblock[num_clients]); + if (rc) { + pr_err("In %s, Unable to allocate memory for guaranteed clients, rc: %d\n", + __func__, rc); + return rc; + } + memblock[num_clients].alloted = 1; + } + + num_clients++; + + return 0; +} + +static int memshare_probe(struct platform_device *pdev) +{ + int rc; + struct memshare_driver *drv; + + drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_driver), + GFP_KERNEL); + + if (!drv) { + pr_err("Unable to allocate memory to driver\n"); + return -ENOMEM; + } + + /* Memory allocation has been done successfully */ + mutex_init(&drv->mem_free); + mutex_init(&drv->mem_share); + + INIT_WORK(&drv->memshare_init_work, memshare_init_worker); + schedule_work(&drv->memshare_init_work); + + drv->dev = &pdev->dev; + memsh_drv = drv; + platform_set_drvdata(pdev, memsh_drv); + initialize_client(); + num_clients = 0; + + rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + + if (rc) { + pr_err("In %s, error populating the devices\n", __func__); + return rc; + } + + subsys_notif_register_notifier("modem", &nb); + pr_info("In %s, Memshare probe success\n", __func__); + + return 0; +} + +static int memshare_remove(struct platform_device *pdev) +{ + if (!memsh_drv) + return 0; + + qmi_svc_unregister(mem_share_svc_handle); + flush_workqueue(mem_share_svc_workqueue); + qmi_handle_destroy(mem_share_svc_handle); + destroy_workqueue(mem_share_svc_workqueue); + + return 0; +} + +static int memshare_child_remove(struct platform_device *pdev) +{ + if (!memsh_child) + return 0; + + return 0; +} + +static struct of_device_id memshare_match_table[] = { + { + .compatible = "qcom,memshare", + }, + {} +}; + +static struct of_device_id memshare_match_table1[] = { + { + .compatible = "qcom,memshare-peripheral", + }, + {} +}; + + +static struct platform_driver memshare_pdriver = { + .probe = memshare_probe, + .remove = memshare_remove, + .driver = { + .name = MEMSHARE_DEV_NAME, + 
.owner = THIS_MODULE,
		.of_match_table = memshare_match_table,
	},
};

static struct platform_driver memshare_pchild = {
	.probe = memshare_child_probe,
	.remove = memshare_child_remove,
	.driver = {
		.name = MEMSHARE_CHILD_DEV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = memshare_match_table1,
	},
};

/*
 * NOTE(review): two module_platform_driver() invocations in one
 * translation unit each expand to module_init()/module_exit(); that
 * only links when this driver is built-in (bool Kconfig) — confirm,
 * otherwise register both drivers from a single explicit module_init.
 */
module_platform_driver(memshare_pdriver);
module_platform_driver(memshare_pchild);

MODULE_DESCRIPTION("Mem Share QMI Service Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h
new file mode 100644
index 000000000000..6ea44ac8d296
--- /dev/null
+++ b/drivers/soc/qcom/memshare/msm_memshare.h
@@ -0,0 +1,57 @@
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _LINUX_MEM_SHARE_H
#define _LINUX_MEM_SHARE_H

/* QMI service identity of the memshare server. */
#define MEM_SHARE_SERVICE_SVC_ID 0x00000034
#define MEM_SHARE_SERVICE_INS_ID 1
#define MEM_SHARE_SERVICE_VERS 1

/* mem_blocks.memory_type values. */
#define MEMORY_CMA 1
#define MEMORY_NON_CMA 0
/* Upper bound on memblock[] entries (and thus on DT child clients). */
#define MAX_CLIENTS 10
/* memblock[] slot reserved for the legacy (non-generic) GPS client. */
#define GPS 0
/* check_client() lookup modes. */
#define CHECK 0
#define FREE 1

/* Per-client bookkeeping entry for the memblock[] table. */
struct mem_blocks {
	/* Client Id information */
	uint32_t client_id;
	/* Peripheral associated with client */
	uint32_t peripheral;
	/* Sequence Id */
	uint32_t sequence_id;
	/* CMA or Non-CMA region */
	uint32_t memory_type;
	/* Guaranteed Memory */
	uint32_t guarantee;
	/* Memory alloted or not */
	uint32_t alloted;
	/* Size required for client */
	uint32_t size;
	/* start address of the memory block reserved by server memory
	 * subsystem to client
	 */
	phys_addr_t phy_addr;
	/* Virtual address for the physical address allocated
	 */
	void *virtual_addr;
};

/* Allocate block_size bytes into pblk; returns 0 or -ENOMEM. */
int memshare_alloc(struct device *dev,
					unsigned int block_size,
					struct mem_blocks *pblk);
/* Free a block previously obtained via memshare_alloc(). */
void memshare_free(unsigned int block_size,
					struct mem_blocks *pblk);
#endif /* _LINUX_MEM_SHARE_H */
-- 
2.11.0