#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/ipa_uc_offload.h>
#include "ipa_api.h"
#define DRV_NAME "ipa"
}
EXPORT_SYMBOL(ipa_recycle_wan_skb);
+/**
+ * ipa_setup_uc_ntn_pipes() - setup uc offload (NTN) pipes
+ * @inp: [in] NTN connection parameters (UL/DL ring and buffer pool info)
+ * @notify: [in] client callback for pipe events
+ * @priv: [in] opaque pointer handed back through @notify
+ * @hdr_len: [in] header length configured for the pipes
+ * @outp: [out] NTN connection output parameters
+ *
+ * Thin dispatch wrapper: IPA_API_DISPATCH_RETURN() forwards the call to
+ * the HW-specific implementation registered in the ipa_api function table
+ * and stores its result in the local 'ret'.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_setup_uc_ntn_pipes, inp,
+ notify, priv, hdr_len, outp);
+
+ return ret;
+}
+
+/**
+ * ipa_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ * @ipa_ep_idx_ul: [in] IPA endpoint index of the uplink pipe
+ * @ipa_ep_idx_dl: [in] IPA endpoint index of the downlink pipe
+ *
+ * Thin dispatch wrapper: IPA_API_DISPATCH_RETURN() forwards the call to
+ * the HW-specific implementation and stores its result in 'ret'.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_tear_down_uc_offload_pipes, ipa_ep_idx_ul,
+ ipa_ep_idx_dl);
+
+ return ret;
+}
+
static const struct dev_pm_ops ipa_pm_ops = {
.suspend_noirq = ipa_ap_suspend,
.resume_noirq = ipa_ap_resume,
*/
#include <linux/ipa_mhi.h>
+#include <linux/ipa_uc_offload.h>
#include "ipa_common_i.h"
#ifndef _IPA_API_H_
void (*ipa_recycle_wan_skb)(struct sk_buff *skb);
+ int (*ipa_setup_uc_ntn_pipes)(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *);
+
+ int (*ipa_tear_down_uc_offload_pipes)(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl);
};
#ifdef CONFIG_IPA
-obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o
-obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o
\ No newline at end of file
+obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
+obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
--- /dev/null
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_uc_offload.h>
+#include <linux/msm_ipa.h>
+#include "../ipa_common_i.h"
+
+#define IPA_NTN_DMA_POOL_ALIGNMENT 8
+#define OFFLOAD_DRV_NAME "ipa_uc_offload"
+/* Logging helpers: every message goes to the kernel log and is mirrored
+ * into the IPA IPC log buffers. The "%s:%d" in the shared format string
+ * requires __func__/__LINE__ in *every* invocation; the IPA_IPC_LOGGING
+ * calls previously omitted them, leaving the format specifiers without
+ * matching arguments (garbage/unsafe varargs consumption in the IPC log).
+ */
+#define IPA_UC_OFFLOAD_DBG(fmt, args...) \
+ do { \
+ pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_LOW(fmt, args...) \
+ do { \
+ pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_ERR(fmt, args...) \
+ do { \
+ pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_INFO(fmt, args...) \
+ do { \
+ pr_info(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ } while (0)
+
+/* Client lifecycle: INVALID (zeroed ctx) -> INITIALIZED (reg_intf) ->
+ * UP (conn_pipes) -> DOWN (disconn_pipes) -> freed (cleanup).
+ */
+enum ipa_uc_offload_state {
+ IPA_UC_OFFLOAD_STATE_INVALID,
+ IPA_UC_OFFLOAD_STATE_INITIALIZED,
+ IPA_UC_OFFLOAD_STATE_UP,
+ IPA_UC_OFFLOAD_STATE_DOWN,
+};
+
+/* Per-protocol offload client context. 'partial_hdr_hdl' holds the
+ * IPv4/IPv6 partial-header handles returned by ipa_add_hdr();
+ * 'ntn_completion' is signalled by the RM notify callback when the
+ * ODU_ADAPT_PROD resource grant completes.
+ */
+struct ipa_uc_offload_ctx {
+ enum ipa_uc_offload_proto proto;
+ enum ipa_uc_offload_state state;
+ void *priv;
+ u8 hdr_len;
+ u32 partial_hdr_hdl[IPA_IP_MAX];
+ char netdev_name[IPA_RESOURCE_NAME_MAX];
+ ipa_notify_cb notify;
+ struct completion ntn_completion;
+};
+
+/* One context slot per protocol, indexed by enum ipa_uc_offload_proto */
+static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
+
+/**
+ * ipa_commit_partial_hdr() - commit the IPv4/IPv6 partial headers to IPA
+ * @hdr: caller-allocated ioctl struct with room for two ipa_hdr_add entries
+ * @netdev_name: network device name used to derive the header names
+ * @hdr_info: per-IP-version header templates (indexed by IPA_IP_v4/v6)
+ *
+ * Builds "<netdev>_ipv4" and "<netdev>_ipv6" partial-header entries and
+ * commits them via ipa_add_hdr(); on success the driver fills
+ * hdr->hdr[i].hdr_hdl with the allocated handles.
+ *
+ * NOTE(review): hdr_info[i].hdr_len is memcpy'd without being checked
+ * against the size of hdr->hdr[i].hdr — confirm callers bound it.
+ *
+ * Return: 0 on success, -EINVAL on NULL args, -EFAULT on commit failure.
+ */
+static int ipa_commit_partial_hdr(
+ struct ipa_ioc_add_hdr *hdr,
+ const char *netdev_name,
+ struct ipa_hdr_info *hdr_info)
+{
+ int i;
+
+ if (hdr == NULL || hdr_info == NULL) {
+ IPA_UC_OFFLOAD_ERR("Invalid input\n");
+ return -EINVAL;
+ }
+
+ hdr->commit = 1;
+ hdr->num_hdrs = 2;
+
+ snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+ "%s_ipv4", netdev_name);
+ snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+ "%s_ipv6", netdev_name);
+ /* fill both entries; relies on IPA_IP_v4/IPA_IP_v6 being 0/1 to
+ * match the [0]/[1] naming above
+ */
+ for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+ hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
+ memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
+ hdr->hdr[i].type = hdr_info[i].hdr_type;
+ hdr->hdr[i].is_partial = 1;
+ hdr->hdr[i].is_eth2_ofst_valid = 1;
+ hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
+ }
+
+ if (ipa_add_hdr(hdr)) {
+ IPA_UC_OFFLOAD_ERR("fail to add partial headers\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa_uc_offload_ntn_reg_intf() - register the NTN interface with IPA
+ * @inp: client parameters (netdev name, header info, metadata, callbacks)
+ * @outp: output parameters (filled by the caller on success)
+ * @ntn_ctx: NTN context to populate; moved to INITIALIZED on success
+ *
+ * Commits the partial headers, then registers the Tx/Rx interface
+ * properties (IPv4 + IPv6 each) for the ODU pipes. On interface
+ * registration failure the context is zeroed, returning it to the
+ * INVALID state so the client may retry.
+ *
+ * Return: 0 on success, -ENOMEM/-EFAULT on failure.
+ */
+static int ipa_uc_offload_ntn_reg_intf(
+ struct ipa_uc_offload_intf_params *inp,
+ struct ipa_uc_offload_out_params *outp,
+ struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_tx_intf tx;
+ struct ipa_rx_intf rx;
+ struct ipa_ioc_tx_intf_prop tx_prop[2];
+ struct ipa_ioc_rx_intf_prop rx_prop[2];
+ u32 len;
+ int ret = 0;
+
+ IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
+ inp->netdev_name);
+
+ /* NOTE(review): copies the full IPA_RESOURCE_NAME_MAX bytes —
+ * assumes inp->netdev_name is an array of that size, not a
+ * shorter C string; confirm against the API header.
+ */
+ memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
+ ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
+ ntn_ctx->notify = inp->notify;
+ ntn_ctx->priv = inp->priv;
+
+ /* add partial header */
+ len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
+ hdr = kzalloc(len, GFP_KERNEL);
+ if (hdr == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
+ return -ENOMEM;
+ }
+
+ if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) {
+ IPA_UC_OFFLOAD_ERR("fail to commit partial headers\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ /* populate tx prop */
+ tx.num_props = 2;
+ tx.prop = tx_prop;
+
+ memset(tx_prop, 0, sizeof(tx_prop));
+ tx_prop[0].ip = IPA_IP_v4;
+ tx_prop[0].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+ tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+ memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
+ sizeof(tx_prop[0].hdr_name));
+
+ tx_prop[1].ip = IPA_IP_v6;
+ tx_prop[1].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+ tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+ memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
+ sizeof(tx_prop[1].hdr_name));
+
+ /* populate rx prop */
+ rx.num_props = 2;
+ rx.prop = rx_prop;
+
+ memset(rx_prop, 0, sizeof(rx_prop));
+ rx_prop[0].ip = IPA_IP_v4;
+ rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+ if (inp->is_meta_data_valid) {
+ rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_prop[0].attrib.meta_data = inp->meta_data;
+ rx_prop[0].attrib.meta_data_mask = inp->meta_data_mask;
+ }
+
+ rx_prop[1].ip = IPA_IP_v6;
+ rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+ if (inp->is_meta_data_valid) {
+ rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_prop[1].attrib.meta_data = inp->meta_data;
+ rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask;
+ }
+
+ if (ipa_register_intf(inp->netdev_name, &tx, &rx)) {
+ IPA_UC_OFFLOAD_ERR("fail to add interface prop\n");
+ /* reset ctx back to INVALID so the client can retry */
+ memset(ntn_ctx, 0, sizeof(*ntn_ctx));
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ /* keep the header handles so cleanup can delete them later */
+ ntn_ctx->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
+ ntn_ctx->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
+ init_completion(&ntn_ctx->ntn_completion);
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
+
+fail:
+ kfree(hdr);
+ return ret;
+}
+
+/**
+ * ipa_uc_offload_reg_intf() - register a uC offload client interface
+ * @inp: [in] interface parameters (proto, netdev name, headers, callbacks)
+ * @outp: [out] client handle for subsequent API calls
+ *
+ * Allocates (or reuses) the per-protocol context slot and, for NTN,
+ * commits the partial headers and Tx/Rx interface properties.
+ *
+ * Return: 0 on success, -EINVAL on bad params or wrong state,
+ * -ENOMEM if the context cannot be allocated, other negative errno
+ * from the protocol-specific registration.
+ */
+int ipa_uc_offload_reg_intf(
+ struct ipa_uc_offload_intf_params *inp,
+ struct ipa_uc_offload_out_params *outp)
+{
+ struct ipa_uc_offload_ctx *ctx;
+ int ret = 0;
+
+ if (inp == NULL || outp == NULL) {
+ IPA_UC_OFFLOAD_ERR("invalid params in=%p out=%p\n", inp, outp);
+ return -EINVAL;
+ }
+
+ if (inp->proto <= IPA_UC_INVALID ||
+ inp->proto >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("invalid proto %d\n", inp->proto);
+ return -EINVAL;
+ }
+
+ if (!ipa_uc_offload_ctx[inp->proto]) {
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc uc offload ctx\n");
+ /* allocation failure is -ENOMEM, not -EFAULT */
+ return -ENOMEM;
+ }
+ ipa_uc_offload_ctx[inp->proto] = ctx;
+ ctx->proto = inp->proto;
+ } else {
+ ctx = ipa_uc_offload_ctx[inp->proto];
+ }
+
+ /* a freshly kzalloc'ed ctx has state 0 == STATE_INVALID, so a
+ * brand-new slot always passes this check
+ */
+ if (ctx->state != IPA_UC_OFFLOAD_STATE_INVALID) {
+ IPA_UC_OFFLOAD_ERR("Already Initialized\n");
+ return -EINVAL;
+ }
+
+ if (ctx->proto == IPA_UC_NTN) {
+ ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx);
+ if (!ret)
+ outp->clnt_hndl = IPA_UC_NTN;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
+
+/* IPA RM release callback for ODU_ADAPT_CONS: nothing to do, the
+ * resource can always be released immediately.
+ */
+static int ipa_uc_ntn_cons_release(void)
+{
+ return 0;
+}
+
+/* IPA RM request callback for ODU_ADAPT_CONS: the resource can be
+ * granted only while the NTN offload connection is fully UP.
+ */
+static int ipa_uc_ntn_cons_request(void)
+{
+ struct ipa_uc_offload_ctx *ctx = ipa_uc_offload_ctx[IPA_UC_NTN];
+
+ if (!ctx) {
+ IPA_UC_OFFLOAD_ERR("NTN is not initialized\n");
+ return -EFAULT;
+ }
+
+ if (ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
+ IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", ctx->state);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa_uc_offload_rm_notify() - IPA RM event callback for ODU_ADAPT_PROD
+ * @user_data: the ipa_uc_offload_ctx registered with the resource
+ * @event: RM event (granted / released)
+ * @data: event payload (unused here)
+ *
+ * On IPA_RM_RESOURCE_GRANTED, wakes the waiter blocked on
+ * ntn_completion in ipa_uc_ntn_conn_pipes().
+ */
+static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event,
+ unsigned long data)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+
+ offload_ctx = (struct ipa_uc_offload_ctx *)user_data;
+ if (!(offload_ctx && offload_ctx->proto > IPA_UC_INVALID &&
+ offload_ctx->proto < IPA_UC_MAX_PROT_SIZE)) {
+ IPA_UC_OFFLOAD_ERR("Invalid user data\n");
+ return;
+ }
+
+ /* log-only check: the event is still processed even if the state
+ * is unexpected
+ */
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED)
+ IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", offload_ctx->state);
+
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ complete_all(&offload_ctx->ntn_completion);
+ break;
+
+ case IPA_RM_RESOURCE_RELEASED:
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid RM Evt: %d", event);
+ break;
+ }
+}
+
+/**
+ * ipa_uc_ntn_conn_pipes() - create RM resources and connect NTN pipes
+ * @inp: UL/DL ring and buffer-pool physical addresses
+ * @outp: [out] connection output parameters from the uC
+ * @ntn_ctx: NTN offload context; moved to UP once pipes are set up
+ *
+ * Validates DMA-pool alignment, creates the ODU_ADAPT_PROD/CONS RM
+ * resources, adds the PROD->APPS_CONS dependency, sets up the uC pipes
+ * and requests the PROD resource (waiting up to 10s for the grant).
+ *
+ * Note: "&param" was mis-encoded as "¶m" in the previous revision,
+ * which does not compile; fixed throughout.
+ *
+ * Return: 0 on success, -EINVAL on alignment failure, -EFAULT otherwise.
+ */
+int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
+ struct ipa_ntn_conn_out_params *outp,
+ struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ struct ipa_rm_create_params param;
+ int result = 0;
+
+ /* the uC requires rings/buffer pools aligned to 8 bytes */
+ if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+ inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+ IPA_UC_OFFLOAD_ERR("alignment failure on TX\n");
+ return -EINVAL;
+ }
+ if (inp->ul.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+ inp->ul.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+ IPA_UC_OFFLOAD_ERR("alignment failure on RX\n");
+ return -EINVAL;
+ }
+
+ memset(&param, 0, sizeof(param));
+ param.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+ param.reg_params.user_data = ntn_ctx;
+ param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
+ param.floor_voltage = IPA_VOLTAGE_SVS;
+ result = ipa_rm_create_resource(&param);
+ if (result) {
+ IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_PROD resource\n");
+ return -EFAULT;
+ }
+
+ memset(&param, 0, sizeof(param));
+ param.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+ param.request_resource = ipa_uc_ntn_cons_request;
+ param.release_resource = ipa_uc_ntn_cons_release;
+ result = ipa_rm_create_resource(&param);
+ if (result) {
+ IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_CONS resource\n");
+ goto fail_create_rm_cons;
+ }
+
+ if (ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_APPS_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to add rm dependency\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
+ ntn_ctx->priv, ntn_ctx->hdr_len, outp)) {
+ IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
+ result = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+ if (result == -EINPROGRESS) {
+ if (wait_for_completion_timeout(&ntn_ctx->ntn_completion,
+ 10*HZ) == 0) {
+ IPA_UC_OFFLOAD_ERR("ODU PROD resource req time out\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ } else if (result != 0) {
+ IPA_UC_OFFLOAD_ERR("fail to request resource\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ /* NOTE(review): pipes set up above are not torn down on this
+ * error path, and the added RM dependency is not removed —
+ * confirm whether the client is expected to recover via
+ * disconn_pipes().
+ */
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+fail_create_rm_cons:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+
+ return result;
+}
+
+/**
+ * ipa_uc_offload_conn_pipes() - connect the offload pipes for a client
+ * @inp: [in] connection parameters plus the client handle from reg_intf
+ * @outp: [out] per-protocol connection output parameters
+ *
+ * Validates the handle and state, then dispatches to the protocol
+ * specific connect routine (currently NTN only).
+ *
+ * Return: 0 on success, -EINVAL on bad params/handle/proto, -EPERM if
+ * the client is not in the INITIALIZED state, or the protocol
+ * specific error code.
+ */
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
+ struct ipa_uc_offload_conn_out_params *outp)
+{
+ struct ipa_uc_offload_ctx *ctx;
+ int ret = 0;
+
+ if (inp == NULL || outp == NULL) {
+ IPA_UC_OFFLOAD_ERR("bad parm. in=%p out=%p\n", inp, outp);
+ return -EINVAL;
+ }
+
+ if (inp->clnt_hndl <= IPA_UC_INVALID ||
+ inp->clnt_hndl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("invalid client handle %d\n",
+ inp->clnt_hndl);
+ return -EINVAL;
+ }
+
+ ctx = ipa_uc_offload_ctx[inp->clnt_hndl];
+ if (!ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid Handle\n");
+ return -EINVAL;
+ }
+
+ if (ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
+ IPA_UC_OFFLOAD_ERR("Invalid state %d\n", ctx->state);
+ return -EPERM;
+ }
+
+ if (ctx->proto == IPA_UC_NTN) {
+ ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn, ctx);
+ } else {
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", ctx->proto);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
+
+/**
+ * ipa_set_perf_profile() - apply a bandwidth profile to an ODU resource
+ * @profile: client (ODU_PROD or ODU_TETH_CONS) and max bandwidth in Mbps
+ *
+ * Maps the IPA client to the corresponding RM resource and forwards the
+ * bandwidth vote to ipa_rm_set_perf_profile().
+ *
+ * Return: 0 on success, -EINVAL on bad input, -EFAULT on RM failure.
+ */
+int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+ struct ipa_rm_perf_profile rm_profile;
+ enum ipa_rm_resource_name resource_name;
+
+ if (profile == NULL) {
+ IPA_UC_OFFLOAD_ERR("Invalid input\n");
+ return -EINVAL;
+ }
+
+ rm_profile.max_supported_bandwidth_mbps =
+ profile->max_supported_bw_mbps;
+
+ switch (profile->client) {
+ case IPA_CLIENT_ODU_PROD:
+ resource_name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+ break;
+ case IPA_CLIENT_ODU_TETH_CONS:
+ resource_name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+ break;
+ default:
+ IPA_UC_OFFLOAD_ERR("not supported\n");
+ return -EINVAL;
+ }
+
+ if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
+ IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_perf_profile);
+
+/**
+ * ipa_uc_ntn_disconn_pipes() - disconnect the NTN offload pipes
+ * @ntn_ctx: NTN offload context; moved to DOWN before teardown starts
+ *
+ * Removes the RM dependency, deletes both ODU RM resources, and then
+ * tears down the uC pipes. Endpoint indices from ipa_get_ep_mapping()
+ * are now validated before use — a negative (invalid) index must not
+ * be handed to the uC teardown.
+ *
+ * Return: 0 on success, -EFAULT on any failure.
+ */
+static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ int ipa_ep_idx_ul, ipa_ep_idx_dl;
+
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
+ if (ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_APPS_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete rm dependency\n");
+ return -EFAULT;
+ }
+
+ if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_PROD resource\n");
+ return -EFAULT;
+ }
+
+ if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_CONS resource\n");
+ return -EFAULT;
+ }
+
+ ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ODU_PROD);
+ ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ODU_TETH_CONS);
+ if (ipa_ep_idx_ul < 0 || ipa_ep_idx_dl < 0) {
+ IPA_UC_OFFLOAD_ERR("fail to get ep mapping\n");
+ return -EFAULT;
+ }
+ if (ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl)) {
+ IPA_UC_OFFLOAD_ERR("fail to tear down uc offload pipes\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * ipa_uc_offload_disconn_pipes() - disconnect the pipes for a client
+ * @clnt_hdl: client handle returned by ipa_uc_offload_reg_intf()
+ *
+ * Validates the handle and requires the client to be in the UP state,
+ * then dispatches to the protocol-specific disconnect routine.
+ *
+ * Return: 0 on success, -EINVAL on bad handle/state/proto, or the
+ * protocol-specific error code.
+ */
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+ int ret = 0;
+
+ if (clnt_hdl <= IPA_UC_INVALID ||
+ clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+ if (!offload_ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid client Handle\n");
+ return -EINVAL;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
+ IPA_UC_OFFLOAD_ERR("Invalid state\n");
+ return -EINVAL;
+ }
+
+ switch (offload_ctx->proto) {
+ case IPA_UC_NTN:
+ ret = ipa_uc_ntn_disconn_pipes(offload_ctx);
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes);
+
+/**
+ * ipa_uc_ntn_cleanup() - delete the partial headers and deregister intf
+ * @ntn_ctx: NTN context holding the header handles and netdev name
+ *
+ * Deletes the two partial headers committed at registration time, then
+ * deregisters the interface. If deleting the headers fails, the
+ * function returns before deregistering the interface.
+ *
+ * Return: 0 on success, -ENOMEM/-EFAULT on failure.
+ */
+static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ int len, result = 0;
+ struct ipa_ioc_del_hdr *hdr;
+
+ len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
+ hdr = kzalloc(len, GFP_KERNEL);
+ if (hdr == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
+ return -ENOMEM;
+ }
+
+ hdr->commit = 1;
+ hdr->num_hdls = 2;
+ /* indices 0/1 correspond to the IPv4/IPv6 handles stored by
+ * ipa_uc_offload_ntn_reg_intf()
+ */
+ hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0];
+ hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1];
+
+ if (ipa_del_hdr(hdr)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete partial header\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa_deregister_intf(ntn_ctx->netdev_name)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+fail:
+ kfree(hdr);
+ return result;
+}
+
+/**
+ * ipa_uc_offload_cleanup() - release all resources for a client
+ * @clnt_hdl: client handle returned by ipa_uc_offload_reg_intf()
+ *
+ * Requires the client to be DOWN (disconnected). Runs the protocol
+ * specific cleanup; on success frees the context and clears the global
+ * slot so the protocol can be registered again.
+ *
+ * Return: 0 on success, -EINVAL on bad handle/state/proto, or the
+ * protocol-specific error code.
+ */
+int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+ int ret = 0;
+
+ if (clnt_hdl <= IPA_UC_INVALID ||
+ clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+ if (!offload_ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
+ IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
+ return -EINVAL;
+ }
+
+ switch (offload_ctx->proto) {
+ case IPA_UC_NTN:
+ ret = ipa_uc_ntn_cleanup(offload_ctx);
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ /* free the ctx and clear the slot; the redundant NULL
+ * store to the local pointer was removed (dead store)
+ */
+ kfree(offload_ctx);
+ ipa_uc_offload_ctx[clnt_hdl] = NULL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_cleanup);
#define _IPA_COMMON_I_H_
#include <linux/ipc_logging.h>
#include <linux/ipa.h>
+#include <linux/ipa_uc_offload.h>
#define __FILENAME__ \
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb);
void ipa_set_tag_process_before_gating(bool val);
bool ipa_has_open_aggr_frame(enum ipa_client_type client);
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
u8 *ipa_write_64(u64 w, u8 *dest);
u8 *ipa_write_32(u32 w, u8 *dest);
--- /dev/null
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_COMMON_I_H_
+#define _IPA_UC_OFFLOAD_COMMON_I_H_
+
+/* guard now encloses the includes; linux/ipa_uc_offload.h is added so
+ * the ipa_ntn_conn_{in,out}_params declarations below are
+ * self-contained
+ */
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <linux/ipa_uc_offload.h>
+
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+#endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */
obj-$(CONFIG_IPA) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
- ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o
+ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
else
IPADBG(":wdi init ok\n");
+ result = ipa_ntn_init();
+ if (result)
+ IPAERR(":ntn init failed (%d)\n", -result);
+ else
+ IPADBG(":ntn init ok\n");
+
ipa_ctx->q6_proxy_clk_vote_valid = true;
ipa_register_panic_hdlr();
static struct dentry *dfile_stats;
static struct dentry *dfile_wstats;
static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
static struct dentry *dfile_dbg_cnt;
static struct dentry *dfile_msg;
static struct dentry *dfile_ip4_nat;
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
+/* debugfs read handler for the "ntn" stats file: snapshots the uC NTN
+ * stats and formats the TX/RX channel-0 counters into dbg_buff.
+ * NOTE(review): 'stats' is filled by ipa2_get_ntn_stats() but only used
+ * as a success check — the printed values come from the shared-memory
+ * mmio via TX_STATS()/RX_STATS(). Also the last two RX labels
+ * ("num_ic_inj_vdev_change"/"num_ic_inj_fw_desc_change") do not match
+ * the fields actually printed — confirm against IpaHwStatsNTNInfoData_t.
+ */
+static ssize_t ipa_read_ntn(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+#define TX_STATS(y) \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ struct IpaHwStatsNTNInfoData_t stats;
+ int nbytes;
+ int cnt = 0;
+
+ if (!ipa2_get_ntn_stats(&stats)) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "TX num_pkts_processed=%u\n"
+ "TX tail_ptr_val=%u\n"
+ "TX num_db_fired=%u\n"
+ "TX ringFull=%u\n"
+ "TX ringEmpty=%u\n"
+ "TX ringUsageHigh=%u\n"
+ "TX ringUsageLow=%u\n"
+ "TX RingUtilCount=%u\n"
+ "TX bamFifoFull=%u\n"
+ "TX bamFifoEmpty=%u\n"
+ "TX bamFifoUsageHigh=%u\n"
+ "TX bamFifoUsageLow=%u\n"
+ "TX bamUtilCount=%u\n"
+ "TX num_db=%u\n"
+ "TX num_unexpected_db=%u\n"
+ "TX num_bam_int_handled=%u\n"
+ "TX num_bam_int_in_non_running_state=%u\n"
+ "TX num_qmb_int_handled=%u\n"
+ "TX num_bam_int_handled_while_wait_for_bam=%u\n"
+ "TX num_bam_int_handled_while_not_in_bam=%u\n",
+ TX_STATS(num_pkts_processed),
+ TX_STATS(tail_ptr_val),
+ TX_STATS(num_db_fired),
+ TX_STATS(tx_comp_ring_stats.ringFull),
+ TX_STATS(tx_comp_ring_stats.ringEmpty),
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh),
+ TX_STATS(tx_comp_ring_stats.ringUsageLow),
+ TX_STATS(tx_comp_ring_stats.RingUtilCount),
+ TX_STATS(bam_stats.bamFifoFull),
+ TX_STATS(bam_stats.bamFifoEmpty),
+ TX_STATS(bam_stats.bamFifoUsageHigh),
+ TX_STATS(bam_stats.bamFifoUsageLow),
+ TX_STATS(bam_stats.bamUtilCount),
+ TX_STATS(num_db),
+ TX_STATS(num_unexpected_db),
+ TX_STATS(num_bam_int_handled),
+ TX_STATS(num_bam_int_in_non_running_state),
+ TX_STATS(num_qmb_int_handled),
+ TX_STATS(num_bam_int_handled_while_wait_for_bam),
+ TX_STATS(num_bam_int_handled_while_not_in_bam));
+ cnt += nbytes;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "RX max_outstanding_pkts=%u\n"
+ "RX num_pkts_processed=%u\n"
+ "RX rx_ring_rp_value=%u\n"
+ "RX ringFull=%u\n"
+ "RX ringEmpty=%u\n"
+ "RX ringUsageHigh=%u\n"
+ "RX ringUsageLow=%u\n"
+ "RX RingUtilCount=%u\n"
+ "RX bamFifoFull=%u\n"
+ "RX bamFifoEmpty=%u\n"
+ "RX bamFifoUsageHigh=%u\n"
+ "RX bamFifoUsageLow=%u\n"
+ "RX bamUtilCount=%u\n"
+ "RX num_bam_int_handled=%u\n"
+ "RX num_db=%u\n"
+ "RX num_unexpected_db=%u\n"
+ "RX num_pkts_in_dis_uninit_state=%u\n"
+ "num_ic_inj_vdev_change=%u\n"
+ "num_ic_inj_fw_desc_change=%u\n",
+ RX_STATS(max_outstanding_pkts),
+ RX_STATS(num_pkts_processed),
+ RX_STATS(rx_ring_rp_value),
+ RX_STATS(rx_ind_ring_stats.ringFull),
+ RX_STATS(rx_ind_ring_stats.ringEmpty),
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh),
+ RX_STATS(rx_ind_ring_stats.ringUsageLow),
+ RX_STATS(rx_ind_ring_stats.RingUtilCount),
+ RX_STATS(bam_stats.bamFifoFull),
+ RX_STATS(bam_stats.bamFifoEmpty),
+ RX_STATS(bam_stats.bamFifoUsageHigh),
+ RX_STATS(bam_stats.bamFifoUsageLow),
+ RX_STATS(bam_stats.bamUtilCount),
+ RX_STATS(num_bam_int_handled),
+ RX_STATS(num_db),
+ RX_STATS(num_unexpected_db),
+ RX_STATS(num_pkts_in_dis_uninit_state),
+ RX_STATS(num_bam_int_handled_while_not_in_bam),
+ RX_STATS(num_bam_int_handled_while_in_bam_state));
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "Fail to read NTN stats\n");
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
static ssize_t ipa_read_wdi(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
.read = ipa_read_wdi,
};
+/* file_operations for the read-only "ntn" debugfs stats file */
+const struct file_operations ipa_ntn_ops = {
+ .read = ipa_read_ntn,
+};
+
const struct file_operations ipa_msg_ops = {
.read = ipa_read_msg,
};
goto fail;
}
+ dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
+ &ipa_ntn_ops);
+ if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
+ IPAERR("fail to create file for debug_fs ntn stats\n");
+ goto fail;
+ }
+
dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
&ipa_dbg_cnt_ops);
if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>
#include <linux/iommu.h>
+#include <linux/ipa_uc_offload.h>
#include "ipa_hw_defs.h"
#include "ipa_ram_mmap.h"
#include "ipa_reg.h"
#include "ipa_qmi_service.h"
#include "../ipa_api.h"
#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
bool skip_ep_cfg;
bool keep_ipa_awake;
struct ipa_wlan_stats wstats;
- u32 wdi_state;
+ u32 uc_offload_state;
u32 rx_replenish_threshold;
bool disconnect_in_progress;
u32 qmi_request_sent;
struct ipa_controller;
/**
- * @brief Enum value determined based on the feature it
- * corresponds to
- * +----------------+----------------+
- * | 3 bits | 5 bits |
- * +----------------+----------------+
- * | HW_FEATURE | OPCODE |
- * +----------------+----------------+
- *
- */
-#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
-#define EXTRACT_UC_FEATURE(value) (value >> 5)
-
-#define IPA_HW_NUM_FEATURES 0x8
-
-/**
- * enum ipa_hw_features - Values that represent the features supported in IPA HW
- * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
- * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
- * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
-*/
-enum ipa_hw_features {
- IPA_HW_FEATURE_COMMON = 0x0,
- IPA_HW_FEATURE_MHI = 0x1,
- IPA_HW_FEATURE_WDI = 0x3,
- IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
-};
-
-/**
- * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
- * section in 128B shared memory located in offset zero of SW Partition in IPA
- * SRAM.
- * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
- * @cmdParams : CPU->HW command parameter. The parameter filed can hold 32 bits
- * of parameters (immediate parameters) and point on structure in system memory
- * (in such case the address must be accessible for HW)
- * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
- * @responseParams : HW->CPU response parameter. The parameter filed can hold 32
- * bits of parameters (immediate parameters) and point on structure in system
- * memory
- * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
- * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 bits of
- * parameters (immediate parameters) and point on structure in system memory
- * @firstErrorAddress : Contains the address of first error-source on SNOC
- * @hwState : State of HW. The state carries information regarding the error type.
- * @warningCounter : The warnings counter. The counter carries information regarding
- * non fatal errors in HW
- * @interfaceVersionCommon : The Common interface version as reported by HW
- *
- * The shared memory is used for communication between IPA HW and CPU.
- */
-struct IpaHwSharedMemCommonMapping_t {
- u8 cmdOp;
- u8 reserved_01;
- u16 reserved_03_02;
- u32 cmdParams;
- u8 responseOp;
- u8 reserved_09;
- u16 reserved_0B_0A;
- u32 responseParams;
- u8 eventOp;
- u8 reserved_11;
- u16 reserved_13_12;
- u32 eventParams;
- u32 reserved_1B_18;
- u32 firstErrorAddress;
- u8 hwState;
- u8 warningCounter;
- u16 reserved_23_22;
- u16 interfaceVersionCommon;
- u16 reserved_27_26;
-} __packed;
-
-/**
- * union IpaHwFeatureInfoData_t - parameters for stats/config blob
- *
- * @offset : Location of a feature within the EventInfoData
- * @size : Size of the feature
- */
-union IpaHwFeatureInfoData_t {
- struct IpaHwFeatureInfoParams_t {
- u32 offset:16;
- u32 size:16;
- } __packed params;
- u32 raw32b;
-} __packed;
-
-/**
- * struct IpaHwEventInfoData_t - Structure holding the parameters for
- * statistics and config info
- *
- * @baseAddrOffset : Base Address Offset of the statistics or config
- * structure from IPA_WRAPPER_BASE
- * @IpaHwFeatureInfoData_t : Location and size of each feature within
- * the statistics or config structure
- *
- * @note Information about each feature in the featureInfo[]
- * array is populated at predefined indices per the IPA_HW_FEATURES
- * enum definition
- */
-struct IpaHwEventInfoData_t {
- u32 baseAddrOffset;
- union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
-} __packed;
-
-/**
- * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
- * IPA_HW_2_CPU_EVENT_LOG_INFO Event
- *
- * @featureMask : Mask indicating the features enabled in HW.
- * Refer IPA_HW_FEATURE_MASK
- * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
- * Log Buffer structure
- * @statsInfo : Statistics related information
- * @configInfo : Configuration related information
- *
- * @note The offset location of this structure from IPA_WRAPPER_BASE
- * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
- * Event
- */
-struct IpaHwEventLogInfoData_t {
- u32 featureMask;
- u32 circBuffBaseAddrOffset;
- struct IpaHwEventInfoData_t statsInfo;
- struct IpaHwEventInfoData_t configInfo;
-
-} __packed;
-
-/**
* struct ipa_uc_hdlrs - IPA uC callback functions
* @ipa_uc_loaded_hdlr: Function handler when uC is loaded
* @ipa_uc_event_hdlr: Event handler function
struct ipa_uc_ctx uc_ctx;
struct ipa_uc_wdi_ctx uc_wdi_ctx;
+ struct ipa_uc_ntn_ctx uc_ntn_ctx;
u32 wan_rx_ring_size;
bool skip_uc_pipe_reset;
bool smmu_present;
int ipa2_suspend_wdi_pipe(u32 clnt_hdl);
int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa2_get_smem_restr_bytes(void);
+int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
/*
* To retrieve doorbell physical address of
* wlan pipes
phys_addr_t paddr, size_t size, int prot);
int ipa2_rx_poll(u32 clnt_hdl, int budget);
void ipa2_recycle_wan_skb(struct sk_buff *skb);
+int ipa_ntn_init(void);
+int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats);
+int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *),
+ void *user_data);
#endif /* _IPA_I_H_ */
--- /dev/null
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
+/*
+ * ipa_uc_ntn_event_handler() - uC event callback registered for the
+ * NTN feature; logs IPA_HW_2_CPU_EVENT_NTN_ERROR events.
+ * @uc_sram_mmio: mapped uC shared-memory common area carrying the
+ * event opcode and its immediate parameters.
+ *
+ * Any event opcode other than NTN_ERROR is silently ignored here.
+ */
+static void ipa_uc_ntn_event_handler(
+	struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio)
+{
+	union IpaHwNTNErrorEventData_t ntn_evt;
+
+	if (uc_sram_mmio->eventOp == IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+		/* error details are passed as immediate params in SRAM */
+		ntn_evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+			ntn_evt.params.ntn_error_type,
+			ntn_evt.params.ipa_pipe_number,
+			ntn_evt.params.ntn_ch_err_type);
+	}
+}
+
+/*
+ * ipa_uc_ntn_event_log_info_handler() - consume the uC EVENT_LOG_INFO
+ * blob: verify the firmware advertises NTN, locate the NTN statistics
+ * block inside IPA SRAM and ioremap it so ipa2_get_ntn_stats() can
+ * read the counters later.
+ * @uc_event_top_mmio: mapped event-log info structure reported by uC.
+ */
+static void ipa_uc_ntn_event_log_info_handler(
+struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+	/* bail out if the firmware does not advertise NTN support */
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+		IPAERR("NTN feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	/* the FW-reported stats size must match our struct layout exactly */
+	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].
+		params.size != sizeof(struct IpaHwStatsNTNInfoData_t)) {
+		IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct IpaHwStatsNTNInfoData_t),
+			uc_event_top_mmio->statsInfo.
+			featureInfo[IPA_HW_FEATURE_NTN].params.size);
+		return;
+	}
+
+	/* offset of the NTN stats block relative to IPA_WRAPPER_BASE */
+	ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = uc_event_top_mmio->
+		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+		featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+	/* NOTE(review): informational print at IPAERR level — looks like it
+	 * should be IPADBG; confirm intended log level.
+	 */
+	IPAERR("NTN stats ofst=0x%x\n", ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+	/* reject an offset that would place the stats outside the SRAM window */
+	if (ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+		sizeof(struct IpaHwStatsNTNInfoData_t) >=
+		ipa_ctx->ctrl->ipa_reg_base_ofst +
+		IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
+		ipa_ctx->smem_sz) {
+		IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+			ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+		return;
+	}
+
+	/* map the stats region; left mapped for the driver's lifetime */
+	ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+		ioremap(ipa_ctx->ipa_wrapper_base +
+		ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+		sizeof(struct IpaHwStatsNTNInfoData_t));
+	if (!ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc ntn stats\n");
+		return;
+	}
+}
+
+/**
+ * ipa2_get_ntn_stats() - Query NTN statistics from uc
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats)
+{
+/* copy a single counter out of the ioremapped SRAM stats block */
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+	ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+	ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	/* ntn_uc_stats_mmio is set by the EVENT_LOG_INFO handler; NULL
+	 * means the uC never reported an NTN stats region.
+	 */
+	if (!stats || !ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("bad parms stats=%p ntn_stats=%p\n",
+			stats,
+			ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+		return -EINVAL;
+	}
+
+	/* keep IPA clocks voted while touching SRAM */
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	TX_STATS(num_pkts_processed);
+	TX_STATS(tail_ptr_val);
+	TX_STATS(num_db_fired);
+	TX_STATS(tx_comp_ring_stats.ringFull);
+	TX_STATS(tx_comp_ring_stats.ringEmpty);
+	TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+	TX_STATS(tx_comp_ring_stats.ringUsageLow);
+	TX_STATS(tx_comp_ring_stats.RingUtilCount);
+	TX_STATS(bam_stats.bamFifoFull);
+	TX_STATS(bam_stats.bamFifoEmpty);
+	TX_STATS(bam_stats.bamFifoUsageHigh);
+	TX_STATS(bam_stats.bamFifoUsageLow);
+	TX_STATS(bam_stats.bamUtilCount);
+	TX_STATS(num_db);
+	TX_STATS(num_unexpected_db);
+	TX_STATS(num_bam_int_handled);
+	TX_STATS(num_bam_int_in_non_running_state);
+	TX_STATS(num_qmb_int_handled);
+	TX_STATS(num_bam_int_handled_while_wait_for_bam);
+	TX_STATS(num_bam_int_handled_while_not_in_bam);
+
+	RX_STATS(max_outstanding_pkts);
+	RX_STATS(num_pkts_processed);
+	RX_STATS(rx_ring_rp_value);
+	RX_STATS(rx_ind_ring_stats.ringFull);
+	RX_STATS(rx_ind_ring_stats.ringEmpty);
+	RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+	RX_STATS(rx_ind_ring_stats.ringUsageLow);
+	RX_STATS(rx_ind_ring_stats.RingUtilCount);
+	RX_STATS(bam_stats.bamFifoFull);
+	RX_STATS(bam_stats.bamFifoEmpty);
+	RX_STATS(bam_stats.bamFifoUsageHigh);
+	RX_STATS(bam_stats.bamFifoUsageLow);
+	RX_STATS(bam_stats.bamUtilCount);
+	RX_STATS(num_bam_int_handled);
+	RX_STATS(num_db);
+	RX_STATS(num_unexpected_db);
+	RX_STATS(num_pkts_in_dis_uninit_state);
+	RX_STATS(num_bam_int_handled_while_not_in_bam);
+	RX_STATS(num_bam_int_handled_while_in_bam_state);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa2_register_ipa_ready_cb() - register a callback to be invoked
+ * once the IPA uC is loaded
+ * @ipa_ready_cb: callback to invoke from ipa_uc_ntn_loaded_handler()
+ * @user_data: opaque pointer handed back through @ipa_ready_cb
+ *
+ * Returns: 0 if the callback was registered (uC not yet up),
+ * -EEXIST if the uC is already loaded (caller may proceed directly)
+ */
+int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
+{
+	int ret;
+
+	ret = ipa2_uc_state_check();
+	if (ret) {
+		/* uC not ready yet: stash the callback so the loaded
+		 * handler can fire it later, and report success.
+		 */
+		ipa_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
+		ipa_ctx->uc_ntn_ctx.priv = user_data;
+		return 0;
+	}
+
+	return -EEXIST;
+}
+
+/*
+ * ipa_uc_ntn_loaded_handler() - uC "loaded" callback for the NTN
+ * feature: fire the client's registered ready callback (if any),
+ * exactly once, then clear it.
+ */
+static void ipa_uc_ntn_loaded_handler(void)
+{
+	if (!ipa_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return;
+	}
+
+	if (ipa_ctx->uc_ntn_ctx.uc_ready_cb) {
+		ipa_ctx->uc_ntn_ctx.uc_ready_cb(
+			ipa_ctx->uc_ntn_ctx.priv);
+
+		/* one-shot: drop the callback after invoking it */
+		ipa_ctx->uc_ntn_ctx.uc_ready_cb =
+			NULL;
+		ipa_ctx->uc_ntn_ctx.priv = NULL;
+	}
+}
+
+/*
+ * ipa_ntn_init() - register the NTN event/log-info/loaded handlers
+ * with the generic uC layer.
+ *
+ * Returns: always 0
+ */
+int ipa_ntn_init(void)
+{
+	struct ipa_uc_hdlrs uc_ntn_cbs = { 0 };
+
+	uc_ntn_cbs.ipa_uc_event_hdlr = ipa_uc_ntn_event_handler;
+	uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
+		ipa_uc_ntn_event_log_info_handler;
+	uc_ntn_cbs.ipa_uc_loaded_hdlr =
+		ipa_uc_ntn_loaded_handler;
+
+	ipa_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+	return 0;
+}
+
+/*
+ * ipa2_uc_send_ntn_setup_pipe_cmd() - build an OFFLOAD_CHANNEL_SET_UP
+ * command for one NTN pipe and send it to the uC.
+ * @ntn_info: ring/buffer-pool physical addresses and sizes for the pipe
+ * @dir: IPA_NTN_RX_DIR or IPA_NTN_TX_DIR
+ *
+ * The command payload is placed in DMA-coherent memory so the uC can
+ * read it; the buffer is freed once the uC acks (or the cmd times out).
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int ipa2_uc_send_ntn_setup_pipe_cmd(
+	struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+	int ipa_ep_idx;
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct IpaHwNtnSetUpCmdData_t *Ntn_params;
+	struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+
+	if (ntn_info == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	/* NOTE(review): only -1 is treated as failure — confirm
+	 * ipa_get_ep_mapping() cannot return other negative values.
+	 */
+	ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to get ep idx.\n");
+		return -EFAULT;
+	}
+
+	IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+	IPADBG("ring_base_pa = 0x%pa\n",
+		&ntn_info->ring_base_pa);
+	IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+	IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+	IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+	IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+	IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+	cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+	/* populate the NTN-specific part of the setup command */
+	Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+	Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+	Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+	Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+	Ntn_params->num_buffers = ntn_info->num_buffers;
+	Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+	Ntn_params->data_buff_size = ntn_info->data_buff_size;
+	Ntn_params->ipa_pipe_number = ipa_ep_idx;
+	Ntn_params->dir = dir;
+
+	/* synchronous command; 10s timeout for the uC to respond */
+	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+	if (result)
+		result = -EFAULT;
+
+	dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return result;
+}
+
+/**
+ * ipa2_setup_uc_ntn_pipes() - setup uc offload pipes
+ * @in: UL and DL pipe setup parameters
+ * @notify: client callback installed on the UL endpoint
+ * @priv: opaque pointer handed back through @notify
+ * @hdr_len: header length programmed on both endpoints
+ * @outp: [out] uC doorbell physical addresses for UL/DL
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+	ipa_notify_cb notify, void *priv, u8 hdr_len,
+	struct ipa_ntn_conn_out_params *outp)
+{
+	int ipa_ep_idx_ul, ipa_ep_idx_dl;
+	struct ipa_ep_context *ep_ul, *ep_dl;
+	int result = 0;
+
+	if (in == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+	ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+	if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) {
+		IPAERR("fail to alloc EP.\n");
+		return -EFAULT;
+	}
+
+	ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->valid || ep_dl->valid) {
+		IPAERR("EP already allocated ul:%d dl:%d\n",
+			ep_ul->valid, ep_dl->valid);
+		return -EFAULT;
+	}
+
+	/* clear everything up to (but not including) the sys context */
+	memset(ep_ul, 0, offsetof(struct ipa_ep_context, sys));
+	memset(ep_dl, 0, offsetof(struct ipa_ep_context, sys));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* setup ul ep cfg */
+	ep_ul->valid = 1;
+	ep_ul->client = in->ul.client;
+	result = ipa_enable_data_path(ipa_ep_idx_ul);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_ul);
+		/* goto fail (not return) so the active-clients vote
+		 * taken above is released
+		 */
+		result = -EFAULT;
+		goto fail;
+	}
+	ep_ul->client_notify = notify;
+	ep_ul->priv = priv;
+
+	memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+	ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+	ep_ul->cfg.hdr.hdr_len = hdr_len;
+	ep_ul->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa2_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+		IPAERR("fail to setup ul pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+		IPAERR("fail to send cmd to uc for ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa_install_dflt_flt_rules(ipa_ep_idx_ul);
+	outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+	ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->ul.client,
+		ipa_ep_idx_ul);
+
+	/* setup dl ep cfg */
+	ep_dl->valid = 1;
+	ep_dl->client = in->dl.client;
+	result = ipa_enable_data_path(ipa_ep_idx_dl);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_dl);
+		result = -EFAULT;
+		goto fail;
+	}
+
+	memset(&ep_dl->cfg, 0, sizeof(ep_dl->cfg));
+	ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+	ep_dl->cfg.hdr.hdr_len = hdr_len;
+	ep_dl->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa2_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+		IPAERR("fail to setup dl pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+		IPAERR("fail to send cmd to uc for dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+	ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->dl.client,
+		ipa_ep_idx_dl);
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+/**
+ * ipa2_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ * @ipa_ep_idx_ul: endpoint index of the UL (Rx-direction) pipe
+ * @ipa_ep_idx_dl: endpoint index of the DL (Tx-direction) pipe
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+		int ipa_ep_idx_dl)
+{
+	struct ipa_mem_buffer cmd;
+	struct ipa_ep_context *ep_ul, *ep_dl;
+	struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+	union IpaHwNtnCommonChCmdData_t *tear;
+	int result = 0;
+
+	IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+	IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+	/* indices are caller-supplied; validate before indexing ep[] */
+	if (ipa_ep_idx_ul < 0 || ipa_ep_idx_ul >= ipa_ctx->ipa_num_pipes ||
+		ipa_ep_idx_dl < 0 || ipa_ep_idx_dl >= ipa_ctx->ipa_num_pipes) {
+		IPAERR("bad parm, ul %d dl %d\n",
+			ipa_ep_idx_ul, ipa_ep_idx_dl);
+		return -EINVAL;
+	}
+
+	ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+		ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+		IPAERR("channel bad state: ul %d dl %d\n",
+			ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+		return -EFAULT;
+	}
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	/* teardown the UL pipe */
+	cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+	cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+	tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+	tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa_disable_data_path(ipa_ep_idx_ul);
+	ipa_delete_dflt_flt_rules(ipa_ep_idx_ul);
+	memset(&ipa_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa_ep_context));
+	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+	/* teardown the DL pipe (reuses the same DMA command buffer) */
+	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa_disable_data_path(ipa_ep_idx_dl);
+	memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context));
+	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+
+fail:
+	dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
--- /dev/null
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+/**
+ * @brief Enum value determined based on the feature it
+ * corresponds to
+ * +----------------+----------------+
+ * | 3 bits | 5 bits |
+ * +----------------+----------------+
+ * | HW_FEATURE | OPCODE |
+ * +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
+#define EXTRACT_UC_FEATURE(value) (value >> 5)
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa_hw_features - Values that represent the features supported in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to generic offload operation
+ * in IPA HW
+ *
+ * Note: 0x2 is unassigned here — presumably reserved; confirm against
+ * the uC interface spec.
+*/
+enum ipa_hw_features {
+	IPA_HW_FEATURE_COMMON = 0x0,
+	IPA_HW_FEATURE_MHI = 0x1,
+	IPA_HW_FEATURE_WDI = 0x3,
+	IPA_HW_FEATURE_NTN = 0x4,
+	IPA_HW_FEATURE_OFFLOAD = 0x5,
+	IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter. The parameter filed can hold 32 bits
+ * of parameters (immediate parameters) and point on structure in
+ * system memory (in such case the address must be accessible
+ * for HW)
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter filed can hold
+ * 32 bits of parameters (immediate parameters) and point
+ * on structure in system memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 bits
+ * of parameters (immediate parameters) and point on
+ * structure in system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the error
+ * type.
+ * @warningCounter : The warnings counter. The counter carries information
+ * regarding non fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+ u8 cmdOp;
+ u8 reserved_01;
+ u16 reserved_03_02;
+ u32 cmdParams;
+ u8 responseOp;
+ u8 reserved_09;
+ u16 reserved_0B_0A;
+ u32 responseParams;
+ u8 eventOp;
+ u8 reserved_11;
+ u16 reserved_13_12;
+ u32 eventParams;
+ u32 reserved_1B_18;
+ u32 firstErrorAddress;
+ u8 hwState;
+ u8 warningCounter;
+ u16 reserved_23_22;
+ u16 interfaceVersionCommon;
+ u16 reserved_27_26;
+} __packed;
+
+/**
+ * union IpaHwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union IpaHwFeatureInfoData_t {
+ struct IpaHwFeatureInfoParams_t {
+ u32 offset:16;
+ u32 size:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * struct IpaHwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @IpaHwFeatureInfoData_t : Location and size of each feature within
+ * the statistics or config structure
+ *
+ * @note Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct IpaHwEventInfoData_t {
+ u32 baseAddrOffset;
+ union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @featureMask : Mask indicating the features enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information
+ * @configInfo : Configuration related information
+ *
+ * @note The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+ u32 featureMask;
+ u32 circBuffBaseAddrOffset;
+ struct IpaHwEventInfoData_t statsInfo;
+ struct IpaHwEventInfoData_t configInfo;
+
+} __packed;
+
+/**
+ * struct ipa_uc_ntn_ctx
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: Neutrino stats
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa_uc_ntn_ctx {
+ u32 ntn_uc_stats_ofst;
+ struct IpaHwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+ void *priv;
+ ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa_hw_2_cpu_ntn_events - Values that represent HW event
+ * to be sent to CPU
+ * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
+ * detected an error in NTN
+ *
+ */
+enum ipa_hw_2_cpu_ntn_events {
+ IPA_HW_2_CPU_EVENT_NTN_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
+};
+
+
+/**
+ * enum ipa_hw_ntn_errors - NTN specific error types.
+ * @IPA_HW_NTN_ERROR_NONE : No error persists
+ * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa_hw_ntn_errors {
+ IPA_HW_NTN_ERROR_NONE = 0,
+ IPA_HW_NTN_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa_hw_ntn_channel_states {
+ IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_NTN_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * enum ipa_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error persists
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ * num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY:
+ * @IPA_HW_NTN_CH_ERR_RESERVED:
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa_hw_ntn_channel_errors {
+ IPA_HW_NTN_CH_ERR_NONE = 0,
+ IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+ IPA_HW_NTN_TX_FSM_ERROR = 2,
+ IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3,
+ IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+ IPA_HW_NTN_RX_FSM_ERROR = 5,
+ IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6,
+ IPA_HW_NTN_CH_ERR_RESERVED = 0xFF
+};
+
+
+/**
+ * struct IpaHwNtnSetUpCmdData_t - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ * ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ * buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ * Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ * Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of the each data buffer allocated in
+ * DDR
+ */
+struct IpaHwNtnSetUpCmdData_t {
+ u32 ring_base_pa;
+ u32 buff_pool_base_pa;
+ u16 ntn_ring_size;
+ u16 num_buffers;
+ u32 ntn_reg_base_ptr_pa;
+ u8 ipa_pipe_number;
+ u8 dir;
+ u16 data_buff_size;
+
+} __packed;
+
+/**
+ * union IpaHwNtnCommonChCmdData_t - Structure holding the
+ * parameters for Ntn Tear down command data params
+ *
+ *@ipa_pipe_number: IPA pipe number. This could be Tx or an Rx pipe
+ */
+union IpaHwNtnCommonChCmdData_t {
+	struct IpaHwNtnCommonChCmdParams_t {
+		u32 ipa_pipe_number :8;
+		u32 reserved :24;
+	} __packed params;
+	uint32_t raw32b;
+} __packed;
+
+
+/**
+ * struct IpaHwNTNErrorEventData_t - Structure holding the
+ * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed
+ * as immediate params in the shared memory
+ *
+ *@ntn_error_type: type of NTN error (IPA_HW_NTN_ERRORS)
+ *@ipa_pipe_number: IPA pipe number on which error has happened
+ * Applicable only if error type indicates channel error
+ *@ntn_ch_err_type: Information about the channel error (if
+ * available)
+ */
+union IpaHwNTNErrorEventData_t {
+ struct IpaHwNTNErrorEventParams_t {
+ u32 ntn_error_type :8;
+ u32 reserved :8;
+ u32 ipa_pipe_number :8;
+ u32 ntn_ch_err_type :8;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTNRxInfoData_t - NTN Structure holding the
+ * Rx pipe information
+ *
+ *@max_outstanding_pkts: Number of outstanding packets in Rx
+ * Ring
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@rx_ring_rp_value: Read pointer last advertized to the WLAN FW
+ *
+ *@ntn_ch_err_type: Information about the channel error (if
+ * available)
+ *@rx_ind_ring_stats:
+ *@bam_stats:
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_pkts_in_dis_uninit_state:
+ *@num_bam_int_handled_while_not_in_bam: Number of Bam
+ * Interrupts handled by FW
+ *@num_bam_int_handled_while_in_bam_state: Number of Bam
+ * Interrupts handled by FW
+ */
+struct NTNRxInfoData_t {
+ u32 max_outstanding_pkts;
+ u32 num_pkts_processed;
+ u32 rx_ring_rp_value;
+ struct IpaHwRingStats_t rx_ind_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_bam_int_handled;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_pkts_in_dis_uninit_state;
+ u32 num_bam_int_handled_while_not_in_bam;
+ u32 num_bam_int_handled_while_in_bam_state;
+} __packed;
+
+
+/**
+ * struct NTNTxInfoData_t - Structure holding the NTN Tx channel
+ * Ensure that this is always word aligned
+ *
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@tail_ptr_val: Latest value of doorbell written to copy engine
+ *@num_db_fired: Number of DB from uC FW to Copy engine
+ *
+ *@tx_comp_ring_stats:
+ *@bam_stats:
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_bam_int_in_non_running_state: Number of Bam interrupts
+ * while not in Running state
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@num_bam_int_handled_while_wait_for_bam: Number of times the
+ * Imm Cmd is injected due to fw_desc change
+ */
+struct NTNTxInfoData_t {
+ u32 num_pkts_processed;
+ u32 tail_ptr_val;
+ u32 num_db_fired;
+ struct IpaHwRingStats_t tx_comp_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_bam_int_handled;
+ u32 num_bam_int_in_non_running_state;
+ u32 num_qmb_int_handled;
+ u32 num_bam_int_handled_while_wait_for_bam;
+ u32 num_bam_int_handled_while_not_in_bam;
+} __packed;
+
+
+/**
+ * struct IpaHwStatsNTNInfoData_t - Structure holding the NTN Tx
+ * channel Ensure that this is always word aligned
+ *
+ */
+struct IpaHwStatsNTNInfoData_t {
+ struct NTNRxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+ struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands - Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ * Offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ * Offload protocol's Tx/ Rx Path
+ */
+enum ipa_cpu_2_hw_offload_commands {
+	IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+	IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+};
+
+
+/**
+ * enum ipa_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is initialized
+ * but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running. Entered after
+ * SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use
+ * in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in
+ */
+enum ipa_hw_offload_channel_states {
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF
+};
+
+
+/**
+ * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent
+ * offload related command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_offload_cmd_resp_status {
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+ IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+ IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+ IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+ IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+ IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+ IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * struct IpaHwSetUpCmd -
+ *
+ *
+ */
+union IpaHwSetUpCmd {
+ struct IpaHwNtnSetUpCmdData_t NtnSetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadSetUpCmdData_t -
+ *
+ *
+ */
+struct IpaHwOffloadSetUpCmdData_t {
+ u8 protocol;
+ union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwCommonChCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ *
+ *
+ */
+union IpaHwCommonChCmd {
+ union IpaHwNtnCommonChCmdData_t NtnCommonCh_params;
+} __packed;
+
+struct IpaHwOffloadCommonChCmdData_t {
+ u8 protocol;
+ union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
- ep->wdi_state |= IPA_WDI_CONNECTED;
+ ep->uc_offload_state |= IPA_WDI_CONNECTED;
IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
return 0;
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
- ep->wdi_state |= IPA_WDI_ENABLED;
+ ep->uc_offload_state |= IPA_WDI_ENABLED;
IPADBG("client (ep: %d) enabled\n", clnt_hdl);
uc_timeout:
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_ENABLED;
+ ep->uc_offload_state &= ~IPA_WDI_ENABLED;
IPADBG("client (ep: %d) disabled\n", clnt_hdl);
uc_timeout:
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
else
IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
- ep->wdi_state |= IPA_WDI_RESUMED;
+ ep->uc_offload_state |= IPA_WDI_RESUMED;
IPADBG("client (ep: %d) resumed\n", clnt_hdl);
uc_timeout:
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
IPA_WDI_RESUMED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
ipa_ctx->tag_process_before_gating = true;
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_RESUMED;
+ ep->uc_offload_state &= ~IPA_WDI_RESUMED;
IPADBG("client (ep: %d) suspended\n", clnt_hdl);
uc_timeout:
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa_ctx->ep[clnt_hdl];
- if (!(ep->wdi_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev;
api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info;
api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel;
+ api_ctrl->ipa_register_ipa_ready_cb = ipa2_register_ipa_ready_cb;
api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks;
api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks;
api_ctrl->ipa_inc_client_enable_clks_no_block =
api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low;
api_ctrl->ipa_rx_poll = ipa2_rx_poll;
api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb;
+ api_ctrl->ipa_setup_uc_ntn_pipes = ipa2_setup_uc_ntn_pipes;
+ api_ctrl->ipa_tear_down_uc_offload_pipes =
+ ipa2_tear_down_uc_offload_pipes;
return 0;
}
obj-$(CONFIG_IPA3) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
- ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o
+ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
else
IPADBG(":wdi init ok\n");
+ result = ipa3_ntn_init();
+ if (result)
+ IPAERR(":ntn init failed (%d)\n", -result);
+ else
+ IPADBG(":ntn init ok\n");
+
ipa3_register_panic_hdlr();
ipa3_ctx->q6_proxy_clk_vote_valid = true;
static struct dentry *dfile_stats;
static struct dentry *dfile_wstats;
static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
static struct dentry *dfile_dbg_cnt;
static struct dentry *dfile_msg;
static struct dentry *dfile_ip4_nat;
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
+/*
+ * ipa3_read_ntn() - debugfs read handler dumping uC NTN statistics.
+ *
+ * Snapshots the NTN stats via ipa3_get_ntn_stats() (to validate the
+ * stats area is mapped), then formats the channel-0 TX and RX counters
+ * read from the shared stats MMIO region into dbg_buff.
+ */
+static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+/* shorthand for channel-0 counters in the uC NTN stats MMIO region */
+#define TX_STATS(y) \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+	struct Ipa3HwStatsNTNInfoData_t stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (!ipa3_get_ntn_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX num_pkts_processed=%u\n"
+			"TX tail_ptr_val=%u\n"
+			"TX num_db_fired=%u\n"
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n"
+			"TX bamFifoFull=%u\n"
+			"TX bamFifoEmpty=%u\n"
+			"TX bamFifoUsageHigh=%u\n"
+			"TX bamFifoUsageLow=%u\n"
+			"TX bamUtilCount=%u\n"
+			"TX num_db=%u\n"
+			"TX num_unexpected_db=%u\n"
+			"TX num_bam_int_handled=%u\n"
+			"TX num_bam_int_in_non_running_state=%u\n"
+			"TX num_qmb_int_handled=%u\n"
+			"TX num_bam_int_handled_while_wait_for_bam=%u\n"
+			"TX num_bam_int_handled_while_not_in_bam=%u\n",
+			TX_STATS(num_pkts_processed),
+			TX_STATS(tail_ptr_val),
+			TX_STATS(num_db_fired),
+			TX_STATS(tx_comp_ring_stats.ringFull),
+			TX_STATS(tx_comp_ring_stats.ringEmpty),
+			TX_STATS(tx_comp_ring_stats.ringUsageHigh),
+			TX_STATS(tx_comp_ring_stats.ringUsageLow),
+			TX_STATS(tx_comp_ring_stats.RingUtilCount),
+			TX_STATS(bam_stats.bamFifoFull),
+			TX_STATS(bam_stats.bamFifoEmpty),
+			TX_STATS(bam_stats.bamFifoUsageHigh),
+			TX_STATS(bam_stats.bamFifoUsageLow),
+			TX_STATS(bam_stats.bamUtilCount),
+			TX_STATS(num_db),
+			TX_STATS(num_unexpected_db),
+			TX_STATS(num_bam_int_handled),
+			TX_STATS(num_bam_int_in_non_running_state),
+			TX_STATS(num_qmb_int_handled),
+			TX_STATS(num_bam_int_handled_while_wait_for_bam),
+			TX_STATS(num_bam_int_handled_while_not_in_bam));
+		cnt += nbytes;
+		/*
+		 * NOTE(review): the last two RX labels
+		 * (num_ic_inj_vdev_change/num_ic_inj_fw_desc_change) are
+		 * printed from the *_while_not_in_bam/*_while_in_bam_state
+		 * fields — looks like deliberate field reuse; confirm
+		 * against the uC stats layout.
+		 */
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX max_outstanding_pkts=%u\n"
+			"RX num_pkts_processed=%u\n"
+			"RX rx_ring_rp_value=%u\n"
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n"
+			"RX bamFifoFull=%u\n"
+			"RX bamFifoEmpty=%u\n"
+			"RX bamFifoUsageHigh=%u\n"
+			"RX bamFifoUsageLow=%u\n"
+			"RX bamUtilCount=%u\n"
+			"RX num_bam_int_handled=%u\n"
+			"RX num_db=%u\n"
+			"RX num_unexpected_db=%u\n"
+			"RX num_pkts_in_dis_uninit_state=%u\n"
+			"num_ic_inj_vdev_change=%u\n"
+			"num_ic_inj_fw_desc_change=%u\n",
+			RX_STATS(max_outstanding_pkts),
+			RX_STATS(num_pkts_processed),
+			RX_STATS(rx_ring_rp_value),
+			RX_STATS(rx_ind_ring_stats.ringFull),
+			RX_STATS(rx_ind_ring_stats.ringEmpty),
+			RX_STATS(rx_ind_ring_stats.ringUsageHigh),
+			RX_STATS(rx_ind_ring_stats.ringUsageLow),
+			RX_STATS(rx_ind_ring_stats.RingUtilCount),
+			RX_STATS(bam_stats.bamFifoFull),
+			RX_STATS(bam_stats.bamFifoEmpty),
+			RX_STATS(bam_stats.bamFifoUsageHigh),
+			RX_STATS(bam_stats.bamFifoUsageLow),
+			RX_STATS(bam_stats.bamUtilCount),
+			RX_STATS(num_bam_int_handled),
+			RX_STATS(num_db),
+			RX_STATS(num_unexpected_db),
+			RX_STATS(num_pkts_in_dis_uninit_state),
+			RX_STATS(num_bam_int_handled_while_not_in_bam),
+			RX_STATS(num_bam_int_handled_while_in_bam_state));
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"Fail to read NTN stats\n");
+		cnt += nbytes;
+	}
+
+/* keep the helper macros scoped to this function */
+#undef TX_STATS
+#undef RX_STATS
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
.read = ipa3_read_wdi,
};
+const struct file_operations ipa3_ntn_ops = {
+ .read = ipa3_read_ntn,
+};
+
const struct file_operations ipa3_msg_ops = {
.read = ipa3_read_msg,
};
goto fail;
}
+ dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
+ &ipa3_ntn_ops);
+ if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
+ IPAERR("fail to create file for debug_fs ntn stats\n");
+ goto fail;
+ }
+
dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
&ipa3_dbg_cnt_ops);
if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"
#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
bool skip_ep_cfg;
bool keep_ipa_awake;
struct ipa3_wlan_stats wstats;
- u32 wdi_state;
+ u32 uc_offload_state;
bool disconnect_in_progress;
u32 qmi_request_sent;
bool napi_enabled;
struct ipa3_controller;
/**
- * @brief Enum value determined based on the feature it
- * corresponds to
- * +----------------+----------------+
- * | 3 bits | 5 bits |
- * +----------------+----------------+
- * | HW_FEATURE | OPCODE |
- * +----------------+----------------+
- *
- */
-#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
-#define EXTRACT_UC_FEATURE(value) (value >> 5)
-
-#define IPA_HW_NUM_FEATURES 0x8
-
-/**
- * enum ipa3_hw_features - Values that represent the features supported in IPA HW
- * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
- * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
- * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
- * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
- * @IPA_HW_FEATURE_ZIP: Feature related to CMP/DCMP operation in IPA HW
-*/
-enum ipa3_hw_features {
- IPA_HW_FEATURE_COMMON = 0x0,
- IPA_HW_FEATURE_MHI = 0x1,
- IPA_HW_FEATURE_POWER_COLLAPSE = 0x2,
- IPA_HW_FEATURE_WDI = 0x3,
- IPA_HW_FEATURE_ZIP = 0x4,
- IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
-};
-
-/**
- * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
- * @IPA_HW_2_CPU_EVENT_NO_OP : No event present
- * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
- * device
- * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
- */
-enum ipa3_hw_2_cpu_events {
- IPA_HW_2_CPU_EVENT_NO_OP =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
- IPA_HW_2_CPU_EVENT_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
- IPA_HW_2_CPU_EVENT_LOG_INFO =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
-};
-
-/**
- * enum ipa3_hw_errors - Common error types.
- * @IPA_HW_ERROR_NONE : No error persists
- * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
- * @IPA_HW_DMA_ERROR : Unexpected DMA error
- * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
- * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
- * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
- * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
- */
-enum ipa3_hw_errors {
- IPA_HW_ERROR_NONE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
- IPA_HW_INVALID_DOORBELL_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
- IPA_HW_DMA_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
- IPA_HW_FATAL_SYSTEM_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
- IPA_HW_INVALID_OPCODE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
- IPA_HW_INVALID_PARAMS =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
- IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
- IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
- IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8)
-};
-
-/**
- * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
- * section in 128B shared memory located in offset zero of SW Partition in IPA
- * SRAM.
- * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
- * @cmdParams : CPU->HW command parameter lower 32bit.
- * @cmdParams_hi : CPU->HW command parameter higher 32bit.
- * of parameters (immediate parameters) and point on structure in system memory
- * (in such case the address must be accessible for HW)
- * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
- * @responseParams : HW->CPU response parameter. The parameter filed can hold 32
- * bits of parameters (immediate parameters) and point on structure in system
- * memory
- * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
- * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 bits of
- * parameters (immediate parameters) and point on structure in system memory
- * @firstErrorAddress : Contains the address of first error-source on SNOC
- * @hwState : State of HW. The state carries information regarding the error type.
- * @warningCounter : The warnings counter. The counter carries information regarding
- * non fatal errors in HW
- * @interfaceVersionCommon : The Common interface version as reported by HW
- *
- * The shared memory is used for communication between IPA HW and CPU.
- */
-struct IpaHwSharedMemCommonMapping_t {
- u8 cmdOp;
- u8 reserved_01;
- u16 reserved_03_02;
- u32 cmdParams;
- u32 cmdParams_hi;
- u8 responseOp;
- u8 reserved_0D;
- u16 reserved_0F_0E;
- u32 responseParams;
- u8 eventOp;
- u8 reserved_15;
- u16 reserved_17_16;
- u32 eventParams;
- u32 firstErrorAddress;
- u8 hwState;
- u8 warningCounter;
- u16 reserved_23_22;
- u16 interfaceVersionCommon;
- u16 reserved_27_26;
-} __packed;
-
-/**
- * union IpaHwFeatureInfoData_t - parameters for stats/config blob
- *
- * @offset : Location of a feature within the EventInfoData
- * @size : Size of the feature
- */
-union IpaHwFeatureInfoData_t {
- struct IpaHwFeatureInfoParams_t {
- u32 offset:16;
- u32 size:16;
- } __packed params;
- u32 raw32b;
-} __packed;
-
-/**
- * union IpaHwErrorEventData_t - HW->CPU Common Events
- * @errorType : Entered when a system error is detected by the HW. Type of
- * error is specified by IPA_HW_ERRORS
- * @reserved : Reserved
- */
-union IpaHwErrorEventData_t {
- struct IpaHwErrorEventParams_t {
- u32 errorType:8;
- u32 reserved:24;
- } __packed params;
- u32 raw32b;
-} __packed;
-
-/**
- * struct IpaHwEventInfoData_t - Structure holding the parameters for
- * statistics and config info
- *
- * @baseAddrOffset : Base Address Offset of the statistics or config
- * structure from IPA_WRAPPER_BASE
- * @IpaHwFeatureInfoData_t : Location and size of each feature within
- * the statistics or config structure
- *
- * @note Information about each feature in the featureInfo[]
- * array is populated at predefined indices per the IPA_HW_FEATURES
- * enum definition
- */
-struct IpaHwEventInfoData_t {
- u32 baseAddrOffset;
- union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
-} __packed;
-
-/**
- * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
- * IPA_HW_2_CPU_EVENT_LOG_INFO Event
- *
- * @featureMask : Mask indicating the features enabled in HW.
- * Refer IPA_HW_FEATURE_MASK
- * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
- * Log Buffer structure
- * @statsInfo : Statistics related information
- * @configInfo : Configuration related information
- *
- * @note The offset location of this structure from IPA_WRAPPER_BASE
- * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
- * Event
- */
-struct IpaHwEventLogInfoData_t {
- u32 featureMask;
- u32 circBuffBaseAddrOffset;
- struct IpaHwEventInfoData_t statsInfo;
- struct IpaHwEventInfoData_t configInfo;
-
-} __packed;
-
-/**
* struct ipa3_uc_hdlrs - IPA uC callback functions
* @ipa_uc_loaded_hdlr: Function handler when uC is loaded
* @ipa_uc_event_hdlr: Event handler function
struct ipa3_uc_ctx uc_ctx;
struct ipa3_uc_wdi_ctx uc_wdi_ctx;
+ struct ipa3_uc_ntn_ctx uc_ntn_ctx;
u32 wan_rx_ring_size;
bool skip_uc_pipe_reset;
enum ipa_transport_type transport_prototype;
int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa3_get_smem_restr_bytes(void);
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
/*
* To retrieve doorbell physical address of
* wlan pipes
int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map);
int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr,
u32 size, bool map);
+int ipa3_ntn_init(void);
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats);
#endif /* _IPA3_I_H_ */
--- /dev/null
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
+/*
+ * ipa3_uc_ntn_event_handler() - uC shared-memory event callback for the
+ * NTN feature.
+ *
+ * Invoked from the common uC event dispatch (registered in
+ * ipa3_ntn_init()). Only handles IPA_HW_2_CPU_EVENT_NTN_ERROR: decodes
+ * eventParams as an Ipa3HwNTNErrorEventData_t and logs it.
+ */
+static void ipa3_uc_ntn_event_handler(struct IpaHwSharedMemCommonMapping_t
+	*uc_sram_mmio)
+
+{
+	union Ipa3HwNTNErrorEventData_t ntn_evt;
+
+	if (uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+		/* eventParams carries the packed NTN error descriptor */
+		ntn_evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+			ntn_evt.params.ntn_error_type,
+			ntn_evt.params.ipa_pipe_number,
+			ntn_evt.params.ntn_ch_err_type);
+	}
+}
+
+/*
+ * ipa3_uc_ntn_event_log_info_handler() - uC LOG_INFO event callback for
+ * the NTN feature.
+ *
+ * Validates that the uC advertises the NTN feature and that the
+ * advertised stats blob matches struct Ipa3HwStatsNTNInfoData_t, checks
+ * the stats offset lies within SRAM, then ioremaps the stats area into
+ * uc_ntn_ctx.ntn_uc_stats_mmio for ipa3_get_ntn_stats() to read.
+ */
+static void ipa3_uc_ntn_event_log_info_handler(
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+		IPAERR("NTN feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].
+		params.size != sizeof(struct Ipa3HwStatsNTNInfoData_t)) {
+		IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct Ipa3HwStatsNTNInfoData_t),
+			uc_event_top_mmio->statsInfo.
+			featureInfo[IPA_HW_FEATURE_NTN].params.size);
+		return;
+	}
+
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = uc_event_top_mmio->
+		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+		featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+	/* informational print, not an error: demote from IPAERR */
+	IPADBG("NTN stats ofst=0x%x\n",
+		ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+	if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+		sizeof(struct Ipa3HwStatsNTNInfoData_t) >=
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+		ipa3_ctx->smem_sz) {
+		IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+			ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+		return;
+	}
+
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+		ioremap(ipa3_ctx->ipa_wrapper_base +
+			ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+			sizeof(struct Ipa3HwStatsNTNInfoData_t));
+	if (!ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc ntn stats\n");
+		return;
+	}
+}
+
+/**
+ * ipa3_get_ntn_stats() - Query NTN statistics from uc
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Copies the channel-0 TX/RX counters from the uC NTN stats MMIO
+ * region (mapped in ipa3_uc_ntn_event_log_info_handler()) into @stats,
+ * holding an active-clients vote for the duration of the copy.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
+{
+/* copy one channel-0 counter from the stats MMIO region into @stats */
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+	if (unlikely(!ipa3_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	/* ntn_uc_stats_mmio is NULL until the uC LOG_INFO event arrives */
+	if (!stats || !ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("bad parms stats=%p ntn_stats=%p\n",
+			stats,
+			ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	TX_STATS(num_pkts_processed);
+	TX_STATS(tail_ptr_val);
+	TX_STATS(num_db_fired);
+	TX_STATS(tx_comp_ring_stats.ringFull);
+	TX_STATS(tx_comp_ring_stats.ringEmpty);
+	TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+	TX_STATS(tx_comp_ring_stats.ringUsageLow);
+	TX_STATS(tx_comp_ring_stats.RingUtilCount);
+	TX_STATS(bam_stats.bamFifoFull);
+	TX_STATS(bam_stats.bamFifoEmpty);
+	TX_STATS(bam_stats.bamFifoUsageHigh);
+	TX_STATS(bam_stats.bamFifoUsageLow);
+	TX_STATS(bam_stats.bamUtilCount);
+	TX_STATS(num_db);
+	TX_STATS(num_unexpected_db);
+	TX_STATS(num_bam_int_handled);
+	TX_STATS(num_bam_int_in_non_running_state);
+	TX_STATS(num_qmb_int_handled);
+	TX_STATS(num_bam_int_handled_while_wait_for_bam);
+	TX_STATS(num_bam_int_handled_while_not_in_bam);
+
+	RX_STATS(max_outstanding_pkts);
+	RX_STATS(num_pkts_processed);
+	RX_STATS(rx_ring_rp_value);
+	RX_STATS(rx_ind_ring_stats.ringFull);
+	RX_STATS(rx_ind_ring_stats.ringEmpty);
+	RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+	RX_STATS(rx_ind_ring_stats.ringUsageLow);
+	RX_STATS(rx_ind_ring_stats.RingUtilCount);
+	RX_STATS(bam_stats.bamFifoFull);
+	RX_STATS(bam_stats.bamFifoEmpty);
+	RX_STATS(bam_stats.bamFifoUsageHigh);
+	RX_STATS(bam_stats.bamFifoUsageLow);
+	RX_STATS(bam_stats.bamUtilCount);
+	RX_STATS(num_bam_int_handled);
+	RX_STATS(num_db);
+	RX_STATS(num_unexpected_db);
+	RX_STATS(num_pkts_in_dis_uninit_state);
+	RX_STATS(num_bam_int_handled_while_not_in_bam);
+	RX_STATS(num_bam_int_handled_while_in_bam_state);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/*
+ * ipa3_ntn_init() - register the NTN event callbacks with the common
+ * uC event dispatch. Always succeeds.
+ */
+int ipa3_ntn_init(void)
+{
+	struct ipa3_uc_hdlrs uc_ntn_cbs = {
+		.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler,
+		.ipa_uc_event_log_info_hdlr =
+			ipa3_uc_ntn_event_log_info_handler,
+	};
+
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+	return 0;
+}
+
+/*
+ * ipa3_uc_send_ntn_setup_pipe_cmd() - send an OFFLOAD_CHANNEL_SET_UP
+ * command to the uC for one NTN pipe.
+ * @ntn_info: ring/buffer-pool description for the channel
+ * @dir: IPA_NTN_RX_DIR or IPA_NTN_TX_DIR
+ *
+ * Marshals @ntn_info into a DMA-coherent IpaHwOffloadSetUpCmdData_t and
+ * sends it synchronously (10 s timeout). Returns 0 on success,
+ * -EINVAL/-EFAULT/-ENOMEM on failure.
+ */
+static int ipa3_uc_send_ntn_setup_pipe_cmd(
+	struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+	int ipa_ep_idx;
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct Ipa3HwNtnSetUpCmdData_t *Ntn_params;
+	struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+
+	if (ntn_info == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to get ep idx.\n");
+		return -EFAULT;
+	}
+
+	IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+	IPADBG("ring_base_pa = 0x%pa\n",
+		&ntn_info->ring_base_pa);
+	IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+	IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+	IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+	IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+	IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+
+	/* command buffer must be DMA-visible to the uC */
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+	cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+	Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+	Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+	Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+	Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+	Ntn_params->num_buffers = ntn_info->num_buffers;
+	Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+	Ntn_params->data_buff_size = ntn_info->data_buff_size;
+	Ntn_params->ipa_pipe_number = ipa_ep_idx;
+	Ntn_params->dir = dir;
+
+	/* synchronous: blocks up to 10 s waiting for the uC response */
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result)
+		result = -EFAULT;
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return result;
+}
+
+/**
+ * ipa3_setup_uc_ntn_pipes() - setup uc offload pipes
+ * @in: [in] UL/DL channel setup info from the client
+ * @notify: [in] client notification callback, stored on the UL EP
+ * @priv: [in] opaque client context passed back via @notify
+ * @hdr_len: [in] header length programmed into both EP configs
+ * @outp: [out] uC doorbell physical addresses for UL and DL
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+	ipa_notify_cb notify, void *priv, u8 hdr_len,
+	struct ipa_ntn_conn_out_params *outp)
+{
+	struct ipa3_ep_context *ep_ul;
+	struct ipa3_ep_context *ep_dl;
+	int ipa_ep_idx_ul;
+	int ipa_ep_idx_dl;
+	int result = 0;
+
+	if (in == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+	ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+	if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) {
+		IPAERR("fail to alloc EP.\n");
+		return -EFAULT;
+	}
+
+	ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->valid || ep_dl->valid) {
+		IPAERR("EP already allocated.\n");
+		return -EFAULT;
+	}
+
+	memset(ep_ul, 0, offsetof(struct ipa3_ep_context, sys));
+	memset(ep_dl, 0, offsetof(struct ipa3_ep_context, sys));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* setup ul ep cfg */
+	ep_ul->valid = 1;
+	ep_ul->client = in->ul.client;
+	result = ipa3_enable_data_path(ipa_ep_idx_ul);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_ul);
+		/* goto fail (not return) so the clock vote is dropped */
+		result = -EFAULT;
+		goto fail;
+	}
+	ep_ul->client_notify = notify;
+	ep_ul->priv = priv;
+
+	memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+	ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+	ep_ul->cfg.hdr.hdr_len = hdr_len;
+	ep_ul->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa3_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+		IPAERR("fail to setup ul pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+		IPAERR("fail to send cmd to uc for ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa3_install_dflt_flt_rules(ipa_ep_idx_ul);
+	outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+	ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->ul.client,
+		ipa_ep_idx_ul);
+
+	/* setup dl ep cfg */
+	ep_dl->valid = 1;
+	ep_dl->client = in->dl.client;
+	result = ipa3_enable_data_path(ipa_ep_idx_dl);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_dl);
+		result = -EFAULT;
+		goto fail;
+	}
+
+	memset(&ep_dl->cfg, 0, sizeof(ep_dl->cfg));
+	ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+	ep_dl->cfg.hdr.hdr_len = hdr_len;
+	ep_dl->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+		IPAERR("fail to setup dl pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+		IPAERR("fail to send cmd to uc for dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+	ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->dl.client,
+		ipa_ep_idx_dl);
+
+fail:
+	/* success path also falls through here to drop the clock vote */
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+/**
+ * ipa3_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ * @ipa_ep_idx_ul: [in] UL pipe index used at setup time
+ * @ipa_ep_idx_dl: [in] DL pipe index used at setup time
+ *
+ * Sends OFFLOAD_TEAR_DOWN commands for both pipes, disables their data
+ * paths and clears the EP contexts.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+		int ipa_ep_idx_dl)
+{
+	struct ipa_mem_buffer cmd;
+	struct ipa3_ep_context *ep_ul, *ep_dl;
+	struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+	union Ipa3HwNtnCommonChCmdData_t *tear;
+	int result = 0;
+
+	IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+	IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+	ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+		ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+		IPAERR("channel bad state: ul %d dl %d\n",
+			ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+		return -EFAULT;
+	}
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	/* teardown the UL pipe */
+	cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+	cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+	tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+	tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa3_disable_data_path(ipa_ep_idx_ul);
+	ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
+	memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
+	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+	/* teardown the DL pipe; the same command buffer is reused */
+	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa3_disable_data_path(ipa_ep_idx_dl);
+	memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
+	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+
+fail:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
--- /dev/null
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+/**
+ * @brief Enum value determined based on the feature it
+ * corresponds to
+ * +----------------+----------------+
+ * | 3 bits | 5 bits |
+ * +----------------+----------------+
+ * | HW_FEATURE | OPCODE |
+ * +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
+#define EXTRACT_UC_FEATURE(value) (value >> 5)
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa3_hw_features - Values that represent the features supported
+ * in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_ZIP: Feature related to CMP/DCMP operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to NTN operation in IPA HW
+*/
+enum ipa3_hw_features {
+ IPA_HW_FEATURE_COMMON = 0x0,
+ IPA_HW_FEATURE_MHI = 0x1,
+ IPA_HW_FEATURE_POWER_COLLAPSE = 0x2,
+ IPA_HW_FEATURE_WDI = 0x3,
+ IPA_HW_FEATURE_ZIP = 0x4,
+ IPA_HW_FEATURE_NTN = 0x5,
+ IPA_HW_FEATURE_OFFLOAD = 0x6,
+ IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
+};
+
+/**
+ * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_NO_OP : No event present
+ * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
+ * device
+ * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ */
+enum ipa3_hw_2_cpu_events {
+ IPA_HW_2_CPU_EVENT_NO_OP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_HW_2_CPU_EVENT_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_2_CPU_EVENT_LOG_INFO =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+};
+
+/**
+ * enum ipa3_hw_errors - Common error types.
+ * @IPA_HW_ERROR_NONE : No error persists
+ * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
+ * @IPA_HW_DMA_ERROR : Unexpected DMA error
+ * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
+ * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
+ * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
+ * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
+ */
+enum ipa3_hw_errors {
+ IPA_HW_ERROR_NONE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_HW_INVALID_DOORBELL_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_DMA_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+ IPA_HW_FATAL_SYSTEM_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+ IPA_HW_INVALID_OPCODE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+ IPA_HW_INVALID_PARAMS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+ IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+ IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+ IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8)
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter lower 32bit.
+ * @cmdParams_hi : CPU->HW command parameter higher 32bit.
+ * of parameters (immediate parameters) and point on structure in system memory
+ * (in such case the address must be accessible for HW)
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter filed can hold 32
+ * bits of parameters (immediate parameters) and point on structure in system
+ * memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter filed can hold 32
+ * bits of parameters (immediate parameters) and point on
+ * structure in system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the
+ * error type.
+ * @warningCounter : The warnings counter. The counter carries information
+ * regarding non fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+ u8 cmdOp;
+ u8 reserved_01;
+ u16 reserved_03_02;
+ u32 cmdParams;
+ u32 cmdParams_hi;
+ u8 responseOp;
+ u8 reserved_0D;
+ u16 reserved_0F_0E;
+ u32 responseParams;
+ u8 eventOp;
+ u8 reserved_15;
+ u16 reserved_17_16;
+ u32 eventParams;
+ u32 firstErrorAddress;
+ u8 hwState;
+ u8 warningCounter;
+ u16 reserved_23_22;
+ u16 interfaceVersionCommon;
+ u16 reserved_27_26;
+} __packed;
+
+/**
+ * union Ipa3HwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union Ipa3HwFeatureInfoData_t {
+ struct IpaHwFeatureInfoParams_t {
+ u32 offset:16;
+ u32 size:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwErrorEventData_t - HW->CPU Common Events
+ * @errorType : Entered when a system error is detected by the HW. Type of
+ * error is specified by IPA_HW_ERRORS
+ * @reserved : Reserved
+ */
+union IpaHwErrorEventData_t {
+ struct IpaHwErrorEventParams_t {
+ u32 errorType:8;
+ u32 reserved:24;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * struct Ipa3HwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @Ipa3HwFeatureInfoData_t : Location and size of each feature within
+ * the statistics or config structure
+ *
+ * @note Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct Ipa3HwEventInfoData_t {
+ u32 baseAddrOffset;
+ union Ipa3HwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @featureMask : Mask indicating the features enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information (per-feature offsets/sizes)
+ * @configInfo : Configuration related information (per-feature
+ * offsets/sizes)
+ *
+ * @note The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+ u32 featureMask;
+ u32 circBuffBaseAddrOffset;
+ struct Ipa3HwEventInfoData_t statsInfo;
+ struct Ipa3HwEventInfoData_t configInfo;
+
+} __packed;
+
+/**
+ * struct ipa3_uc_ntn_ctx - uC context for the Neutrino (NTN) protocol
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: pointer to the Neutrino stats structure
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa3_uc_ntn_ctx {
+ u32 ntn_uc_stats_ofst;
+ struct Ipa3HwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+ void *priv;
+ ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa3_hw_2_cpu_ntn_events - Values that represent HW event
+ * to be sent to CPU
+ * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
+ * detected an error in NTN
+ */
+enum ipa3_hw_2_cpu_ntn_events {
+ IPA_HW_2_CPU_EVENT_NTN_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
+};
+
+
+/**
+ * enum ipa3_hw_ntn_errors - NTN specific error types.
+ * @IPA_HW_NTN_ERROR_NONE : No error is reported
+ * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa3_hw_ntn_errors {
+ IPA_HW_NTN_ERROR_NONE = 0,
+ IPA_HW_NTN_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect
+ * any sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_states {
+ IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_NTN_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error is reported
+ * @IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Tx ring
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ * num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY:
+ * @IPA_HW_NTN_CH_ERR_RESERVED:
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_errors {
+ IPA_HW_NTN_CH_ERR_NONE = 0,
+ IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+ IPA_HW_NTN_TX_FSM_ERROR = 2,
+ IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3,
+ IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+ IPA_HW_NTN_RX_FSM_ERROR = 5,
+ IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6,
+ IPA_HW_NTN_CH_ERR_RESERVED = 0xFF
+};
+
+
+/**
+ * struct Ipa3HwNtnSetUpCmdData_t - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ * ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ * buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ * Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ * Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of each data buffer allocated in
+ * DDR
+ */
+struct Ipa3HwNtnSetUpCmdData_t {
+ u32 ring_base_pa;
+ u32 buff_pool_base_pa;
+ u16 ntn_ring_size;
+ u16 num_buffers;
+ u32 ntn_reg_base_ptr_pa;
+ u8 ipa_pipe_number;
+ u8 dir;
+ u16 data_buff_size;
+
+} __packed;
+
+/**
+ * union Ipa3HwNtnCommonChCmdData_t - Structure holding the
+ * parameters for Ntn Tear down command data params
+ *
+ *@ipa_pipe_number: IPA pipe number. This could be Tx or an Rx pipe
+ */
+union Ipa3HwNtnCommonChCmdData_t {
+ struct IpaHwNtnCommonChCmdParams_t {
+ u32 ipa_pipe_number :8;
+ u32 reserved :24;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+
+/**
+ * union Ipa3HwNTNErrorEventData_t - Structure holding the
+ * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed
+ * as immediate params in the shared memory
+ *
+ *@ntn_error_type: type of NTN error (ipa3_hw_ntn_errors)
+ *@ipa_pipe_number: IPA pipe number on which error has happened
+ * Applicable only if error type indicates channel error
+ *@ntn_ch_err_type: Information about the channel error (if
+ * available)
+ */
+union Ipa3HwNTNErrorEventData_t {
+ struct IpaHwNTNErrorEventParams_t {
+ u32 ntn_error_type :8;
+ u32 reserved :8;
+ u32 ipa_pipe_number :8;
+ u32 ntn_ch_err_type :8;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTN3RxInfoData_t - NTN Structure holding the Rx pipe
+ * information
+ *
+ *@max_outstanding_pkts: Number of outstanding packets in Rx
+ * Ring
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@rx_ring_rp_value: Read pointer last advertized to the WLAN FW
+ *@rx_ind_ring_stats:
+ *@bam_stats:
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_pkts_in_dis_uninit_state:
+ *@num_bam_int_handled_while_not_in_bam: Number of Bam
+ * Interrupts handled by FW
+ *@num_bam_int_handled_while_in_bam_state: Number of Bam
+ * Interrupts handled by FW
+ */
+struct NTN3RxInfoData_t {
+ u32 max_outstanding_pkts;
+ u32 num_pkts_processed;
+ u32 rx_ring_rp_value;
+ struct IpaHwRingStats_t rx_ind_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_bam_int_handled;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_pkts_in_dis_uninit_state;
+ u32 num_bam_int_handled_while_not_in_bam;
+ u32 num_bam_int_handled_while_in_bam_state;
+} __packed;
+
+
+/**
+ * struct NTNTxInfoData_t - Structure holding the NTN Tx channel
+ * Ensure that this is always word aligned
+ *
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@tail_ptr_val: Latest value of doorbell written to copy engine
+ *@num_db_fired: Number of DB from uC FW to Copy engine
+ *
+ *@tx_comp_ring_stats:
+ *@bam_stats:
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_bam_int_in_non_running_state: Number of Bam interrupts
+ * while not in Running state
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@num_bam_int_handled_while_wait_for_bam: Number of times the
+ * Imm Cmd is injected due to fw_desc change
+ *@num_bam_int_handled_while_not_in_bam: Number of Bam interrupts
+ * handled while not in BAM state
+ */
+struct NTNTxInfoData_t {
+ u32 num_pkts_processed;
+ u32 tail_ptr_val;
+ u32 num_db_fired;
+ struct IpaHwRingStats_t tx_comp_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_bam_int_handled;
+ u32 num_bam_int_in_non_running_state;
+ u32 num_qmb_int_handled;
+ u32 num_bam_int_handled_while_wait_for_bam;
+ u32 num_bam_int_handled_while_not_in_bam;
+} __packed;
+
+
+/**
+ * struct Ipa3HwStatsNTNInfoData_t - Structure holding the NTN Rx and
+ * Tx channel statistics. Ensure that this is always word aligned
+ *
+ */
+struct Ipa3HwStatsNTNInfoData_t {
+ struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+ struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands - Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ * Offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ * Offload protocol's Tx/ Rx Path
+ */
+enum ipa_cpu_2_hw_offload_commands {
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+};
+
+
+/**
+ * enum ipa3_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect any sub-state the state machine may be in
+ */
+enum ipa3_hw_offload_channel_states {
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF
+};
+
+
+/**
+ * enum ipa3_hw_2_cpu_offload_cmd_resp_status - Values that represent
+ * offload related command response status to be sent to CPU.
+ */
+enum ipa3_hw_2_cpu_offload_cmd_resp_status {
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+ IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+ IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+ IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+ IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+ IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+ IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * union IpaHwSetUpCmd - per-protocol channel setup parameters for
+ * IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP
+ * @NtnSetupCh_params: NTN channel setup parameters
+ */
+union IpaHwSetUpCmd {
+ struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadSetUpCmdData_t - channel setup command payload
+ * @protocol: offload protocol identifier -- presumably
+ * enum ipa_uc_offload_proto; confirm against uC interface
+ * @SetupCh_params: per-protocol channel setup parameters
+ */
+struct IpaHwOffloadSetUpCmdData_t {
+ u8 protocol;
+ union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * union IpaHwCommonChCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ * @NtnCommonCh_params: NTN channel teardown parameters
+ */
+union IpaHwCommonChCmd {
+ union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadCommonChCmdData_t - common channel command payload
+ * @protocol: offload protocol identifier
+ * @CommonCh_params: per-protocol channel command parameters
+ */
+struct IpaHwOffloadCommonChCmdData_t {
+ u8 protocol;
+ union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
- ep->wdi_state |= IPA_WDI_CONNECTED;
+ ep->uc_offload_state |= IPA_WDI_CONNECTED;
IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
return 0;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- ep->wdi_state |= IPA_WDI_ENABLED;
+ ep->uc_offload_state |= IPA_WDI_ENABLED;
IPADBG("client (ep: %d) enabled\n", clnt_hdl);
uc_timeout:
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_ENABLED;
+ ep->uc_offload_state &= ~IPA_WDI_ENABLED;
IPADBG("client (ep: %d) disabled\n", clnt_hdl);
uc_timeout:
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
else
IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
- ep->wdi_state |= IPA_WDI_RESUMED;
+ ep->uc_offload_state |= IPA_WDI_RESUMED;
IPADBG("client (ep: %d) resumed\n", clnt_hdl);
uc_timeout:
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
IPA_WDI_RESUMED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
ipa3_ctx->tag_process_before_gating = true;
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_RESUMED;
+ ep->uc_offload_state &= ~IPA_WDI_RESUMED;
IPADBG("client (ep: %d) suspended\n", clnt_hdl);
uc_timeout:
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa3_ctx->ep[clnt_hdl];
- if (!(ep->wdi_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low;
api_ctrl->ipa_rx_poll = ipa3_rx_poll;
api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb;
+ api_ctrl->ipa_setup_uc_ntn_pipes = ipa3_setup_uc_ntn_pipes;
+ api_ctrl->ipa_tear_down_uc_offload_pipes =
+ ipa3_tear_down_uc_offload_pipes;
return 0;
}
--- /dev/null
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_H_
+#define _IPA_UC_OFFLOAD_H_
+
+#include <linux/ipa.h>
+
+/**
+ * enum ipa_uc_offload_proto
+ * Protocol type: either WDI or Neutrino
+ *
+ * @IPA_UC_INVALID: invalid/unset protocol
+ * @IPA_UC_WDI: wdi Protocol
+ * @IPA_UC_NTN: Neutrino Protocol
+ * @IPA_UC_MAX_PROT_SIZE: number of protocol values (sentinel)
+ */
+enum ipa_uc_offload_proto {
+ IPA_UC_INVALID = 0,
+ IPA_UC_WDI = 1,
+ IPA_UC_NTN = 2,
+ IPA_UC_MAX_PROT_SIZE
+};
+
+/**
+ * struct ipa_hdr_info - Header to install on IPA HW
+ *
+ * @hdr: pointer to the header to install on IPA HW
+ * @hdr_len: length of header
+ * @dst_mac_addr_offset: destination mac address offset
+ * @hdr_type: layer two header type
+ */
+struct ipa_hdr_info {
+ u8 *hdr;
+ u8 hdr_len;
+ u8 dst_mac_addr_offset;
+ enum ipa_hdr_l2_type hdr_type;
+};
+
+/**
+ * struct ipa_uc_offload_intf_params - parameters for uC offload
+ * interface registration
+ *
+ * @netdev_name: network interface name
+ * @notify: callback for exception/embedded packets
+ * @priv: callback cookie
+ * @hdr_info: header information
+ * @is_meta_data_valid: indicates whether meta_data/meta_data_mask
+ * are valid
+ * @meta_data: meta data if any
+ * @meta_data_mask: meta data mask
+ * @proto: uC offload protocol type
+ * @alt_dst_pipe: alternate routing output pipe
+ */
+struct ipa_uc_offload_intf_params {
+ const char *netdev_name;
+ ipa_notify_cb notify;
+ void *priv;
+ struct ipa_hdr_info hdr_info[IPA_IP_MAX];
+ u8 is_meta_data_valid;
+ u32 meta_data;
+ u32 meta_data_mask;
+ enum ipa_uc_offload_proto proto;
+ enum ipa_client_type alt_dst_pipe;
+};
+
+/**
+ * struct ipa_ntn_setup_info - NTN TX/Rx configuration
+ * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
+ * @ring_base_pa: physical address of the base of the Tx/Rx ring
+ * @ntn_ring_size: size of the Tx/Rx ring (in terms of elements)
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ * buffer pool
+ * @num_buffers: Rx/Tx buffer pool size (in terms of elements)
+ * @data_buff_size: size of each data buffer allocated in DDR
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN Ring's
+ * tail pointer
+ */
+struct ipa_ntn_setup_info {
+ enum ipa_client_type client;
+ phys_addr_t ring_base_pa;
+ u32 ntn_ring_size;
+
+ phys_addr_t buff_pool_base_pa;
+ u32 num_buffers;
+ u32 data_buff_size;
+
+ phys_addr_t ntn_reg_base_ptr_pa;
+};
+
+/**
+ * struct ipa_uc_offload_out_params - out parameters for uC offload
+ *
+ * @clnt_hndl: Handle that the client needs to pass during
+ * further operations
+ */
+struct ipa_uc_offload_out_params {
+ u32 clnt_hndl;
+};
+
+/**
+ * struct ipa_ntn_conn_in_params - NTN TX/Rx connect parameters
+ * @ul: parameters to connect UL pipe (from Neutrino to IPA)
+ * @dl: parameters to connect DL pipe (from IPA to Neutrino)
+ */
+struct ipa_ntn_conn_in_params {
+ struct ipa_ntn_setup_info ul;
+ struct ipa_ntn_setup_info dl;
+};
+
+/**
+ * struct ipa_ntn_conn_out_params - information provided
+ * to uC offload client
+ * @ul_uc_db_pa: physical address of IPA uc doorbell for UL
+ * @dl_uc_db_pa: physical address of IPA uc doorbell for DL
+ */
+struct ipa_ntn_conn_out_params {
+ phys_addr_t ul_uc_db_pa;
+ phys_addr_t dl_uc_db_pa;
+};
+
+/**
+ * struct ipa_uc_offload_conn_in_params - information provided by
+ * uC offload client
+ * @clnt_hndl: Handle that return as part of reg interface
+ * @u: per-protocol connect parameters
+ * @u.ntn: uC RX/Tx configuration info for the NTN protocol
+ */
+struct ipa_uc_offload_conn_in_params {
+ u32 clnt_hndl;
+ union {
+ struct ipa_ntn_conn_in_params ntn;
+ } u;
+};
+
+/**
+ * struct ipa_uc_offload_conn_out_params - information provided
+ * to uC offload client
+ * @u: per-protocol connect output params
+ * @u.ntn: NTN output params (uc doorbell physical addresses)
+ */
+struct ipa_uc_offload_conn_out_params {
+ union {
+ struct ipa_ntn_conn_out_params ntn;
+ } u;
+};
+
+/**
+ * struct ipa_perf_profile - To set bandwidth profile
+ *
+ * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
+ * @max_supported_bw_mbps: maximum bandwidth needed (in Mbps)
+ */
+struct ipa_perf_profile {
+ enum ipa_client_type client;
+ u32 max_supported_bw_mbps;
+};
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+/**
+ * ipa_uc_offload_reg_intf - Client should call this function to
+ * init uC offload data path
+ *
+ * @in: [in] interface registration parameters
+ * @out: [out] output parameters (client handle)
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_reg_intf(
+ struct ipa_uc_offload_intf_params *in,
+ struct ipa_uc_offload_out_params *out);
+
+/**
+ * ipa_uc_offload_cleanup - Client Driver should call this
+ * function before unload and after disconnect
+ *
+ * @clnt_hdl: [in] handle returned by ipa_uc_offload_reg_intf()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_cleanup(u32 clnt_hdl);
+
+/**
+ * ipa_uc_offload_conn_pipes - Client should call this
+ * function to connect uC pipe for offload data path
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *in,
+ struct ipa_uc_offload_conn_out_params *out);
+
+/**
+ * ipa_uc_offload_disconn_pipes() - Client should call this
+ * function to disconnect uC pipe to disable offload data path
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl);
+
+/**
+ * ipa_set_perf_profile() - Client should call this function to
+ * set IPA clock Band Width based on data rates
+ * @profile: [in] BandWidth profile to use
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_set_perf_profile(struct ipa_perf_profile *profile);
+
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+
+/* Stub used when neither CONFIG_IPA nor CONFIG_IPA3 is enabled */
+static inline int ipa_uc_offload_reg_intf(
+ struct ipa_uc_offload_intf_params *in,
+ struct ipa_uc_offload_out_params *out)
+{
+ return -EPERM;
+}
+
+/* Stub used when neither CONFIG_IPA nor CONFIG_IPA3 is enabled.
+ * Fix: the stub was named "ipa_uC_offload_cleanup" (capital C), which
+ * does not match the ipa_uc_offload_cleanup() declaration above --
+ * callers built without IPA support would fail to resolve the symbol.
+ */
+static inline int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+/* Stub used when neither CONFIG_IPA nor CONFIG_IPA3 is enabled */
+static inline int ipa_uc_offload_conn_pipes(
+ struct ipa_uc_offload_conn_in_params *in,
+ struct ipa_uc_offload_conn_out_params *out)
+{
+ return -EPERM;
+}
+
+/* Stub used when neither CONFIG_IPA nor CONFIG_IPA3 is enabled */
+static inline int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+/* Stub used when neither CONFIG_IPA nor CONFIG_IPA3 is enabled */
+static inline int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+ return -EPERM;
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* _IPA_UC_OFFLOAD_H_ */