EXPORT_SYMBOL(ipa_disconnect);
/**
-* ipa_clear_endpoint_delay() - Clear ep_delay.
-* @clnt_hdl: [in] IPA client handle
-*
-* Returns: 0 on success, negative on failure
-*
-* Note: Should not be called from atomic context
-*/
+ * ipa_clear_endpoint_delay() - Clear ep_delay.
+ * @clnt_hdl: [in] IPA client handle
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
int ipa_clear_endpoint_delay(u32 clnt_hdl)
{
int ret;
EXPORT_SYMBOL(ipa_clear_endpoint_delay);
/**
-* ipa_reset_endpoint() - reset an endpoint from BAM perspective
-* @clnt_hdl: [in] IPA client handle
-*
-* Returns: 0 on success, negative on failure
-*
-* Note: Should not be called from atomic context
-*/
+ * ipa_reset_endpoint() - reset an endpoint from BAM perspective
+ * @clnt_hdl: [in] IPA client handle
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
int ipa_reset_endpoint(u32 clnt_hdl)
{
int ret;
EXPORT_SYMBOL(ipa_reset_endpoint);
/**
-* ipa_disable_endpoint() - Disable an endpoint from IPA perspective
-* @clnt_hdl: [in] IPA client handle
-*
-* Returns: 0 on success, negative on failure
-*
-* Note: Should not be called from atomic context
-*/
+ * ipa_disable_endpoint() - Disable an endpoint from IPA perspective
+ * @clnt_hdl: [in] IPA client handle
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
int ipa_disable_endpoint(u32 clnt_hdl)
{
int ret;
EXPORT_SYMBOL(ipa_add_hdr);
/**
- * ipa_del_hdr() - Remove the specified headers from SW and optionally commit them
- * to IPA HW
+ * ipa_add_hdr_usr() - add the specified headers to SW and optionally
+ * commit them to IPA HW
+ * @hdrs: [inout] set of headers to add
+ * @user_only: [in] indicate rules installed by userspace
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_add_hdr_usr, hdrs, user_only);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_add_hdr_usr);
+
+/**
+ * ipa_del_hdr() - Remove the specified headers from SW and optionally
+ * commit them to IPA HW
* @hdls: [inout] set of headers to delete
*
* Returns: 0 on success, negative on failure
* ipa_reset_hdr() - reset the current header table in SW (does not commit to
* HW)
*
+ * @user_only: [in] indicate delete rules installed by userspace
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa_reset_hdr(void)
+int ipa_reset_hdr(bool user_only)
{
int ret;
- IPA_API_DISPATCH_RETURN(ipa_reset_hdr);
+ IPA_API_DISPATCH_RETURN(ipa_reset_hdr, user_only);
return ret;
}
* ipa_add_hdr_proc_ctx() - add the specified headers to SW
* and optionally commit them to IPA HW
* @proc_ctxs: [inout] set of processing context headers to add
+ * @user_only: [in] indicate rules installed by userspace
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+ bool user_only)
{
int ret;
- IPA_API_DISPATCH_RETURN(ipa_add_hdr_proc_ctx, proc_ctxs);
+ IPA_API_DISPATCH_RETURN(ipa_add_hdr_proc_ctx, proc_ctxs, user_only);
return ret;
}
EXPORT_SYMBOL(ipa_add_rt_rule);
/**
+ * ipa_add_rt_rule_usr() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ * @user_only: [in] indicate rules installed by userspace
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_add_rt_rule_usr, rules, user_only);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule_usr);
+
+/**
* ipa_del_rt_rule() - Remove the specified routing rules to SW and optionally
* commit to IPA HW
* @hdls: [inout] set of routing rules to delete
* ipa_reset_rt() - reset the current SW routing table of specified type
* (does not commit to HW)
* @ip: The family of routing tables
+ * @user_only: [in] indicate delete rules installed by userspace
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa_reset_rt(enum ipa_ip_type ip)
+int ipa_reset_rt(enum ipa_ip_type ip, bool user_only)
{
int ret;
- IPA_API_DISPATCH_RETURN(ipa_reset_rt, ip);
+ IPA_API_DISPATCH_RETURN(ipa_reset_rt, ip, user_only);
return ret;
}
/**
* ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally
* commit to IPA HW
+ * @rules: [inout] set of filtering rules to add
*
* Returns: 0 on success, negative on failure
*
EXPORT_SYMBOL(ipa_add_flt_rule);
/**
+ * ipa_add_flt_rule_usr() - Add the specified filtering rules to
+ * SW and optionally commit to IPA HW
+ * @rules: [inout] set of filtering rules to add
+ * @user_only: [in] indicate rules installed by userspace
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_add_flt_rule_usr, rules, user_only);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule_usr);
+
+/**
* ipa_del_flt_rule() - Remove the specified filtering rules from SW and
* optionally commit to IPA HW
*
/**
* ipa_reset_flt() - Reset the current SW filtering table of specified type
* (does not commit to HW)
- * @ip: [in] the family of routing tables
+ * @ip: [in] the family of routing tables
+ * @user_only: [in] indicate delete rules installed by userspace
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa_reset_flt(enum ipa_ip_type ip)
+int ipa_reset_flt(enum ipa_ip_type ip, bool user_only)
{
int ret;
- IPA_API_DISPATCH_RETURN(ipa_reset_flt, ip);
+ IPA_API_DISPATCH_RETURN(ipa_reset_flt, ip, user_only);
return ret;
}
EXPORT_SYMBOL(ipa_uc_dereg_rdyCB);
/**
-* teth_bridge_init() - Initialize the Tethering bridge driver
-* @params - in/out params for USB initialization API (please look at struct
-* definition for more info)
-*
-* USB driver gets a pointer to a callback function (usb_notify_cb) and an
-* associated data. USB driver installs this callback function in the call to
-* ipa_connect().
-*
-* Builds IPA resource manager dependency graph.
-*
-* Return codes: 0: success,
-* -EINVAL - Bad parameter
-* Other negative value - Failure
-*/
+ * teth_bridge_init() - Initialize the Tethering bridge driver
+ * @params - in/out params for USB initialization API (please look at struct
+ * definition for more info)
+ *
+ * USB driver gets a pointer to a callback function (usb_notify_cb) and an
+ * associated data. USB driver installs this callback function in the call to
+ * ipa_connect().
+ *
+ * Builds IPA resource manager dependency graph.
+ *
+ * Return codes: 0: success,
+ * -EINVAL - Bad parameter
+ * Other negative value - Failure
+ */
int teth_bridge_init(struct teth_bridge_init_params *params)
{
int ret;
EXPORT_SYMBOL(teth_bridge_init);
/**
-* teth_bridge_disconnect() - Disconnect tethering bridge module
-*/
+ * teth_bridge_disconnect() - Disconnect tethering bridge module
+ */
int teth_bridge_disconnect(enum ipa_client_type client)
{
int ret;
EXPORT_SYMBOL(teth_bridge_disconnect);
/**
-* teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
-* @connect_params: Connection info
-*
-* Return codes: 0: success
-* -EINVAL: invalid parameters
-* -EPERM: Operation not permitted as the bridge is already
-* connected
-*/
+ * teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
+ * @connect_params: Connection info
+ *
+ * Return codes: 0: success
+ * -EINVAL: invalid parameters
+ * -EPERM: Operation not permitted as the bridge is already
+ * connected
+ */
int teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
{
int ret;
EXPORT_SYMBOL(ipa_write_qmap_id);
/**
-* ipa_add_interrupt_handler() - Adds handler to an interrupt type
-* @interrupt: Interrupt type
-* @handler: The handler to be added
-* @deferred_flag: whether the handler processing should be deferred in
-* a workqueue
-* @private_data: the client's private data
-*
-* Adds handler to an interrupt type and enable the specific bit
-* in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled
-*/
+ * ipa_add_interrupt_handler() - Adds handler to an interrupt type
+ * @interrupt: Interrupt type
+ * @handler: The handler to be added
+ * @deferred_flag: whether the handler processing should be deferred in
+ * a workqueue
+ * @private_data: the client's private data
+ *
+ * Adds handler to an interrupt type and enable the specific bit
+ * in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled
+ */
int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
ipa_irq_handler_t handler,
bool deferred_flag,
EXPORT_SYMBOL(ipa_add_interrupt_handler);
/**
-* ipa_remove_interrupt_handler() - Removes handler to an interrupt type
-* @interrupt: Interrupt type
-*
-* Removes the handler and disable the specific bit in IRQ_EN register
-*/
+ * ipa_remove_interrupt_handler() - Removes handler to an interrupt type
+ * @interrupt: Interrupt type
+ *
+ * Removes the handler and disable the specific bit in IRQ_EN register
+ */
int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt)
{
int ret;
EXPORT_SYMBOL(ipa_remove_interrupt_handler);
/**
-* ipa_restore_suspend_handler() - restores the original suspend IRQ handler
-* as it was registered in the IPA init sequence.
-* Return codes:
-* 0: success
-* -EPERM: failed to remove current handler or failed to add original handler
-* */
+ * ipa_restore_suspend_handler() - restores the original suspend IRQ handler
+ * as it was registered in the IPA init sequence.
+ * Return codes:
+ * 0: success
+ * -EPERM: failed to remove current handler or failed to add original handler
+ */
int ipa_restore_suspend_handler(void)
{
int ret;
{
int result;
- /*
- * IPA probe function can be called for multiple times as the same probe
- * function handles multiple compatibilities
- */
+	/*
+	 * IPA probe function can be called multiple times, as the same probe
+	 * function handles multiple compatibilities.
+	 */
pr_debug("ipa: IPA driver probing started for %s\n",
pdev_p->dev.of_node->name);
*
* Return codes:
* None
-*/
+ */
void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
{
IPA_API_DISPATCH(ipa_inc_client_enable_clks, id);
*
* Return codes:
* None
-*/
+ */
void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
{
IPA_API_DISPATCH(ipa_dec_client_disable_clks, id);
EXPORT_SYMBOL(ipa_inc_client_enable_clks_no_block);
/**
-* ipa_suspend_resource_no_block() - suspend client endpoints related to the
-* IPA_RM resource and decrement active clients counter. This function is
-* guaranteed to avoid sleeping.
-*
-* @resource: [IN] IPA Resource Manager resource
-*
-* Return codes: 0 on success, negative on failure.
-*/
+ * ipa_suspend_resource_no_block() - suspend client endpoints related to the
+ * IPA_RM resource and decrement active clients counter. This function is
+ * guaranteed to avoid sleeping.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
{
int ret;
int (*ipa_add_hdr)(struct ipa_ioc_add_hdr *hdrs);
+ int (*ipa_add_hdr_usr)(struct ipa_ioc_add_hdr *hdrs, bool user_only);
+
int (*ipa_del_hdr)(struct ipa_ioc_del_hdr *hdls);
int (*ipa_commit_hdr)(void);
- int (*ipa_reset_hdr)(void);
+ int (*ipa_reset_hdr)(bool user_only);
int (*ipa_get_hdr)(struct ipa_ioc_get_hdr *lookup);
int (*ipa_copy_hdr)(struct ipa_ioc_copy_hdr *copy);
- int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+ int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+ bool user_only);
int (*ipa_del_hdr_proc_ctx)(struct ipa_ioc_del_hdr_proc_ctx *hdls);
int (*ipa_add_rt_rule)(struct ipa_ioc_add_rt_rule *rules);
+ int (*ipa_add_rt_rule_usr)(struct ipa_ioc_add_rt_rule *rules,
+ bool user_only);
+
int (*ipa_del_rt_rule)(struct ipa_ioc_del_rt_rule *hdls);
int (*ipa_commit_rt)(enum ipa_ip_type ip);
- int (*ipa_reset_rt)(enum ipa_ip_type ip);
+ int (*ipa_reset_rt)(enum ipa_ip_type ip, bool user_only);
int (*ipa_get_rt_tbl)(struct ipa_ioc_get_rt_tbl *lookup);
int (*ipa_add_flt_rule)(struct ipa_ioc_add_flt_rule *rules);
+ int (*ipa_add_flt_rule_usr)(struct ipa_ioc_add_flt_rule *rules,
+ bool user_only);
+
int (*ipa_del_flt_rule)(struct ipa_ioc_del_flt_rule *hdls);
int (*ipa_mdfy_flt_rule)(struct ipa_ioc_mdfy_flt_rule *rules);
int (*ipa_commit_flt)(enum ipa_ip_type ip);
- int (*ipa_reset_flt)(enum ipa_ip_type ip);
+ int (*ipa_reset_flt)(enum ipa_ip_type ip, bool user_only);
int (*allocate_nat_device)(struct ipa_ioc_nat_alloc_mem *mem);
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* @link: entry's link in global header offset entries list
* @offset: the offset
* @bin: bin
+ * @ipacm_installed: indicate if installed by ipacm
*/
struct ipa_hdr_offset_entry {
struct list_head link;
u32 offset;
u32 bin;
+ bool ipacm_installed;
};
extern const char *ipa_clients_strings[];
return cnt;
}
+
+/**
+ * ipa2_clean_modem_rule() - send an empty filter-spec list to the modem
+ * over QMI, which the modem side interprets as a request to remove the
+ * previously installed filter rules (used by the IPA_IOC_CLEANUP path).
+ *
+ * Returns: 0 on success, -ENOMEM if the request buffer cannot be
+ * allocated, otherwise the status returned by qmi_filter_request_send().
+ */
+static int ipa2_clean_modem_rule(void)
+{
+	struct ipa_install_fltr_rule_req_msg_v01 *req;
+	int val = 0;
+
+	req = kzalloc(
+		sizeof(struct ipa_install_fltr_rule_req_msg_v01),
+		GFP_KERNEL);
+	if (!req) {
+		IPAERR("memory allocation failed!\n");
+		return -ENOMEM;
+	}
+	/* valid=false with zero length: no rules to install, i.e. clean up */
+	req->filter_spec_list_valid = false;
+	req->filter_spec_list_len = 0;
+	req->source_pipe_index_valid = 0;
+	val = qmi_filter_request_send(req);
+	kfree(req);
+
+	return val;
+}
+
static int ipa2_active_clients_panic_notifier(struct notifier_block *this,
unsigned long event, void *ptr)
{
kfree(buff);
}
-static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_cache)
+static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type,
+ bool is_cache)
{
int retval;
struct ipa_wan_msg *wan_msg;
retval = -EFAULT;
break;
}
- if (ipa2_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+ if (ipa2_add_hdr_usr((struct ipa_ioc_add_hdr *)param,
+ true)) {
retval = -EFAULT;
break;
}
retval = -EFAULT;
break;
}
- if (ipa2_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+ if (ipa2_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param,
+ true)) {
retval = -EFAULT;
break;
}
retval = -EFAULT;
break;
}
- if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+ if (ipa2_add_flt_rule_usr((struct ipa_ioc_add_flt_rule *)param,
+ true)) {
retval = -EFAULT;
break;
}
retval = ipa2_commit_hdr();
break;
case IPA_IOC_RESET_HDR:
- retval = ipa2_reset_hdr();
+ retval = ipa2_reset_hdr(false);
break;
case IPA_IOC_COMMIT_RT:
retval = ipa2_commit_rt(arg);
break;
case IPA_IOC_RESET_RT:
- retval = ipa2_reset_rt(arg);
+ retval = ipa2_reset_rt(arg, false);
break;
case IPA_IOC_COMMIT_FLT:
retval = ipa2_commit_flt(arg);
break;
case IPA_IOC_RESET_FLT:
- retval = ipa2_reset_flt(arg);
+ retval = ipa2_reset_flt(arg, false);
break;
case IPA_IOC_GET_RT_TBL:
if (copy_from_user(header, (u8 *)arg,
break;
}
if (ipa2_add_hdr_proc_ctx(
- (struct ipa_ioc_add_hdr_proc_ctx *)param)) {
+ (struct ipa_ioc_add_hdr_proc_ctx *)param, true)) {
retval = -EFAULT;
break;
}
}
break;
- default: /* redundant, as cmd was checked against MAXNR */
+ case IPA_IOC_CLEANUP:
+		/* Route and filter rules will also be cleaned */
+ IPADBG("Got IPA_IOC_CLEANUP\n");
+ retval = ipa2_reset_hdr(true);
+ memset(&nat_del, 0, sizeof(nat_del));
+ nat_del.table_index = 0;
+ retval = ipa2_nat_del_cmd(&nat_del);
+ retval = ipa2_clean_modem_rule();
+ break;
+
+ case IPA_IOC_QUERY_WLAN_CLIENT:
+ IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n");
+ retval = ipa2_resend_wlan_msg();
+ break;
+
+ default:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -ENOTTY;
}
/**
* ipa_setup_dflt_rt_tables() - Setup default routing tables
-*
+ *
* Return codes:
* 0: success
* -ENOMEM: failed to allocate memory
init_waitqueue_head(&ipa_ctx->msg_waitq);
mutex_init(&ipa_ctx->msg_lock);
+ /* store wlan client-connect-msg-list */
+ INIT_LIST_HEAD(&ipa_ctx->msg_wlan_client_list);
+ mutex_init(&ipa_ctx->msg_wlan_client_lock);
+
mutex_init(&ipa_ctx->lock);
mutex_init(&ipa_ctx->nat_mem.lock);
mutex_init(&ipa_ctx->ipa_cne_evt_lock);
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
const struct ipa_flt_rule *rule, u8 add_rear,
- u32 *rule_hdl)
+ u32 *rule_hdl, bool user)
{
struct ipa_flt_entry *entry;
struct ipa_rt_tbl *rt_tbl = NULL;
}
*rule_hdl = id;
entry->id = id;
+ entry->ipacm_installed = user;
IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
return 0;
tbl = &ipa_ctx->glob_flt_tbl[ip];
IPADBG_LOW("add global flt rule ip=%d\n", ip);
- return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+ return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, false);
}
static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
const struct ipa_flt_rule *rule, u8 add_rear,
- u32 *rule_hdl)
+ u32 *rule_hdl, bool user)
{
struct ipa_flt_tbl *tbl;
int ipa_ep_idx;
tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip];
IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);
- return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+ return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user);
}
/**
* ipa2_add_flt_rule() - Add the specified filtering rules to SW and optionally
* commit to IPA HW
+ * @rules: [inout] set of filtering rules to add
*
* Returns: 0 on success, negative on failure
*
*/
int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
{
+ return ipa2_add_flt_rule_usr(rules, false);
+}
+
+/**
+ * ipa2_add_flt_rule_usr() - Add the specified filtering rules
+ * to SW and optionally commit to IPA HW
+ * @rules: [inout] set of filtering rules to add
+ * @user_only: [in] indicate rules installed by userspace
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only)
+{
int i;
int result;
result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
&rules->rules[i].rule,
rules->rules[i].at_rear,
- &rules->rules[i].flt_rule_hdl);
+ &rules->rules[i].flt_rule_hdl,
+ user_only);
if (result) {
IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
/**
* ipa2_reset_flt() - Reset the current SW filtering table of specified type
* (does not commit to HW)
- * @ip: [in] the family of routing tables
+ * @ip: [in] the family of routing tables
+ * @user_only: [in] indicate rules deleted by userspace
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa2_reset_flt(enum ipa_ip_type ip)
+int ipa2_reset_flt(enum ipa_ip_type ip, bool user_only)
{
struct ipa_flt_tbl *tbl;
struct ipa_flt_entry *entry;
IPA_INVALID_L4_PROTOCOL))
continue;
- list_del(&entry->link);
- entry->tbl->rule_cnt--;
- if (entry->rt_tbl)
- entry->rt_tbl->ref_cnt--;
- entry->cookie = 0;
- id = entry->id;
- kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+ if (!user_only ||
+ entry->ipacm_installed) {
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ entry->cookie = 0;
+ id = entry->id;
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
- /* remove the handle from the database */
- ipa_id_remove(id);
+ /* remove the handle from the database */
+ ipa_id_remove(id);
+ }
}
for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
mutex_unlock(&ipa_ctx->lock);
return -EFAULT;
}
- list_del(&entry->link);
- entry->tbl->rule_cnt--;
- if (entry->rt_tbl)
- entry->rt_tbl->ref_cnt--;
- entry->cookie = 0;
- id = entry->id;
- kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
- /* remove the handle from the database */
- ipa_id_remove(id);
+ if (!user_only ||
+ entry->ipacm_installed) {
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ entry->cookie = 0;
+ id = entry->id;
+ kmem_cache_free(ipa_ctx->flt_rule_cache,
+ entry);
+
+ /* remove the handle from the database */
+ ipa_id_remove(id);
+ }
}
}
mutex_unlock(&ipa_ctx->lock);
tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
rule.action = IPA_PASS_TO_EXCEPTION;
__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
- &ep->dflt_flt4_rule_hdl);
+ &ep->dflt_flt4_rule_hdl, false);
ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
tbl->sticky_rear = true;
tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
rule.action = IPA_PASS_TO_EXCEPTION;
__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
- &ep->dflt_flt6_rule_hdl);
+ &ep->dflt_flt6_rule_hdl, false);
ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
tbl->sticky_rear = true;
mutex_unlock(&ipa_ctx->lock);
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
}
static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
- bool add_ref_hdr)
+ bool add_ref_hdr, bool user_only)
{
struct ipa_hdr_entry *hdr_entry;
struct ipa_hdr_proc_ctx_entry *entry;
if (add_ref_hdr)
hdr_entry->ref_cnt++;
entry->cookie = IPA_PROC_HDR_COOKIE;
+ entry->ipacm_installed = user_only;
needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ?
sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) :
*/
offset->offset = htbl->end;
offset->bin = bin;
+ offset->ipacm_installed = user_only;
htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
list_add(&offset->link,
&htbl->head_offset_list[bin]);
offset =
list_first_entry(&htbl->head_free_offset_list[bin],
struct ipa_hdr_proc_ctx_offset_entry, link);
+ offset->ipacm_installed = user_only;
list_move(&offset->link, &htbl->head_offset_list[bin]);
}
}
-static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
{
struct ipa_hdr_entry *entry;
struct ipa_hdr_offset_entry *offset = NULL;
entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
entry->eth2_ofst = hdr->eth2_ofst;
entry->cookie = IPA_HDR_COOKIE;
+ entry->ipacm_installed = user;
if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
bin = IPA_HDR_BIN0;
list_add(&offset->link,
&htbl->head_offset_list[bin]);
entry->offset_entry = offset;
+ offset->ipacm_installed = user;
}
} else {
entry->is_hdr_proc_ctx = false;
struct ipa_hdr_offset_entry, link);
list_move(&offset->link, &htbl->head_offset_list[bin]);
entry->offset_entry = offset;
+ offset->ipacm_installed = user;
}
list_add(&entry->link, &htbl->head_hdr_entry_list);
IPADBG("adding processing context for header %s\n", hdr->name);
proc_ctx.type = IPA_HDR_PROC_NONE;
proc_ctx.hdr_hdl = id;
- if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
+ if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) {
IPAERR("failed to add hdr proc ctx\n");
goto fail_add_proc_ctx;
}
*/
int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs)
{
+ return ipa2_add_hdr_usr(hdrs, false);
+}
+
+/**
+ * ipa2_add_hdr_usr() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @hdrs: [inout] set of headers to add
+ * @user_only: [in] indicate installed from user
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only)
+{
int i;
int result = -EFAULT;
IPADBG("adding %d headers to IPA driver internal data struct\n",
hdrs->num_hdrs);
for (i = 0; i < hdrs->num_hdrs; i++) {
- if (__ipa_add_hdr(&hdrs->hdr[i])) {
+ if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) {
IPAERR_RL("failed to add hdr %d\n", i);
hdrs->hdr[i].status = -1;
} else {
mutex_unlock(&ipa_ctx->lock);
return result;
}
-
/**
* ipa2_del_hdr_by_user() - Remove the specified headers
* from SW and optionally commit them to IPA HW
* ipa2_add_hdr_proc_ctx() - add the specified headers to SW
* and optionally commit them to IPA HW
* @proc_ctxs: [inout] set of processing context headers to add
+ * @user_only: [in] indicate installed by user-space module
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+ bool user_only)
{
int i;
int result = -EFAULT;
IPADBG("adding %d header processing contextes to IPA driver\n",
proc_ctxs->num_proc_ctxs);
for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
- if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
+ if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i],
+ true, user_only)) {
IPAERR_RL("failed to add hdr pric ctx %d\n", i);
proc_ctxs->proc_ctx[i].status = -1;
} else {
* ipa2_reset_hdr() - reset the current header table in SW (does not commit to
* HW)
*
+ * @user_only: [in] indicate delete rules installed by userspace
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa2_reset_hdr(void)
+int ipa2_reset_hdr(bool user_only)
{
struct ipa_hdr_entry *entry;
struct ipa_hdr_entry *next;
* issue a reset on the routing module since routing rules point to
* header table entries
*/
- if (ipa2_reset_rt(IPA_IP_v4))
+ if (ipa2_reset_rt(IPA_IP_v4, user_only))
IPAERR("fail to reset v4 rt\n");
- if (ipa2_reset_rt(IPA_IP_v6))
+ if (ipa2_reset_rt(IPA_IP_v6, user_only))
IPAERR("fail to reset v4 rt\n");
mutex_lock(&ipa_ctx->lock);
WARN_ON(1);
return -EFAULT;
}
- if (entry->is_hdr_proc_ctx) {
- dma_unmap_single(ipa_ctx->pdev,
- entry->phys_base,
- entry->hdr_len,
- DMA_TO_DEVICE);
- entry->proc_ctx = NULL;
- }
- list_del(&entry->link);
- entry->ref_cnt = 0;
- entry->cookie = 0;
- /* remove the handle from the database */
- ipa_id_remove(entry->id);
- kmem_cache_free(ipa_ctx->hdr_cache, entry);
+ if (!user_only || entry->ipacm_installed) {
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa_ctx->pdev,
+ entry->phys_base,
+ entry->hdr_len,
+ DMA_TO_DEVICE);
+ entry->proc_ctx = NULL;
+ }
+ list_del(&entry->link);
+ entry->ref_cnt = 0;
+ entry->cookie = 0;
+ /* remove the handle from the database */
+ ipa_id_remove(entry->id);
+ kmem_cache_free(ipa_ctx->hdr_cache, entry);
+ }
}
for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
list_for_each_entry_safe(off_entry, off_next,
if (off_entry->offset == 0)
continue;
- list_del(&off_entry->link);
- kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+ if (!user_only ||
+ off_entry->ipacm_installed) {
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa_ctx->hdr_offset_cache,
+ off_entry);
+ }
}
list_for_each_entry_safe(off_entry, off_next,
&ipa_ctx->hdr_tbl.head_free_offset_list[i],
link) {
- list_del(&off_entry->link);
- kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+
+ if (!user_only ||
+ off_entry->ipacm_installed) {
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa_ctx->hdr_offset_cache,
+ off_entry);
+ }
}
}
/* there is one header of size 8 */
WARN_ON(1);
return -EFAULT;
}
- list_del(&ctx_entry->link);
- ctx_entry->ref_cnt = 0;
- ctx_entry->cookie = 0;
- /* remove the handle from the database */
- ipa_id_remove(ctx_entry->id);
- kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, ctx_entry);
+ if (!user_only ||
+ ctx_entry->ipacm_installed) {
+ list_del(&ctx_entry->link);
+ ctx_entry->ref_cnt = 0;
+ ctx_entry->cookie = 0;
+ /* remove the handle from the database */
+ ipa_id_remove(ctx_entry->id);
+ kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache,
+ ctx_entry);
+ }
}
for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
&ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
link) {
- list_del(&ctx_off_entry->link);
- kmem_cache_free(ipa_ctx->hdr_proc_ctx_offset_cache,
+ if (!user_only ||
+ ctx_off_entry->ipacm_installed) {
+ list_del(&ctx_off_entry->link);
+ kmem_cache_free(
+ ipa_ctx->hdr_proc_ctx_offset_cache,
ctx_off_entry);
+ }
}
list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
&ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
link) {
- list_del(&ctx_off_entry->link);
- kmem_cache_free(ipa_ctx->hdr_proc_ctx_offset_cache,
- ctx_off_entry);
+
+ if (!user_only ||
+ ctx_off_entry->ipacm_installed) {
+ list_del(&ctx_off_entry->link);
+ kmem_cache_free(
+ ipa_ctx->hdr_proc_ctx_offset_cache,
+ ctx_off_entry);
+ }
}
}
ipa_ctx->hdr_proc_ctx_tbl.end = 0;
* @tbl: filter table
* @rt_tbl: routing table
* @hw_len: entry's size
+ * @id: rule handle - globally unique
+ * @ipacm_installed: indicate if installed by ipacm
*/
struct ipa_flt_entry {
struct list_head link;
struct ipa_rt_tbl *rt_tbl;
u32 hw_len;
int id;
+ bool ipacm_installed;
};
/**
* @is_eth2_ofst_valid: is eth2_ofst field valid?
* @eth2_ofst: offset to start of Ethernet-II/802.3 header
* @user_deleted: is the header deleted by the user?
+ * @ipacm_installed: indicate if installed by ipacm
*/
struct ipa_hdr_entry {
struct list_head link;
u8 is_eth2_ofst_valid;
u16 eth2_ofst;
bool user_deleted;
+ bool ipacm_installed;
};
/**
* @link: entry's link in global processing context header offset entries list
* @offset: the offset
* @bin: bin
+ * @ipacm_installed: indicate if installed by ipacm
*/
struct ipa_hdr_proc_ctx_offset_entry {
struct list_head link;
u32 offset;
u32 bin;
+ bool ipacm_installed;
};
/**
* @ref_cnt: reference counter of routing table
* @id: processing context header entry id
* @user_deleted: is the hdr processing context deleted by the user?
+ * @ipacm_installed: indicate if installed by ipacm
*/
struct ipa_hdr_proc_ctx_entry {
struct list_head link;
u32 ref_cnt;
int id;
bool user_deleted;
+ bool ipacm_installed;
};
/**
* @hdr: header table
* @proc_ctx: processing context table
* @hw_len: the length of the table
+ * @id: rule handle - globaly unique
+ * @ipacm_installed: indicate if installed by ipacm
*/
struct ipa_rt_entry {
struct list_head link;
struct ipa_hdr_proc_ctx_entry *proc_ctx;
u32 hw_len;
int id;
+ bool ipacm_installed;
};
/**
struct list_head msg_list;
struct list_head pull_msg_list;
struct mutex msg_lock;
+ struct list_head msg_wlan_client_list;
+ struct mutex msg_wlan_client_lock;
wait_queue_head_t msg_waitq;
enum ipa_hw_type ipa_hw_type;
enum ipa_hw_mode ipa_hw_mode;
*/
int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+int ipa2_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool by_user);
+
int ipa2_del_hdr(struct ipa_ioc_del_hdr *hdls);
int ipa2_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user);
int ipa2_commit_hdr(void);
-int ipa2_reset_hdr(void);
+int ipa2_reset_hdr(bool user_only);
int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup);
/*
* Header Processing Context
*/
-int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+ bool user_only);
int ipa2_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
*/
int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+int ipa2_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules,
+ bool user_only);
+
int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
int ipa2_commit_rt(enum ipa_ip_type ip);
-int ipa2_reset_rt(enum ipa_ip_type ip);
+int ipa2_reset_rt(enum ipa_ip_type ip, bool user_only);
int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
*/
int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+int ipa2_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules,
+ bool user_only);
+
int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
int ipa2_commit_flt(enum ipa_ip_type ip);
-int ipa2_reset_flt(enum ipa_ip_type ip);
+int ipa2_reset_flt(enum ipa_ip_type ip, bool user_only);
/*
* NAT
*/
int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff,
ipa_msg_free_fn callback);
+int ipa2_resend_wlan_msg(void);
int ipa2_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
int ipa2_deregister_pull_msg(struct ipa_msg_meta *meta);
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
#include <linux/fs.h>
#include <linux/sched.h>
#include "ipa_i.h"
+#include <linux/msm_ipa.h>
struct ipa_intf {
char name[IPA_RESOURCE_NAME_MAX];
kfree(buff);
}
+/**
+ * wlan_msg_process() - cache WLAN client connect events and purge the
+ * cache on disconnect
+ * @meta: [in] message meta-data (carries msg_type and msg_len)
+ * @buff: [in] message payload; layout depends on meta->msg_type
+ *
+ * For WLAN_CLIENT_CONNECT_EX a private copy of the event is appended to
+ * ipa_ctx->msg_wlan_client_list so it can be replayed later by
+ * ipa2_resend_wlan_msg(). For WLAN_CLIENT_DISCONNECT any cached entry
+ * whose MAC address matches is removed and freed.
+ *
+ * Locking: caller holds ipa_ctx->msg_lock; this function additionally
+ * takes ipa_ctx->msg_wlan_client_lock (always in that order).
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff)
+{
+	struct ipa_push_msg *msg_dup;
+	struct ipa_wlan_msg_ex *event_ex_cur_con = NULL;
+	struct ipa_wlan_msg_ex *event_ex_list = NULL;
+	struct ipa_wlan_msg *event_ex_cur_discon = NULL;
+	void *data_dup = NULL;
+	struct ipa_push_msg *entry;
+	struct ipa_push_msg *next;
+	int cnt = 0, total = 0, max = 0;
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	uint8_t mac2[IPA_MAC_ADDR_SIZE];
+
+	if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) {
+		/* debug print */
+		event_ex_cur_con = buff;
+		for (cnt = 0; cnt < event_ex_cur_con->num_of_attribs; cnt++) {
+			if (event_ex_cur_con->attribs[cnt].attrib_type ==
+				WLAN_HDR_ATTRIB_MAC_ADDR) {
+				IPADBG("%02x:%02x:%02x:%02x:%02x:%02x,(%d)\n",
+				event_ex_cur_con->attribs[cnt].u.mac_addr[0],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[1],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[2],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[3],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[4],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[5],
+				meta->msg_type);
+			}
+		}
+
+		/*
+		 * Build the duplicate before taking the list lock so an
+		 * allocation failure cannot return with the mutex held.
+		 */
+		msg_dup = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL);
+		if (msg_dup == NULL) {
+			IPAERR("fail to alloc ipa_msg container\n");
+			return -ENOMEM;
+		}
+		msg_dup->meta = *meta;
+		if (meta->msg_len > 0 && buff) {
+			data_dup = kmalloc(meta->msg_len, GFP_KERNEL);
+			if (data_dup == NULL) {
+				IPAERR("fail to alloc data_dup container\n");
+				kfree(msg_dup);
+				return -ENOMEM;
+			}
+			memcpy(data_dup, buff, meta->msg_len);
+			msg_dup->buff = data_dup;
+			msg_dup->callback = ipa2_send_msg_free;
+		}
+		mutex_lock(&ipa_ctx->msg_wlan_client_lock);
+		list_add_tail(&msg_dup->link, &ipa_ctx->msg_wlan_client_list);
+		mutex_unlock(&ipa_ctx->msg_wlan_client_lock);
+	}
+
+	/* remove the cache */
+	if (meta->msg_type == WLAN_CLIENT_DISCONNECT) {
+		/* debug print */
+		event_ex_cur_discon = buff;
+		IPADBG("Mac %02x:%02x:%02x:%02x:%02x:%02x,msg %d\n",
+		event_ex_cur_discon->mac_addr[0],
+		event_ex_cur_discon->mac_addr[1],
+		event_ex_cur_discon->mac_addr[2],
+		event_ex_cur_discon->mac_addr[3],
+		event_ex_cur_discon->mac_addr[4],
+		event_ex_cur_discon->mac_addr[5],
+		meta->msg_type);
+		memcpy(mac2,
+			event_ex_cur_discon->mac_addr,
+			sizeof(mac2));
+
+		mutex_lock(&ipa_ctx->msg_wlan_client_lock);
+		list_for_each_entry_safe(entry, next,
+				&ipa_ctx->msg_wlan_client_list,
+				link) {
+			event_ex_list = entry->buff;
+			/* entries cached with no payload cannot match */
+			if (event_ex_list == NULL) {
+				total++;
+				continue;
+			}
+			max = event_ex_list->num_of_attribs;
+			for (cnt = 0; cnt < max; cnt++) {
+				if (event_ex_list->attribs[cnt].attrib_type !=
+					WLAN_HDR_ATTRIB_MAC_ADDR)
+					continue;
+				memcpy(mac,
+					event_ex_list->attribs[cnt].u.mac_addr,
+					sizeof(mac));
+				pr_debug("%02x:%02x:%02x:%02x:%02x:%02x\n",
+					mac[0], mac[1], mac[2],
+					mac[3], mac[4], mac[5]);
+
+				/* compare to delete one */
+				if (memcmp(mac2, mac, sizeof(mac)) == 0) {
+					IPADBG("clean %d\n", total);
+					list_del(&entry->link);
+					/* free the cached payload too */
+					kfree(entry->buff);
+					kfree(entry);
+					break;
+				}
+			}
+			total++;
+		}
+		mutex_unlock(&ipa_ctx->msg_wlan_client_lock);
+	}
+	return 0;
+}
+
/**
* ipa2_send_msg() - Send "message" from kernel client to IPA driver
* @meta: [in] message meta-data
}
if (meta == NULL || (buff == NULL && callback != NULL) ||
- (buff != NULL && callback == NULL)) {
+ (buff != NULL && callback == NULL) || buff == NULL) {
IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
meta, buff, callback);
return -EINVAL;
mutex_lock(&ipa_ctx->msg_lock);
list_add_tail(&msg->link, &ipa_ctx->msg_list);
+ /* support for softap client event cache */
+ if (wlan_msg_process(meta, buff))
+ IPAERR("wlan_msg_process failed\n");
+
+ /* unlock only after process */
mutex_unlock(&ipa_ctx->msg_lock);
IPA_STATS_INC_CNT(ipa_ctx->stats.msg_w[meta->msg_type]);
}
/**
+ * ipa2_resend_wlan_msg() - Resend cached "message" to IPACM
+ *
+ * resend wlan client connect events to user-space
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_resend_wlan_msg(void)
+{
+	struct ipa_wlan_msg_ex *event_ex_list = NULL;
+	struct ipa_push_msg *entry;
+	struct ipa_push_msg *next;
+	int cnt = 0, total = 0;
+	struct ipa_push_msg *msg;
+	void *data = NULL;
+	/* copies are staged here so msg_lock is never taken while
+	 * msg_wlan_client_lock is held; ipa2_send_msg() nests the locks
+	 * the other way round and nesting them both ways risks an ABBA
+	 * deadlock
+	 */
+	LIST_HEAD(pending);
+
+	IPADBG("\n");
+
+	mutex_lock(&ipa_ctx->msg_wlan_client_lock);
+	list_for_each_entry_safe(entry, next, &ipa_ctx->msg_wlan_client_list,
+			link) {
+
+		/* entries cached without a payload cannot be replayed */
+		if (entry->buff == NULL || entry->meta.msg_len == 0)
+			continue;
+
+		event_ex_list = entry->buff;
+		for (cnt = 0; cnt < event_ex_list->num_of_attribs; cnt++) {
+			if (event_ex_list->attribs[cnt].attrib_type ==
+				WLAN_HDR_ATTRIB_MAC_ADDR) {
+				IPADBG("%d-Mac %02x:%02x:%02x:%02x:%02x:%02x\n",
+				total,
+				event_ex_list->attribs[cnt].u.mac_addr[0],
+				event_ex_list->attribs[cnt].u.mac_addr[1],
+				event_ex_list->attribs[cnt].u.mac_addr[2],
+				event_ex_list->attribs[cnt].u.mac_addr[3],
+				event_ex_list->attribs[cnt].u.mac_addr[4],
+				event_ex_list->attribs[cnt].u.mac_addr[5]);
+			}
+		}
+
+		msg = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL);
+		if (msg == NULL) {
+			IPAERR("fail to alloc ipa_msg container\n");
+			goto fail;
+		}
+		msg->meta = entry->meta;
+		data = kmalloc(entry->meta.msg_len, GFP_KERNEL);
+		if (data == NULL) {
+			IPAERR("fail to alloc data container\n");
+			kfree(msg);
+			goto fail;
+		}
+		memcpy(data, entry->buff, entry->meta.msg_len);
+		msg->buff = data;
+		msg->callback = ipa2_send_msg_free;
+		list_add_tail(&msg->link, &pending);
+
+		total++;
+	}
+	mutex_unlock(&ipa_ctx->msg_wlan_client_lock);
+
+	/* deliver the staged copies with only msg_lock held */
+	mutex_lock(&ipa_ctx->msg_lock);
+	list_splice_tail(&pending, &ipa_ctx->msg_list);
+	mutex_unlock(&ipa_ctx->msg_lock);
+	if (total)
+		wake_up(&ipa_ctx->msg_waitq);
+	return 0;
+
+fail:
+	mutex_unlock(&ipa_ctx->msg_wlan_client_lock);
+	/* undo any copies staged before the allocation failure */
+	list_for_each_entry_safe(msg, next, &pending, link) {
+		list_del(&msg->link);
+		kfree(msg->buff);
+		kfree(msg);
+	}
+	return -ENOMEM;
+}
+
+/**
* ipa2_register_pull_msg() - register pull message type
* @meta: [in] message meta-data
* @callback: [in] pull callback
base_addr = ipa_ctx->nat_mem.tmp_dma_handle;
}
- if (del->public_ip_addr == 0) {
- IPADBG("Bad Parameter\n");
- result = -EPERM;
- goto bail;
- }
-
memset(&desc, 0, sizeof(desc));
/* NO-OP IC for ensuring that IPA pipeline is empty */
reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
int rc;
int i;
+ /* check if modem up */
+ if (!qmi_indication_fin ||
+ !qmi_modem_init_fin ||
+ !ipa_q6_clnt) {
+ IPAWANDBG("modem QMI haven't up yet\n");
+ return -EINVAL;
+ }
+
/* check if the filter rules from IPACM is valid */
if (req->filter_spec_list_len == 0) {
IPAWANDBG("IPACM pass zero rules to Q6\n");
}
static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
- const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+ const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl,
+ bool user)
{
struct ipa_rt_tbl *tbl;
struct ipa_rt_entry *entry;
IPADBG_LOW("rule_cnt=%d\n", tbl->rule_cnt);
*rule_hdl = id;
entry->id = id;
+ entry->ipacm_installed = user;
return 0;
*/
int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
{
+ return ipa2_add_rt_rule_usr(rules, false);
+}
+
+/**
+ * ipa2_add_rt_rule_usr() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ * @user_only: [in] indicate installed by userspace module
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only)
+{
int i;
int ret;
if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
&rules->rules[i].rule,
rules->rules[i].at_rear,
- &rules->rules[i].rt_rule_hdl)) {
+ &rules->rules[i].rt_rule_hdl,
+ user_only)) {
IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
/**
* ipa2_reset_rt() - reset the current SW routing table of specified type
* (does not commit to HW)
- * @ip: The family of routing tables
+ * @ip: [in] The family of routing tables
+ * @user_only: [in] indicate delete rules installed by userspace
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa2_reset_rt(enum ipa_ip_type ip)
+int ipa2_reset_rt(enum ipa_ip_type ip, bool user_only)
{
struct ipa_rt_tbl *tbl;
struct ipa_rt_tbl *tbl_next;
struct ipa_rt_tbl_set *rset;
u32 apps_start_idx;
int id;
+ bool tbl_user = false;
if (ip >= IPA_IP_MAX) {
IPAERR_RL("bad parm\n");
* issue a reset on the filtering module of same IP type since
* filtering rules point to routing tables
*/
- if (ipa2_reset_flt(ip))
+ if (ipa2_reset_flt(ip, user_only))
IPAERR_RL("fail to reset flt ip=%d\n", ip);
set = &ipa_ctx->rt_tbl_set[ip];
mutex_lock(&ipa_ctx->lock);
IPADBG("reset rt ip=%d\n", ip);
list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+ tbl_user = false;
list_for_each_entry_safe(rule, rule_next,
&tbl->head_rt_rule_list, link) {
if (ipa_id_find(rule->id) == NULL) {
return -EFAULT;
}
+ /* indicate if tbl used for user-specified rules*/
+ if (rule->ipacm_installed) {
+ IPADBG("tbl_user %d, tbl-index %d\n",
+ tbl_user, tbl->id);
+ tbl_user = true;
+ }
/*
* for the "default" routing tbl, remove all but the
* last rule
*/
if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1)
continue;
-
- list_del(&rule->link);
- tbl->rule_cnt--;
- if (rule->hdr)
- __ipa_release_hdr(rule->hdr->id);
- else if (rule->proc_ctx)
- __ipa_release_hdr_proc_ctx(rule->proc_ctx->id);
- rule->cookie = 0;
- id = rule->id;
- kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
-
- /* remove the handle from the database */
- ipa_id_remove(id);
+ if (!user_only ||
+ rule->ipacm_installed) {
+ list_del(&rule->link);
+ tbl->rule_cnt--;
+ if (rule->hdr)
+ __ipa_release_hdr(rule->hdr->id);
+ else if (rule->proc_ctx)
+ __ipa_release_hdr_proc_ctx(
+ rule->proc_ctx->id);
+ rule->cookie = 0;
+ id = rule->id;
+ kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
+
+ /* remove the handle from the database */
+ ipa_id_remove(id);
+ }
}
if (ipa_id_find(tbl->id) == NULL) {
/* do not remove the "default" routing tbl which has index 0 */
if (tbl->idx != apps_start_idx) {
- if (!tbl->in_sys) {
- list_del(&tbl->link);
- set->tbl_cnt--;
- clear_bit(tbl->idx,
- &ipa_ctx->rt_idx_bitmap[ip]);
- IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
- tbl->idx, set->tbl_cnt);
- kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
- } else {
- list_move(&tbl->link, &rset->head_rt_tbl_list);
- clear_bit(tbl->idx,
- &ipa_ctx->rt_idx_bitmap[ip]);
- set->tbl_cnt--;
- IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
- tbl->idx, set->tbl_cnt);
+ if (!user_only || tbl_user) {
+ if (!tbl->in_sys) {
+ list_del(&tbl->link);
+ set->tbl_cnt--;
+ clear_bit(tbl->idx,
+ &ipa_ctx->rt_idx_bitmap[ip]);
+ IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ kmem_cache_free(ipa_ctx->rt_tbl_cache,
+ tbl);
+ } else {
+ list_move(&tbl->link,
+ &rset->head_rt_tbl_list);
+ clear_bit(tbl->idx,
+ &ipa_ctx->rt_idx_bitmap[ip]);
+ set->tbl_cnt--;
+ IPADBG("rst tbl_idx=%d cnt=%d\n",
+ tbl->idx, set->tbl_cnt);
+ }
+ /* remove the handle from the database */
+ ipa_id_remove(id);
}
- /* remove the handle from the database */
- ipa_id_remove(id);
}
}
mutex_unlock(&ipa_ctx->lock);
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
api_ctrl->ipa_cfg_ep_holb_by_client = ipa2_cfg_ep_holb_by_client;
api_ctrl->ipa_cfg_ep_ctrl = ipa2_cfg_ep_ctrl;
api_ctrl->ipa_add_hdr = ipa2_add_hdr;
+ api_ctrl->ipa_add_hdr_usr = ipa2_add_hdr_usr;
api_ctrl->ipa_del_hdr = ipa2_del_hdr;
api_ctrl->ipa_commit_hdr = ipa2_commit_hdr;
api_ctrl->ipa_reset_hdr = ipa2_reset_hdr;
api_ctrl->ipa_add_hdr_proc_ctx = ipa2_add_hdr_proc_ctx;
api_ctrl->ipa_del_hdr_proc_ctx = ipa2_del_hdr_proc_ctx;
api_ctrl->ipa_add_rt_rule = ipa2_add_rt_rule;
+ api_ctrl->ipa_add_rt_rule_usr = ipa2_add_rt_rule_usr;
api_ctrl->ipa_del_rt_rule = ipa2_del_rt_rule;
api_ctrl->ipa_commit_rt = ipa2_commit_rt;
api_ctrl->ipa_reset_rt = ipa2_reset_rt;
api_ctrl->ipa_query_rt_index = ipa2_query_rt_index;
api_ctrl->ipa_mdfy_rt_rule = ipa2_mdfy_rt_rule;
api_ctrl->ipa_add_flt_rule = ipa2_add_flt_rule;
+ api_ctrl->ipa_add_flt_rule_usr = ipa2_add_flt_rule_usr;
api_ctrl->ipa_del_flt_rule = ipa2_del_flt_rule;
api_ctrl->ipa_mdfy_flt_rule = ipa2_mdfy_flt_rule;
api_ctrl->ipa_commit_flt = ipa2_commit_flt;
return cnt;
}
+/**
+ * ipa3_clean_modem_rule() - request Q6 to clear the modem filter rules
+ *
+ * Sends a filter-install request carrying an empty rule list. On
+ * hardware older than IPA v3.0 the legacy request message is used;
+ * otherwise the extended (_ex) request message is used. The request
+ * buffer is heap-allocated (the QMI request structs are large) and
+ * freed before returning.
+ *
+ * Returns: 0 on success, -ENOMEM on allocation failure, or the
+ * QMI send result otherwise.
+ */
+static int ipa3_clean_modem_rule(void)
+{
+	struct ipa_install_fltr_rule_req_msg_v01 *req;
+	struct ipa_install_fltr_rule_req_ex_msg_v01 *req_ex;
+	int val = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v3_0) {
+		/* pre-v3.0 HW: legacy request format */
+		req = kzalloc(
+			sizeof(struct ipa_install_fltr_rule_req_msg_v01),
+			GFP_KERNEL);
+		if (!req) {
+			IPAERR("mem allocated failed!\n");
+			return -ENOMEM;
+		}
+		/* empty spec list => clear all modem filter rules */
+		req->filter_spec_list_valid = false;
+		req->filter_spec_list_len = 0;
+		req->source_pipe_index_valid = 0;
+		val = ipa3_qmi_filter_request_send(req);
+		kfree(req);
+	} else {
+		/* v3.0+ HW: extended request format */
+		req_ex = kzalloc(
+			sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01),
+			GFP_KERNEL);
+		if (!req_ex) {
+			IPAERR("mem allocated failed!\n");
+			return -ENOMEM;
+		}
+		/* empty spec list => clear all modem filter rules */
+		req_ex->filter_spec_ex_list_valid = false;
+		req_ex->filter_spec_ex_list_len = 0;
+		req_ex->source_pipe_index_valid = 0;
+		val = ipa3_qmi_filter_request_ex_send(req_ex);
+		kfree(req_ex);
+	}
+
+	return val;
+}
+
static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
unsigned long event, void *ptr)
{
kfree(buff);
}
-static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_cache)
+static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type,
+ bool is_cache)
{
int retval;
struct ipa_wan_msg *wan_msg;
retval = -EFAULT;
break;
}
- if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+ if (ipa3_add_hdr_usr((struct ipa_ioc_add_hdr *)param,
+ true)) {
retval = -EFAULT;
break;
}
retval = -EFAULT;
break;
}
- if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+ if (ipa3_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param,
+ true)) {
retval = -EFAULT;
break;
}
retval = -EFAULT;
break;
}
- if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+ if (ipa3_add_flt_rule_usr((struct ipa_ioc_add_flt_rule *)param,
+ true)) {
retval = -EFAULT;
break;
}
retval = ipa3_commit_hdr();
break;
case IPA_IOC_RESET_HDR:
- retval = ipa3_reset_hdr();
+ retval = ipa3_reset_hdr(false);
break;
case IPA_IOC_COMMIT_RT:
retval = ipa3_commit_rt(arg);
break;
case IPA_IOC_RESET_RT:
- retval = ipa3_reset_rt(arg);
+ retval = ipa3_reset_rt(arg, false);
break;
case IPA_IOC_COMMIT_FLT:
retval = ipa3_commit_flt(arg);
break;
case IPA_IOC_RESET_FLT:
- retval = ipa3_reset_flt(arg);
+ retval = ipa3_reset_flt(arg, false);
break;
case IPA_IOC_GET_RT_TBL:
if (copy_from_user(header, (u8 *)arg,
break;
}
if (ipa3_add_hdr_proc_ctx(
- (struct ipa_ioc_add_hdr_proc_ctx *)param)) {
+ (struct ipa_ioc_add_hdr_proc_ctx *)param, true)) {
retval = -EFAULT;
break;
}
}
break;
- default: /* redundant, as cmd was checked against MAXNR */
+ case IPA_IOC_CLEANUP:
+	/* Route and filter rules will also be cleaned */
+ IPADBG("Got IPA_IOC_CLEANUP\n");
+ retval = ipa3_reset_hdr(true);
+ memset(&nat_del, 0, sizeof(nat_del));
+ nat_del.table_index = 0;
+ retval = ipa3_nat_del_cmd(&nat_del);
+ retval = ipa3_clean_modem_rule();
+ break;
+
+ case IPA_IOC_QUERY_WLAN_CLIENT:
+ IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n");
+ retval = ipa3_resend_wlan_msg();
+ break;
+
+ default:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -ENOTTY;
}
}
/**
-* ipa3_setup_dflt_rt_tables() - Setup default routing tables
-*
-* Return codes:
-* 0: success
-* -ENOMEM: failed to allocate memory
-* -EPERM: failed to add the tables
-*/
+ * ipa3_setup_dflt_rt_tables() - Setup default routing tables
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the tables
+ */
int ipa3_setup_dflt_rt_tables(void)
{
struct ipa_ioc_add_rt_rule *rt_rule;
}
/**
-* ipa3_init_q6_smem() - Initialize Q6 general memory and
-* header memory regions in IPA.
-*
-* Return codes:
-* 0: success
-* -ENOMEM: failed to allocate dma memory
-* -EFAULT: failed to send IPA command to initialize the memory
-*/
+ * ipa3_init_q6_smem() - Initialize Q6 general memory and
+ * header memory regions in IPA.
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate dma memory
+ * -EFAULT: failed to send IPA command to initialize the memory
+ */
int ipa3_init_q6_smem(void)
{
int rc;
}
/**
-* ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
-* in IPA HW. This is performed in case of SSR.
-*
-* This is a mandatory procedure, in case one of the steps fails, the
-* AP needs to restart.
-*/
+ * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
+ * in IPA HW. This is performed in case of SSR.
+ *
+ * This is a mandatory procedure, in case one of the steps fails, the
+ * AP needs to restart.
+ */
void ipa3_q6_pre_shutdown_cleanup(void)
{
IPADBG_LOW("ENTER\n");
BUG();
}
/* Remove delay from Q6 PRODs to avoid pending descriptors
- * on pipe reset procedure
- */
+ * on pipe reset procedure
+ */
ipa3_q6_pipe_delay(false);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
/**
-* ipa3_enable_clks() - Turn on IPA clocks
-*
-* Return codes:
-* None
-*/
+ * ipa3_enable_clks() - Turn on IPA clocks
+ *
+ * Return codes:
+ * None
+ */
void ipa3_enable_clks(void)
{
IPADBG("enabling IPA clocks and bus voting\n");
}
/**
-* ipa3_disable_clks() - Turn off IPA clocks
-*
-* Return codes:
-* None
-*/
+ * ipa3_disable_clks() - Turn off IPA clocks
+ *
+ * Return codes:
+ * None
+ */
void ipa3_disable_clks(void)
{
IPADBG("disabling IPA clocks and bus voting\n");
}
/**
-* ipa3_active_clients_log_mod() - Log a modification in the active clients
-* reference count
-*
-* This method logs any modification in the active clients reference count:
-* It logs the modification in the circular history buffer
-* It logs the modification in the hash table - looking for an entry,
-* creating one if needed and deleting one if needed.
-*
-* @id: ipa3_active client logging info struct to hold the log information
-* @inc: a boolean variable to indicate whether the modification is an increase
-* or decrease
-* @int_ctx: a boolean variable to indicate whether this call is being made from
-* an interrupt context and therefore should allocate GFP_ATOMIC memory
-*
-* Method process:
-* - Hash the unique identifier string
-* - Find the hash in the table
-* 1)If found, increase or decrease the reference count
-* 2)If not found, allocate a new hash table entry struct and initialize it
-* - Remove and deallocate unneeded data structure
-* - Log the call in the circular history buffer (unless it is a simple call)
-*/
+ * ipa3_active_clients_log_mod() - Log a modification in the active clients
+ * reference count
+ *
+ * This method logs any modification in the active clients reference count:
+ * It logs the modification in the circular history buffer
+ * It logs the modification in the hash table - looking for an entry,
+ * creating one if needed and deleting one if needed.
+ *
+ * @id: ipa3_active client logging info struct to hold the log information
+ * @inc: a boolean variable to indicate whether the modification is an increase
+ * or decrease
+ * @int_ctx: a boolean variable to indicate whether this call is being made from
+ * an interrupt context and therefore should allocate GFP_ATOMIC memory
+ *
+ * Method process:
+ * - Hash the unique identifier string
+ * - Find the hash in the table
+ * 1)If found, increase or decrease the reference count
+ * 2)If not found, allocate a new hash table entry struct and initialize it
+ * - Remove and deallocate unneeded data structure
+ * - Log the call in the circular history buffer (unless it is a simple call)
+ */
void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
bool inc, bool int_ctx)
{
}
/**
-* ipa3_inc_client_enable_clks() - Increase active clients counter, and
-* enable ipa clocks if necessary
-*
-* Return codes:
-* None
-*/
+ * ipa3_inc_client_enable_clks() - Increase active clients counter, and
+ * enable ipa clocks if necessary
+ *
+ * Return codes:
+ * None
+ */
void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
{
ipa3_active_clients_lock();
}
/**
-* ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
-* clients if no asynchronous actions should be done. Asynchronous actions are
-* locking a mutex and waking up IPA HW.
-*
-* Return codes: 0 for success
-* -EPERM if an asynchronous action should have been done
-*/
+ * ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
+ * clients if no asynchronous actions should be done. Asynchronous actions are
+ * locking a mutex and waking up IPA HW.
+ *
+ * Return codes: 0 for success
+ * -EPERM if an asynchronous action should have been done
+ */
int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
*id)
{
}
/**
-* ipa3_inc_acquire_wakelock() - Increase active clients counter, and
-* acquire wakelock if necessary
-*
-* Return codes:
-* None
-*/
+ * ipa3_inc_acquire_wakelock() - Increase active clients counter, and
+ * acquire wakelock if necessary
+ *
+ * Return codes:
+ * None
+ */
void ipa3_inc_acquire_wakelock(void)
{
unsigned long flags;
}
/**
-* ipa3_suspend_handler() - Handles the suspend interrupt:
-* wakes up the suspended peripheral by requesting its consumer
-* @interrupt: Interrupt type
-* @private_data: The client's private data
-* @interrupt_data: Interrupt specific information data
-*/
+ * ipa3_suspend_handler() - Handles the suspend interrupt:
+ * wakes up the suspended peripheral by requesting its consumer
+ * @interrupt: Interrupt type
+ * @private_data: The client's private data
+ * @interrupt_data: Interrupt specific information data
+ */
void ipa3_suspend_handler(enum ipa_irq_type interrupt,
void *private_data,
void *interrupt_data)
}
/**
-* ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
-* as it was registered in the IPA init sequence.
-* Return codes:
-* 0: success
-* -EPERM: failed to remove current handler or failed to add original handler
-* */
+ * ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
+ * as it was registered in the IPA init sequence.
+ * Return codes:
+ * 0: success
+ * -EPERM: failed to remove current handler or failed to add original handler
+ */
int ipa3_restore_suspend_handler(void)
{
int result = 0;
}
/**
-* ipa3_pre_init() - Initialize the IPA Driver.
-* This part contains all initialization which doesn't require IPA HW, such
-* as structure allocations and initializations, register writes, etc.
-*
-* @resource_p: contain platform specific values from DST file
-* @pdev: The platform device structure representing the IPA driver
-*
-* Function initialization process:
-* - Allocate memory for the driver context data struct
-* - Initializing the ipa3_ctx with:
-* 1)parsed values from the dts file
-* 2)parameters passed to the module initialization
-* 3)read HW values(such as core memory size)
-* - Map IPA core registers to CPU memory
-* - Restart IPA core(HW reset)
-* - Set configuration for IPA BAM via BAM_CNFG_BITS
-* - Initialize the look-aside caches(kmem_cache/slab) for filter,
-* routing and IPA-tree
-* - Create memory pool with 4 objects for DMA operations(each object
-* is 512Bytes long), this object will be use for tx(A5->IPA)
-* - Initialize lists head(routing,filter,hdr,system pipes)
-* - Initialize mutexes (for ipa_ctx and NAT memory mutexes)
-* - Initialize spinlocks (for list related to A5<->IPA pipes)
-* - Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
-* - Initialize Red-Black-Tree(s) for handles of header,routing rule,
-* routing table ,filtering rule
-* - Initialize the filter block by committing IPV4 and IPV6 default rules
-* - Create empty routing table in system memory(no committing)
-* - Initialize pipes memory pool with ipa3_pipe_mem_init for supported platforms
-* - Create a char-device for IPA
-* - Initialize IPA RM (resource manager)
-* - Configure GSI registers (in GSI case)
-*/
+ * ipa3_pre_init() - Initialize the IPA Driver.
+ * This part contains all initialization which doesn't require IPA HW, such
+ * as structure allocations and initializations, register writes, etc.
+ *
+ * @resource_p: contain platform specific values from DST file
+ * @pdev: The platform device structure representing the IPA driver
+ *
+ * Function initialization process:
+ * Allocate memory for the driver context data struct
+ * Initializing the ipa3_ctx with :
+ * 1)parsed values from the dts file
+ * 2)parameters passed to the module initialization
+ * 3)read HW values(such as core memory size)
+ * Map IPA core registers to CPU memory
+ * Restart IPA core(HW reset)
+ * Initialize the look-aside caches(kmem_cache/slab) for filter,
+ * routing and IPA-tree
+ * Create memory pool with 4 objects for DMA operations(each object
+ * is 512Bytes long), this object will be use for tx(A5->IPA)
+ * Initialize lists head(routing, hdr, system pipes)
+ * Initialize mutexes (for ipa_ctx and NAT memory mutexes)
+ * Initialize spinlocks (for list related to A5<->IPA pipes)
+ * Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
+ * Initialize Red-Black-Tree(s) for handles of header,routing rule,
+ * routing table ,filtering rule
+ * Initialize the filter block by committing IPV4 and IPV6 default rules
+ * Create empty routing table in system memory(no committing)
+ * Create a char-device for IPA
+ * Initialize IPA RM (resource manager)
+ * Configure GSI registers (in GSI case)
+ */
static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
struct device *ipa_dev)
{
init_waitqueue_head(&ipa3_ctx->msg_waitq);
mutex_init(&ipa3_ctx->msg_lock);
+ /* store wlan client-connect-msg-list */
+ INIT_LIST_HEAD(&ipa3_ctx->msg_wlan_client_list);
+ mutex_init(&ipa3_ctx->msg_wlan_client_lock);
+
mutex_init(&ipa3_ctx->lock);
mutex_init(&ipa3_ctx->nat_mem.lock);
mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex);
*
* Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
* This will postpone the suspend operation until IPA is no longer used by AP.
-*/
+ */
int ipa3_ap_suspend(struct device *dev)
{
int i;
}
/**
-* ipa3_ap_resume() - resume callback for runtime_pm
-* @dev: pointer to device
-*
-* This callback will be invoked by the runtime_pm framework when an AP resume
-* operation is invoked.
-*
-* Always returns 0 since resume should always succeed.
-*/
+ * ipa3_ap_resume() - resume callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP resume
+ * operation is invoked.
+ *
+ * Always returns 0 since resume should always succeed.
+ */
int ipa3_ap_resume(struct device *dev)
{
return 0;
static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl,
- struct ipa3_flt_tbl *tbl)
+ struct ipa3_flt_tbl *tbl, bool user)
{
int id;
}
}
(*entry)->rule_id = id;
+ (*entry)->ipacm_installed = user;
return 0;
static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
const struct ipa_flt_rule *rule, u8 add_rear,
- u32 *rule_hdl)
+ u32 *rule_hdl, bool user)
{
struct ipa3_flt_entry *entry;
struct ipa3_rt_tbl *rt_tbl = NULL;
if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
goto error;
- if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
+ if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, user))
goto error;
if (add_rear) {
if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
goto error;
- if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
+ if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, true))
goto error;
list_add(&entry->link, &((*add_after_entry)->link));
static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
const struct ipa_flt_rule *rule, u8 add_rear,
- u32 *rule_hdl)
+ u32 *rule_hdl, bool user)
{
struct ipa3_flt_tbl *tbl;
int ipa_ep_idx;
tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);
- return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+ return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user);
}
/**
* ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally
* commit to IPA HW
+ * @rules: [inout] set of filtering rules to add
*
* Returns: 0 on success, negative on failure
*
*/
int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
{
+ return ipa3_add_flt_rule_usr(rules, false);
+}
+/**
+ * ipa3_add_flt_rule_usr() - Add the specified filtering rules to
+ * SW and optionally commit to IPA HW
+ * @rules: [inout] set of filtering rules to add
+ * @user_only: [in] indicate rules installed by userspace
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only)
+{
int i;
int result;
result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
&rules->rules[i].rule,
rules->rules[i].at_rear,
- &rules->rules[i].flt_rule_hdl);
+ &rules->rules[i].flt_rule_hdl,
+ user_only);
else
result = -1;
* ipa3_reset_flt() - Reset the current SW filtering table of specified type
* (does not commit to HW)
* @ip: [in] the family of routing tables
+ * @user_only: [in] indicate rules deleted by userspace
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa3_reset_flt(enum ipa_ip_type ip)
+int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only)
{
struct ipa3_flt_tbl *tbl;
struct ipa3_flt_entry *entry;
struct ipa3_flt_entry *next;
int i;
int id;
+ int rule_id;
if (ip >= IPA_IP_MAX) {
IPAERR_RL("bad parm\n");
mutex_unlock(&ipa3_ctx->lock);
return -EFAULT;
}
- list_del(&entry->link);
- entry->tbl->rule_cnt--;
- if (entry->rt_tbl)
- entry->rt_tbl->ref_cnt--;
- /* if rule id was allocated from idr, remove it */
- if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
- (entry->rule_id >= ipahal_get_low_rule_id()))
- idr_remove(&entry->tbl->rule_ids,
- entry->rule_id);
- entry->cookie = 0;
- id = entry->id;
- kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
-
- /* remove the handle from the database */
- ipa3_id_remove(id);
+
+ if (!user_only ||
+ entry->ipacm_installed) {
+ list_del(&entry->link);
+ entry->tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ /* if rule id was allocated from idr, remove */
+ rule_id = entry->rule_id;
+ id = entry->id;
+ if ((rule_id < ipahal_get_rule_id_hi_bit()) &&
+ (rule_id >= ipahal_get_low_rule_id()))
+ idr_remove(&entry->tbl->rule_ids,
+ rule_id);
+ entry->cookie = 0;
+ kmem_cache_free(ipa3_ctx->flt_rule_cache,
+ entry);
+
+ /* remove the handle from the database */
+ ipa3_id_remove(id);
+ }
}
}
mutex_unlock(&ipa3_ctx->lock);
tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
rule.action = IPA_PASS_TO_EXCEPTION;
__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
- &ep->dflt_flt4_rule_hdl);
+ &ep->dflt_flt4_rule_hdl, false);
ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
tbl->sticky_rear = true;
tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
rule.action = IPA_PASS_TO_EXCEPTION;
__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
- &ep->dflt_flt6_rule_hdl);
+ &ep->dflt_flt6_rule_hdl, false);
ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
tbl->sticky_rear = true;
mutex_unlock(&ipa3_ctx->lock);
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
}
static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
- bool add_ref_hdr)
+ bool add_ref_hdr, bool user_only)
{
struct ipa3_hdr_entry *hdr_entry;
struct ipa3_hdr_proc_ctx_entry *entry;
if (add_ref_hdr)
hdr_entry->ref_cnt++;
entry->cookie = IPA_PROC_HDR_COOKIE;
+ entry->ipacm_installed = user_only;
needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
*/
offset->offset = htbl->end;
offset->bin = bin;
+ offset->ipacm_installed = user_only;
htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
list_add(&offset->link,
&htbl->head_offset_list[bin]);
offset =
list_first_entry(&htbl->head_free_offset_list[bin],
struct ipa3_hdr_proc_ctx_offset_entry, link);
+ offset->ipacm_installed = user_only;
list_move(&offset->link, &htbl->head_offset_list[bin]);
}
}
-static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
{
struct ipa3_hdr_entry *entry;
struct ipa_hdr_offset_entry *offset = NULL;
entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
entry->eth2_ofst = hdr->eth2_ofst;
entry->cookie = IPA_HDR_COOKIE;
+ entry->ipacm_installed = user;
if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
bin = IPA_HDR_BIN0;
list_add(&offset->link,
&htbl->head_offset_list[bin]);
entry->offset_entry = offset;
+ offset->ipacm_installed = user;
}
} else {
entry->is_hdr_proc_ctx = false;
struct ipa_hdr_offset_entry, link);
list_move(&offset->link, &htbl->head_offset_list[bin]);
entry->offset_entry = offset;
+ offset->ipacm_installed = user;
}
list_add(&entry->link, &htbl->head_hdr_entry_list);
IPADBG("adding processing context for header %s\n", hdr->name);
proc_ctx.type = IPA_HDR_PROC_NONE;
proc_ctx.hdr_hdl = id;
- if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
+ if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) {
IPAERR("failed to add hdr proc ctx\n");
goto fail_add_proc_ctx;
}
*/
int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
{
+ return ipa3_add_hdr_usr(hdrs, false);
+}
+
+/**
+ * ipa3_add_hdr_usr() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @hdrs: [inout] set of headers to add
+ * @user_only: [in] indicate installed from user
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only)
+{
int i;
int result = -EFAULT;
IPADBG("adding %d headers to IPA driver internal data struct\n",
hdrs->num_hdrs);
for (i = 0; i < hdrs->num_hdrs; i++) {
- if (__ipa_add_hdr(&hdrs->hdr[i])) {
+ if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) {
IPAERR_RL("failed to add hdr %d\n", i);
hdrs->hdr[i].status = -1;
} else {
* ipa3_add_hdr_proc_ctx() - add the specified headers to SW
* and optionally commit them to IPA HW
* @proc_ctxs: [inout] set of processing context headers to add
+ * @user_only: [in] indicate installed by user-space module
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+ bool user_only)
{
int i;
int result = -EFAULT;
IPADBG("adding %d header processing contextes to IPA driver\n",
proc_ctxs->num_proc_ctxs);
for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
- if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
+ if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i],
+ true, user_only)) {
+			IPAERR_RL("failed to add hdr proc ctx %d\n", i);
proc_ctxs->proc_ctx[i].status = -1;
} else {
* ipa3_reset_hdr() - reset the current header table in SW (does not commit to
* HW)
*
+ * @user_only: [in] indicate delete rules installed by userspace
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa3_reset_hdr(void)
+int ipa3_reset_hdr(bool user_only)
{
struct ipa3_hdr_entry *entry;
struct ipa3_hdr_entry *next;
* issue a reset on the routing module since routing rules point to
* header table entries
*/
- if (ipa3_reset_rt(IPA_IP_v4))
+ if (ipa3_reset_rt(IPA_IP_v4, user_only))
IPAERR("fail to reset v4 rt\n");
- if (ipa3_reset_rt(IPA_IP_v6))
+ if (ipa3_reset_rt(IPA_IP_v6, user_only))
IPAERR("fail to reset v4 rt\n");
mutex_lock(&ipa3_ctx->lock);
WARN_ON(1);
return -EFAULT;
}
- if (entry->is_hdr_proc_ctx) {
- dma_unmap_single(ipa3_ctx->pdev,
- entry->phys_base,
- entry->hdr_len,
- DMA_TO_DEVICE);
- entry->proc_ctx = NULL;
- }
- list_del(&entry->link);
- entry->ref_cnt = 0;
- entry->cookie = 0;
- /* remove the handle from the database */
- ipa3_id_remove(entry->id);
- kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+ if (!user_only || entry->ipacm_installed) {
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa3_ctx->pdev,
+ entry->phys_base,
+ entry->hdr_len,
+ DMA_TO_DEVICE);
+ entry->proc_ctx = NULL;
+ }
+ list_del(&entry->link);
+ entry->ref_cnt = 0;
+ entry->cookie = 0;
+ /* remove the handle from the database */
+ ipa3_id_remove(entry->id);
+ kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+ }
}
for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
list_for_each_entry_safe(off_entry, off_next,
if (off_entry->offset == 0)
continue;
- list_del(&off_entry->link);
- kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
+ if (!user_only ||
+ off_entry->ipacm_installed) {
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa3_ctx->hdr_offset_cache,
+ off_entry);
+ }
}
list_for_each_entry_safe(off_entry, off_next,
&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
link) {
- list_del(&off_entry->link);
- kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
+
+ if (!user_only ||
+ off_entry->ipacm_installed) {
+ list_del(&off_entry->link);
+ kmem_cache_free(ipa3_ctx->hdr_offset_cache,
+ off_entry);
+ }
}
}
/* there is one header of size 8 */
WARN_ON(1);
return -EFAULT;
}
- list_del(&ctx_entry->link);
- ctx_entry->ref_cnt = 0;
- ctx_entry->cookie = 0;
- /* remove the handle from the database */
- ipa3_id_remove(ctx_entry->id);
- kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry);
+ if (!user_only ||
+ ctx_entry->ipacm_installed) {
+ list_del(&ctx_entry->link);
+ ctx_entry->ref_cnt = 0;
+ ctx_entry->cookie = 0;
+ /* remove the handle from the database */
+ ipa3_id_remove(ctx_entry->id);
+ kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache,
+ ctx_entry);
+ }
}
for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
link) {
- list_del(&ctx_off_entry->link);
- kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
+ if (!user_only ||
+ ctx_off_entry->ipacm_installed) {
+ list_del(&ctx_off_entry->link);
+ kmem_cache_free(
+ ipa3_ctx->hdr_proc_ctx_offset_cache,
ctx_off_entry);
+ }
}
list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
&ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
link) {
- list_del(&ctx_off_entry->link);
- kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
- ctx_off_entry);
+
+ if (!user_only ||
+ ctx_off_entry->ipacm_installed) {
+ list_del(&ctx_off_entry->link);
+ kmem_cache_free(
+ ipa3_ctx->hdr_proc_ctx_offset_cache,
+ ctx_off_entry);
+ }
}
}
ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* @prio: rule 10bit priority which defines the order of the rule
* among other rules at the same integrated table
* @rule_id: rule 10bit ID to be returned in packet status
+ * @ipacm_installed: indicate if installed by ipacm
*/
struct ipa3_flt_entry {
struct list_head link;
int id;
u16 prio;
u16 rule_id;
+ bool ipacm_installed;
};
/**
* @is_eth2_ofst_valid: is eth2_ofst field valid?
* @eth2_ofst: offset to start of Ethernet-II/802.3 header
* @user_deleted: is the header deleted by the user?
+ * @ipacm_installed: indicate if installed by ipacm
*/
struct ipa3_hdr_entry {
struct list_head link;
u8 is_eth2_ofst_valid;
u16 eth2_ofst;
bool user_deleted;
+ bool ipacm_installed;
};
/**
* @link: entry's link in global processing context header offset entries list
* @offset: the offset
* @bin: bin
+ * @ipacm_installed: indicate if installed by ipacm
*/
struct ipa3_hdr_proc_ctx_offset_entry {
struct list_head link;
u32 offset;
u32 bin;
+ bool ipacm_installed;
};
/**
* @ref_cnt: reference counter of routing table
* @id: processing context header entry id
* @user_deleted: is the hdr processing context deleted by the user?
+ * @ipacm_installed: indicate if installed by ipacm
*/
struct ipa3_hdr_proc_ctx_entry {
struct list_head link;
u32 ref_cnt;
int id;
bool user_deleted;
+ bool ipacm_installed;
};
/**
* @prio: rule 10bit priority which defines the order of the rule
* among other rules at the integrated same table
* @rule_id: rule 10bit ID to be returned in packet status
+ * @rule_id_valid: indicate whether rule_id is valid
+ * @ipacm_installed: indicate if installed by ipacm
*/
struct ipa3_rt_entry {
struct list_head link;
u16 prio;
u16 rule_id;
u16 rule_id_valid;
+ bool ipacm_installed;
};
/**
struct list_head msg_list;
struct list_head pull_msg_list;
struct mutex msg_lock;
+ struct list_head msg_wlan_client_list;
+ struct mutex msg_wlan_client_lock;
wait_queue_head_t msg_waitq;
enum ipa_hw_type ipa_hw_type;
enum ipa3_hw_mode ipa3_hw_mode;
*/
int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool by_user);
+
int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls);
int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user);
int ipa3_commit_hdr(void);
-int ipa3_reset_hdr(void);
+int ipa3_reset_hdr(bool user_only);
int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup);
/*
* Header Processing Context
*/
-int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+ bool user_only);
int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
*/
int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules,
+ bool user_only);
+
int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules);
int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules);
int ipa3_commit_rt(enum ipa_ip_type ip);
-int ipa3_reset_rt(enum ipa_ip_type ip);
+int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only);
int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
*/
int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules,
+ bool user_only);
+
int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules);
int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
int ipa3_commit_flt(enum ipa_ip_type ip);
-int ipa3_reset_flt(enum ipa_ip_type ip);
+int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only);
/*
* NAT
*/
int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
ipa_msg_free_fn callback);
+int ipa3_resend_wlan_msg(void);
int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta);
* To transfer multiple data packets
* While passing the data descriptor list, the anchor node
* should be of type struct ipa_tx_data_desc not list_head
-*/
+ */
int ipa3_tx_dp_mul(enum ipa_client_type dst,
struct ipa_tx_data_desc *data_desc);
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
#include <linux/fs.h>
#include <linux/sched.h>
#include "ipa_i.h"
+#include <linux/msm_ipa.h>
struct ipa3_intf {
char name[IPA_RESOURCE_NAME_MAX];
kfree(buff);
}
+/*
+ * wlan_msg_process() - cache WLAN client connect events and purge the
+ * cache on client disconnect.
+ * @meta: [in] message meta-data (msg_type and msg_len)
+ * @buff: [in] event payload; caller (ipa3_send_msg) guarantees non-NULL
+ *
+ * On WLAN_CLIENT_CONNECT_EX the event is duplicated and appended to
+ * msg_wlan_client_list so it can be replayed later by
+ * ipa3_resend_wlan_msg(). On WLAN_CLIENT_DISCONNECT any cached entry
+ * whose MAC address matches the disconnecting client is removed.
+ *
+ * Called with ipa3_ctx->msg_lock held; takes msg_wlan_client_lock
+ * internally and must release it on every exit path.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff)
+{
+	struct ipa3_push_msg *msg_dup;
+	struct ipa_wlan_msg_ex *event_ex_cur_con = NULL;
+	struct ipa_wlan_msg_ex *event_ex_list = NULL;
+	struct ipa_wlan_msg *event_ex_cur_discon = NULL;
+	void *data_dup = NULL;
+	struct ipa3_push_msg *entry;
+	struct ipa3_push_msg *next;
+	int cnt = 0, total = 0, max = 0;
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	uint8_t mac2[IPA_MAC_ADDR_SIZE];
+
+	if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) {
+		/* debug print: dump the MAC attribute(s) of the event */
+		event_ex_cur_con = buff;
+		for (cnt = 0; cnt < event_ex_cur_con->num_of_attribs; cnt++) {
+			if (event_ex_cur_con->attribs[cnt].attrib_type ==
+				WLAN_HDR_ATTRIB_MAC_ADDR) {
+				IPADBG("%02x:%02x:%02x:%02x:%02x:%02x,(%d)\n",
+				event_ex_cur_con->attribs[cnt].u.mac_addr[0],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[1],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[2],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[3],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[4],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[5],
+				meta->msg_type);
+			}
+		}
+
+		mutex_lock(&ipa3_ctx->msg_wlan_client_lock);
+		msg_dup = kzalloc(sizeof(struct ipa3_push_msg), GFP_KERNEL);
+		if (msg_dup == NULL) {
+			IPAERR("fail to alloc ipa_msg container\n");
+			/* must not return with the client lock held */
+			mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+			return -ENOMEM;
+		}
+		msg_dup->meta = *meta;
+		if (meta->msg_len > 0 && buff) {
+			data_dup = kmalloc(meta->msg_len, GFP_KERNEL);
+			if (data_dup == NULL) {
+				IPAERR("fail to alloc data_dup container\n");
+				kfree(msg_dup);
+				/* must not return with the client lock held */
+				mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+				return -ENOMEM;
+			}
+			memcpy(data_dup, buff, meta->msg_len);
+			msg_dup->buff = data_dup;
+			msg_dup->callback = ipa3_send_msg_free;
+		}
+		list_add_tail(&msg_dup->link, &ipa3_ctx->msg_wlan_client_list);
+		mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+	}
+
+	/* remove the cache */
+	if (meta->msg_type == WLAN_CLIENT_DISCONNECT) {
+		/* debug print */
+		event_ex_cur_discon = buff;
+		IPADBG("Mac %02x:%02x:%02x:%02x:%02x:%02x,msg %d\n",
+		event_ex_cur_discon->mac_addr[0],
+		event_ex_cur_discon->mac_addr[1],
+		event_ex_cur_discon->mac_addr[2],
+		event_ex_cur_discon->mac_addr[3],
+		event_ex_cur_discon->mac_addr[4],
+		event_ex_cur_discon->mac_addr[5],
+		meta->msg_type);
+		memcpy(mac2,
+			event_ex_cur_discon->mac_addr,
+			sizeof(mac2));
+
+		mutex_lock(&ipa3_ctx->msg_wlan_client_lock);
+		list_for_each_entry_safe(entry, next,
+				&ipa3_ctx->msg_wlan_client_list,
+				link) {
+			event_ex_list = entry->buff;
+			max = event_ex_list->num_of_attribs;
+			for (cnt = 0; cnt < max; cnt++) {
+				memcpy(mac,
+					event_ex_list->attribs[cnt].u.mac_addr,
+					sizeof(mac));
+				if (event_ex_list->attribs[cnt].attrib_type ==
+					WLAN_HDR_ATTRIB_MAC_ADDR) {
+					pr_debug("%02x:%02x:%02x:%02x:%02x:%02x\n",
+					mac[0], mac[1], mac[2],
+					mac[3], mac[4], mac[5]);
+
+					/* compare to delete one*/
+					if (memcmp(mac2,
+						mac,
+						sizeof(mac)) == 0) {
+						IPADBG("clean %d\n", total);
+						list_del(&entry->link);
+						kfree(entry);
+						break;
+					}
+				}
+			}
+			total++;
+		}
+		mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+	}
+	return 0;
+}
+
/**
* ipa3_send_msg() - Send "message" from kernel client to IPA driver
* @meta: [in] message meta-data
void *data = NULL;
if (meta == NULL || (buff == NULL && callback != NULL) ||
- (buff != NULL && callback == NULL)) {
+ (buff != NULL && callback == NULL) || buff == NULL) {
IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
meta, buff, callback);
return -EINVAL;
mutex_lock(&ipa3_ctx->msg_lock);
list_add_tail(&msg->link, &ipa3_ctx->msg_list);
+ /* support for softap client event cache */
+ if (wlan_msg_process(meta, buff))
+ IPAERR("wlan_msg_process failed\n");
+
+ /* unlock only after process */
mutex_unlock(&ipa3_ctx->msg_lock);
IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]);
}
/**
+ * ipa3_resend_wlan_msg() - Resend cached "message" to IPACM
+ *
+ * resend wlan client connect events to user-space
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_resend_wlan_msg(void)
+{
+	struct ipa_wlan_msg_ex *event_ex_list = NULL;
+	struct ipa3_push_msg *entry;
+	struct ipa3_push_msg *next;
+	int cnt = 0, total = 0;
+	struct ipa3_push_msg *msg;
+	void *data = NULL;
+
+	IPADBG("\n");
+
+	/*
+	 * Hold msg_wlan_client_lock for the whole walk; msg_lock is taken
+	 * inside (outer-then-inner order matches wlan_msg_process callers).
+	 */
+	mutex_lock(&ipa3_ctx->msg_wlan_client_lock);
+	list_for_each_entry_safe(entry, next, &ipa3_ctx->msg_wlan_client_list,
+			link) {
+
+		/* debug print: MAC attribute(s) of the cached connect event */
+		event_ex_list = entry->buff;
+		for (cnt = 0; cnt < event_ex_list->num_of_attribs; cnt++) {
+			if (event_ex_list->attribs[cnt].attrib_type ==
+				WLAN_HDR_ATTRIB_MAC_ADDR) {
+				IPADBG("%d-Mac %02x:%02x:%02x:%02x:%02x:%02x\n",
+				total,
+				event_ex_list->attribs[cnt].u.mac_addr[0],
+				event_ex_list->attribs[cnt].u.mac_addr[1],
+				event_ex_list->attribs[cnt].u.mac_addr[2],
+				event_ex_list->attribs[cnt].u.mac_addr[3],
+				event_ex_list->attribs[cnt].u.mac_addr[4],
+				event_ex_list->attribs[cnt].u.mac_addr[5]);
+			}
+		}
+
+		/*
+		 * Queue a private copy of the cached event; the reader side
+		 * frees it via msg->callback (ipa3_send_msg_free), so the
+		 * cached original stays intact for future replays.
+		 */
+		msg = kzalloc(sizeof(struct ipa3_push_msg), GFP_KERNEL);
+		if (msg == NULL) {
+			IPAERR("fail to alloc ipa_msg container\n");
+			mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+			return -ENOMEM;
+		}
+		msg->meta = entry->meta;
+		data = kmalloc(entry->meta.msg_len, GFP_KERNEL);
+		if (data == NULL) {
+			IPAERR("fail to alloc data container\n");
+			kfree(msg);
+			mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+			return -ENOMEM;
+		}
+		memcpy(data, entry->buff, entry->meta.msg_len);
+		msg->buff = data;
+		msg->callback = ipa3_send_msg_free;
+		mutex_lock(&ipa3_ctx->msg_lock);
+		list_add_tail(&msg->link, &ipa3_ctx->msg_list);
+		mutex_unlock(&ipa3_ctx->msg_lock);
+		/* wake any reader blocked on the message queue */
+		wake_up(&ipa3_ctx->msg_waitq);
+
+		total++;
+	}
+	mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+	return 0;
+}
+
+/**
* ipa3_register_pull_msg() - register pull message type
* @meta: [in] message meta-data
* @callback: [in] pull callback
int rc;
int i;
+ /* check if modem up */
+ if (!ipa3_qmi_indication_fin ||
+ !ipa3_qmi_modem_init_fin ||
+ !ipa_q6_clnt) {
+ IPAWANDBG("modem QMI haven't up yet\n");
+ return -EINVAL;
+ }
+
/* check if the filter rules from IPACM is valid */
if (req->filter_spec_list_len == 0)
IPAWANDBG("IPACM pass zero rules to Q6\n");
int rc;
int i;
+ /* check if modem up */
+ if (!ipa3_qmi_indication_fin ||
+ !ipa3_qmi_modem_init_fin ||
+ !ipa_q6_clnt) {
+ IPAWANDBG("modem QMI haven't up yet\n");
+ return -EINVAL;
+ }
+
/* check if the filter rules from IPACM is valid */
if (req->filter_spec_ex_list_len == 0) {
IPAWANDBG("IPACM pass zero rules to Q6\n");
const struct ipa_rt_rule *rule,
struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr,
struct ipa3_hdr_proc_ctx_entry *proc_ctx,
- u16 rule_id)
+ u16 rule_id, bool user)
{
int id;
}
}
(*(entry))->rule_id = id;
+ (*(entry))->ipacm_installed = user;
return 0;
static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl,
- u16 rule_id)
+ u16 rule_id, bool user)
{
struct ipa3_rt_tbl *tbl;
struct ipa3_rt_entry *entry;
}
if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx,
- rule_id))
+ rule_id, user))
goto error;
if (at_rear)
if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
goto error;
- if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0))
+ if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0, true))
goto error;
list_add(&entry->link, &((*add_after_entry)->link));
*
* Note: Should not be called from atomic context
*/
+
int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
{
+ return ipa3_add_rt_rule_usr(rules, false);
+}
+/**
+ * ipa3_add_rt_rule_usr() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ * @user_only: [in] indicate installed by userspace module
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+
+int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only)
+{
int i;
int ret;
&rules->rules[i].rule,
rules->rules[i].at_rear,
&rules->rules[i].rt_rule_hdl,
- 0)) {
+ 0,
+ user_only)) {
IPAERR("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
&rules->rules[i].rule,
rules->rules[i].at_rear,
&rules->rules[i].rt_rule_hdl,
- rules->rules[i].rule_id)) {
+ rules->rules[i].rule_id, true)) {
IPAERR("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
/**
* ipa3_reset_rt() - reset the current SW routing table of specified type
* (does not commit to HW)
- * @ip: The family of routing tables
+ * @ip: [in] The family of routing tables
+ * @user_only: [in] indicate delete rules installed by userspace
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
-int ipa3_reset_rt(enum ipa_ip_type ip)
+int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only)
{
struct ipa3_rt_tbl *tbl;
struct ipa3_rt_tbl *tbl_next;
struct ipa3_rt_tbl_set *rset;
u32 apps_start_idx;
int id;
+ bool tbl_user = false;
if (ip >= IPA_IP_MAX) {
IPAERR_RL("bad parm\n");
* issue a reset on the filtering module of same IP type since
* filtering rules point to routing tables
*/
- if (ipa3_reset_flt(ip))
+ if (ipa3_reset_flt(ip, user_only))
IPAERR_RL("fail to reset flt ip=%d\n", ip);
set = &ipa3_ctx->rt_tbl_set[ip];
mutex_lock(&ipa3_ctx->lock);
IPADBG("reset rt ip=%d\n", ip);
list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+ tbl_user = false;
list_for_each_entry_safe(rule, rule_next,
&tbl->head_rt_rule_list, link) {
if (ipa3_id_find(rule->id) == NULL) {
return -EFAULT;
}
+		/* indicate if tbl used for user-specified rules */
+ if (rule->ipacm_installed) {
+ IPADBG("tbl_user %d, tbl-index %d\n",
+ tbl_user, tbl->id);
+ tbl_user = true;
+ }
/*
* for the "default" routing tbl, remove all but the
* last rule
if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1)
continue;
- list_del(&rule->link);
- tbl->rule_cnt--;
- if (rule->hdr)
- __ipa3_release_hdr(rule->hdr->id);
- else if (rule->proc_ctx)
- __ipa3_release_hdr_proc_ctx(rule->proc_ctx->id);
- rule->cookie = 0;
- idr_remove(&tbl->rule_ids, rule->rule_id);
- id = rule->id;
- kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
-
- /* remove the handle from the database */
- ipa3_id_remove(id);
+ if (!user_only ||
+ rule->ipacm_installed) {
+ list_del(&rule->link);
+ tbl->rule_cnt--;
+ if (rule->hdr)
+ __ipa3_release_hdr(rule->hdr->id);
+ else if (rule->proc_ctx)
+ __ipa3_release_hdr_proc_ctx(
+ rule->proc_ctx->id);
+ rule->cookie = 0;
+ idr_remove(&tbl->rule_ids, rule->rule_id);
+ id = rule->id;
+ kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
+
+ /* remove the handle from the database */
+ ipa3_id_remove(id);
+ }
}
if (ipa3_id_find(tbl->id) == NULL) {
/* do not remove the "default" routing tbl which has index 0 */
if (tbl->idx != apps_start_idx) {
- idr_destroy(&tbl->rule_ids);
- if (tbl->in_sys[IPA_RULE_HASHABLE] ||
- tbl->in_sys[IPA_RULE_NON_HASHABLE]) {
- list_move(&tbl->link, &rset->head_rt_tbl_list);
- clear_bit(tbl->idx,
+ if (!user_only || tbl_user) {
+ idr_destroy(&tbl->rule_ids);
+ if (tbl->in_sys[IPA_RULE_HASHABLE] ||
+ tbl->in_sys[IPA_RULE_NON_HASHABLE]) {
+ list_move(&tbl->link,
+ &rset->head_rt_tbl_list);
+ clear_bit(tbl->idx,
&ipa3_ctx->rt_idx_bitmap[ip]);
- set->tbl_cnt--;
- IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
+ set->tbl_cnt--;
+ IPADBG("rst tbl_idx=%d cnt=%d\n",
tbl->idx, set->tbl_cnt);
- } else {
- list_del(&tbl->link);
- set->tbl_cnt--;
- clear_bit(tbl->idx,
+ } else {
+ list_del(&tbl->link);
+ set->tbl_cnt--;
+ clear_bit(tbl->idx,
&ipa3_ctx->rt_idx_bitmap[ip]);
- IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+ IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
tbl->idx, set->tbl_cnt);
- kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl);
+ kmem_cache_free(ipa3_ctx->rt_tbl_cache,
+ tbl);
+ }
+ /* remove the handle from the database */
+ ipa3_id_remove(id);
}
- /* remove the handle from the database */
- ipa3_id_remove(id);
}
}
mutex_unlock(&ipa3_ctx->lock);
struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
struct ipa3_hdr_entry *hdr_entry;
struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
+
if (rtrule->rule.hdr_hdl) {
hdr = ipa3_id_find(rtrule->rule.hdr_hdl);
if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client;
api_ctrl->ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl;
api_ctrl->ipa_add_hdr = ipa3_add_hdr;
+ api_ctrl->ipa_add_hdr_usr = ipa3_add_hdr_usr;
api_ctrl->ipa_del_hdr = ipa3_del_hdr;
api_ctrl->ipa_commit_hdr = ipa3_commit_hdr;
api_ctrl->ipa_reset_hdr = ipa3_reset_hdr;
api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx;
api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx;
api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule;
+ api_ctrl->ipa_add_rt_rule_usr = ipa3_add_rt_rule_usr;
api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule;
api_ctrl->ipa_commit_rt = ipa3_commit_rt;
api_ctrl->ipa_reset_rt = ipa3_reset_rt;
api_ctrl->ipa_query_rt_index = ipa3_query_rt_index;
api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule;
api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule;
+ api_ctrl->ipa_add_flt_rule_usr = ipa3_add_flt_rule_usr;
api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule;
api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule;
api_ctrl->ipa_commit_flt = ipa3_commit_flt;
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
*/
int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only);
+
int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls);
int ipa_commit_hdr(void);
-int ipa_reset_hdr(void);
+int ipa_reset_hdr(bool user_only);
int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup);
/*
* Header Processing Context
*/
-int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+ bool user_only);
int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
*/
int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only);
+
int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
int ipa_commit_rt(enum ipa_ip_type ip);
-int ipa_reset_rt(enum ipa_ip_type ip);
+int ipa_reset_rt(enum ipa_ip_type ip, bool user_only);
int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
*/
int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only);
+
int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
int ipa_commit_flt(enum ipa_ip_type ip);
-int ipa_reset_flt(enum ipa_ip_type ip);
+int ipa_reset_flt(enum ipa_ip_type ip, bool user_only);
/*
* NAT
return -EPERM;
}
+static inline int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs,
+ bool user_only)
+{
+ return -EPERM;
+}
+
static inline int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
{
return -EPERM;
return -EPERM;
}
-static inline int ipa_reset_hdr(void)
+static inline int ipa_reset_hdr(bool user_only)
{
return -EPERM;
}
* Header Processing Context
*/
static inline int ipa_add_hdr_proc_ctx(
- struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+ struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+ bool user_only)
{
return -EPERM;
}
return -EPERM;
}
+static inline int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules,
+ bool user_only)
+{
+ return -EPERM;
+}
+
static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
{
return -EPERM;
return -EPERM;
}
-static inline int ipa_reset_rt(enum ipa_ip_type ip)
+static inline int ipa_reset_rt(enum ipa_ip_type ip, bool user_only)
{
return -EPERM;
}
return -EPERM;
}
+static inline int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules,
+ bool user_only)
+{
+ return -EPERM;
+}
+
static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
{
return -EPERM;
return -EPERM;
}
-static inline int ipa_reset_flt(enum ipa_ip_type ip)
+static inline int ipa_reset_flt(enum ipa_ip_type ip, bool user_only)
{
return -EPERM;
}
#define IPA_IOCTL_DEL_VLAN_IFACE 53
#define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING 54
#define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING 55
-#define IPA_IOCTL_MAX 56
+#define IPA_IOCTL_CLEANUP 56
+#define IPA_IOCTL_QUERY_WLAN_CLIENT 57
+#define IPA_IOCTL_MAX 58
/**
* max size of the header to be inserted
#define IPA_IOC_DEL_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_DEL_L2TP_VLAN_MAPPING, \
struct ipa_ioc_l2tp_vlan_mapping_info *)
+#define IPA_IOC_CLEANUP _IO(IPA_IOC_MAGIC,\
+ IPA_IOCTL_CLEANUP)
+#define IPA_IOC_QUERY_WLAN_CLIENT _IO(IPA_IOC_MAGIC,\
+ IPA_IOCTL_QUERY_WLAN_CLIENT)
/*
* unique magic number of the Tethering bridge ioctls
*/