--- /dev/null
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data config definition
+ *
+ */
+
+#ifndef _RMNET_CONFIG_H_
+#define _RMNET_CONFIG_H_
+
+#include <linux/skbuff.h>
+
+/*
+ * struct rmnet_phys_ep_conf_s - wrapper installed as the net_device's
+ * rx_handler_data when a physical device is associated with rmnet_data.
+ * @recycle: SKB destructor hook (set to kfree_skb at association time)
+ * @config:  opaque pointer to the driver-private struct
+ *           rmnet_phys_ep_config carrying the endpoint state
+ */
+struct rmnet_phys_ep_conf_s {
+	void (*recycle)(struct sk_buff *); /* Destruct function */
+	void *config;
+};
+
+#endif /* _RMNET_CONFIG_H_ */
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/rmnet_data.h>
+#include <net/rmnet_config.h>
#include "rmnet_data_config.h"
#include "rmnet_data_handlers.h"
#include "rmnet_data_vnd.h"
* - pointer to configuration if successful
* - 0 (null) if device is not associated
*/
-static inline struct rmnet_phys_ep_conf_s *_rmnet_get_phys_ep_config
-	(struct net_device *dev)
+struct rmnet_phys_ep_config *_rmnet_get_phys_ep_config
+	(struct net_device *dev)
 {
-	if (_rmnet_is_physical_endpoint_associated(dev))
-		return (struct rmnet_phys_ep_conf_s *)
-			rcu_dereference(dev->rx_handler_data);
-	else
-		return 0;
+	struct rmnet_phys_ep_conf_s *phys_conf;
+
+	/* Only devices registered through rmnet_associate_network_device()
+	 * carry the rmnet wrapper in rx_handler_data; reject anything else
+	 * up front so we never interpret a foreign handler's data.
+	 */
+	if (!_rmnet_is_physical_endpoint_associated(dev))
+		return NULL;
+
+	phys_conf = (struct rmnet_phys_ep_conf_s *)
+		rcu_dereference(dev->rx_handler_data);
+
+	/* The wrapper holds the driver-private config in ->config;
+	 * return NULL (not plain 0) if either level is missing.
+	 */
+	if (phys_conf && phys_conf->config)
+		return (struct rmnet_phys_ep_config *)phys_conf->config;
+
+	return NULL;
 }
/**
struct rmnet_logical_ep_conf_s *_rmnet_get_logical_ep(struct net_device *dev,
int config_id)
{
- struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_phys_ep_config *config;
struct rmnet_logical_ep_conf_s *epconfig_l;
if (rmnet_vnd_is_vnd(dev))
struct rmnet_nl_msg_s *resp_rmnet)
{
struct net_device *dev;
- struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_phys_ep_config *config;
_RMNET_NETLINK_NULL_CHECKS();
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
struct rmnet_nl_msg_s *resp_rmnet)
{
struct net_device *dev;
- struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_phys_ep_config *config;
_RMNET_NETLINK_NULL_CHECKS();
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
uint32_t ingress_data_format,
uint8_t tail_spacing)
{
- struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_phys_ep_config *config;
ASSERT_RTNL();
LOGL("(%s,0x%08X);", dev->name, ingress_data_format);
uint16_t agg_size,
uint16_t agg_count)
{
- struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_phys_ep_config *config;
ASSERT_RTNL();
LOGL("(%s,0x%08X, %d, %d);",
int rmnet_associate_network_device(struct net_device *dev)
{
struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_phys_ep_config *conf;
int rc;
+
ASSERT_RTNL();
LOGL("(%s);\n", dev->name);
}
config = kmalloc(sizeof(*config), GFP_ATOMIC);
+ conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
- if (!config)
+ if (!config || !conf)
return RMNET_CONFIG_NOMEM;
memset(config, 0, sizeof(struct rmnet_phys_ep_conf_s));
- config->dev = dev;
- spin_lock_init(&config->agg_lock);
+ memset(conf, 0, sizeof(struct rmnet_phys_ep_config));
+
+ config->config = conf;
+ conf->dev = dev;
+ spin_lock_init(&conf->agg_lock);
+ config->recycle = kfree_skb;
rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);
if (rc) {
LOGM("netdev_rx_handler_register returns %d", rc);
kfree(config);
+ kfree(conf);
return RMNET_CONFIG_DEVICE_IN_USE;
}
#include <linux/types.h>
#include <linux/time.h>
#include <linux/spinlock.h>
+#include <net/rmnet_config.h>
#ifndef _RMNET_DATA_CONFIG_H_
#define _RMNET_DATA_CONFIG_H_
* @agg_time: Wall clock time when aggregated frame was created
* @agg_last: Last time the aggregation routing was invoked
*/
-struct rmnet_phys_ep_conf_s {
+struct rmnet_phys_ep_config {
struct net_device *dev;
struct rmnet_logical_ep_conf_s local_ep;
struct rmnet_logical_ep_conf_s muxed_ep[RMNET_DATA_MAX_LOGICAL_EP];
int rmnet_create_vnd_prefix(int id, const char *name);
int rmnet_free_vnd(int id);
+struct rmnet_phys_ep_config *_rmnet_get_phys_ep_config
+ (struct net_device *dev);
+
#endif /* _RMNET_DATA_CONFIG_H_ */
#include <linux/netdev_features.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/rmnet_config.h>
#include "rmnet_data_private.h"
#include "rmnet_data_config.h"
#include "rmnet_data_vnd.h"
* - RX_HANDLER_PASS if packet should be passed up the stack by caller
*/
static rx_handler_result_t rmnet_ingress_deliver_packet(struct sk_buff *skb,
- struct rmnet_phys_ep_conf_s *config)
+ struct rmnet_phys_ep_config *config)
{
if (!config) {
LOGD("%s", "NULL physical EP provided");
* - result of __rmnet_deliver_skb() for all other cases
*/
static rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
- struct rmnet_phys_ep_conf_s *config)
+ struct rmnet_phys_ep_config *config)
{
struct rmnet_logical_ep_conf_s *ep;
uint8_t mux_id;
* - result of _rmnet_map_ingress_handler() for all other cases
*/
static rx_handler_result_t rmnet_map_ingress_handler(struct sk_buff *skb,
- struct rmnet_phys_ep_conf_s *config)
+ struct rmnet_phys_ep_config *config)
{
struct sk_buff *skbn;
int rc, co = 0;
* - 1 on failure
*/
static int rmnet_map_egress_handler(struct sk_buff *skb,
- struct rmnet_phys_ep_conf_s *config,
+ struct rmnet_phys_ep_config *config,
struct rmnet_logical_ep_conf_s *ep,
struct net_device *orig_dev)
{
*/
rx_handler_result_t rmnet_ingress_handler(struct sk_buff *skb)
{
- struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_phys_ep_config *config;
struct net_device *dev;
int rc;
trace_rmnet_ingress_handler(skb);
rmnet_print_packet(skb, dev->name, 'r');
- config = (struct rmnet_phys_ep_conf_s *)
- rcu_dereference(skb->dev->rx_handler_data);
+ config = _rmnet_get_phys_ep_config(skb->dev);
if (!config) {
LOGD("%s is not associated with rmnet_data", skb->dev->name);
void rmnet_egress_handler(struct sk_buff *skb,
struct rmnet_logical_ep_conf_s *ep)
{
- struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_phys_ep_config *config;
struct net_device *orig_dev;
int rc;
orig_dev = skb->dev;
skb->dev = ep->egress_dev;
- config = (struct rmnet_phys_ep_conf_s *)
- rcu_dereference(skb->dev->rx_handler_data);
+ config = _rmnet_get_phys_ep_config(skb->dev);
if (!config) {
LOGD("%s is not associated with rmnet_data", skb->dev->name);
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
+#include <net/rmnet_config.h>
#include "rmnet_data_private.h"
#include "rmnet_data_stats.h"
#include "rmnet_data_config.h"
skb_free[reason]++;
spin_unlock_irqrestore(&rmnet_skb_free_lock, flags);
- if (skb)
- kfree_skb(skb);
+ if (likely(skb)) {
+ struct rmnet_phys_ep_conf_s *config;
+
+ config = (struct rmnet_phys_ep_conf_s *)rcu_dereference
+ (skb->dev->rx_handler_data);
+ if (likely(config))
+ config->recycle(skb);
+ else
+ kfree_skb(skb);
+ }
}
void rmnet_stats_queue_xmit(int rc, unsigned int reason)
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
#include <linux/types.h>
#include <linux/spinlock.h>
+#include <net/rmnet_config.h>
#ifndef _RMNET_MAP_H_
#define _RMNET_MAP_H_
uint8_t rmnet_map_demultiplex(struct sk_buff *skb);
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
- struct rmnet_phys_ep_conf_s *config);
+ struct rmnet_phys_ep_config *config);
struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad);
rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
- struct rmnet_phys_ep_conf_s *config);
+ struct rmnet_phys_ep_config *config);
void rmnet_map_aggregate(struct sk_buff *skb,
- struct rmnet_phys_ep_conf_s *config);
+ struct rmnet_phys_ep_config *config);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb);
int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
#include <linux/rmnet_data.h>
#include <linux/net_map.h>
#include <net/pkt_sched.h>
+#include <net/rmnet_config.h>
#include "rmnet_data_config.h"
#include "rmnet_map.h"
#include "rmnet_data_private.h"
* - RMNET_MAP_COMMAND_ACK on success
*/
static uint8_t rmnet_map_do_flow_control(struct sk_buff *skb,
- struct rmnet_phys_ep_conf_s *config,
+ struct rmnet_phys_ep_config *config,
int enable)
{
struct rmnet_map_control_command_s *cmd;
*/
static void rmnet_map_send_ack(struct sk_buff *skb,
unsigned char type,
- struct rmnet_phys_ep_conf_s *config)
+ struct rmnet_phys_ep_config *config)
{
struct rmnet_map_control_command_s *cmd;
int xmit_status;
* - RX_HANDLER_CONSUMED. Command frames are always consumed.
*/
rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
- struct rmnet_phys_ep_conf_s *config)
+ struct rmnet_phys_ep_config *config)
{
struct rmnet_map_control_command_s *cmd;
unsigned char command_name;
#include <net/ip.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <net/rmnet_config.h>
#include "rmnet_data_config.h"
#include "rmnet_map.h"
#include "rmnet_data_private.h"
struct agg_work {
struct delayed_work work;
- struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_phys_ep_config *config;
};
#define RMNET_MAP_DEAGGR_SPACING 64
* - 0 (null) if no more aggregated packets
*/
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
- struct rmnet_phys_ep_conf_s *config)
+ struct rmnet_phys_ep_config *config)
{
struct sk_buff *skbn;
struct rmnet_map_header_s *maph;
static void rmnet_map_flush_packet_queue(struct work_struct *work)
{
struct agg_work *real_work;
- struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_phys_ep_config *config;
unsigned long flags;
struct sk_buff *skb;
int rc, agg_count = 0;
* the argument SKB and should not be further processed by any other function.
*/
void rmnet_map_aggregate(struct sk_buff *skb,
- struct rmnet_phys_ep_conf_s *config) {
+ struct rmnet_phys_ep_config *config) {
uint8_t *dest_buff;
struct agg_work *work;
unsigned long flags;