``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` u8 TCP header / data split
``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE
``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode
+ ``ETHTOOL_A_RINGS_RX_PUSH`` u8 flag of RX Push mode
==================================== ====== ===========================
``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device splits packet
headers and data into separate buffers; the configuration must make it possible
to receive full memory pages of data, for example because MTU is high enough or
through HW-GRO.
-``ETHTOOL_A_RINGS_TX_PUSH`` flag is used to enable descriptor fast
-path to send packets. In ordinary path, driver fills descriptors in DRAM and
+``ETHTOOL_A_RINGS_[RX|TX]_PUSH`` flag is used to enable the descriptor fast
+path for sending or receiving packets. In the ordinary path, the driver fills descriptors in DRAM and
notifies NIC hardware. In fast path, driver pushes descriptors to the device
through MMIO writes, thus reducing the latency. However, enabling this feature
may increase the CPU cost. Drivers may enforce additional per-packet
``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring
``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE
``ETHTOOL_A_RINGS_TX_PUSH`` u8 flag of TX Push mode
+ ``ETHTOOL_A_RINGS_RX_PUSH`` u8 flag of RX Push mode
==================================== ====== ===========================
Kernel checks that requested ring sizes do not exceed limits reported by
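To make the fast-path description above more concrete, here is a minimal, purely
illustrative sketch of the difference between the two paths as a driver might
implement it. All names (struct my_ring, struct my_tx_desc, push_window,
doorbell, my_ring_submit) are hypothetical; real devices define their own
descriptor layouts and MMIO push windows, and this is not code from any
particular driver.

#include <linux/io.h>
#include <linux/types.h>

struct my_tx_desc { u64 qw[2]; };		/* hypothetical descriptor layout */

struct my_ring {
	struct my_tx_desc *descs;		/* descriptor ring in DRAM */
	void __iomem *push_window;		/* MMIO window for pushed descriptors */
	void __iomem *doorbell;			/* ordinary notification register */
	u16 tail;
	u16 mask;
	bool push_enabled;			/* mirrors the ethtool [rx|tx] push setting */
};

static void my_ring_submit(struct my_ring *ring, const struct my_tx_desc *desc)
{
	/* Ordinary path: the descriptor lives in DRAM and the device DMAs it
	 * after being notified through the doorbell register.
	 */
	ring->descs[ring->tail] = *desc;
	ring->tail = (ring->tail + 1) & ring->mask;
	dma_wmb();	/* descriptor must be visible before notifying the device */

	if (ring->push_enabled) {
		/* Fast path: copy the descriptor directly into the device's
		 * MMIO window, skipping the DMA fetch at the cost of extra
		 * CPU cycles spent on MMIO writes.
		 */
		__iowrite64_copy(ring->push_window, desc,
				 sizeof(*desc) / sizeof(u64));
	} else {
		writel(ring->tail, ring->doorbell);
	}
}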
* @rx_buf_len: Current length of buffers on the rx ring.
* @tcp_data_split: Scatter packet headers and data to separate buffers
* @tx_push: The flag of tx push mode
+ * @rx_push: The flag of rx push mode
* @cqe_size: Size of TX/RX completion queue event
*/
struct kernel_ethtool_ringparam {
u32 rx_buf_len;
u8 tcp_data_split;
u8 tx_push;
+ u8 rx_push;
u32 cqe_size;
};
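For illustration, a driver that tracks its push state could report the new
field from its .get_ringparam callback roughly as below. The callback signature
and the kernel_ethtool_ringparam fields come from the ethtool core; struct
my_priv, its members and the MY_MAX_* limits are hypothetical.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void my_get_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kernel_ring,
			     struct netlink_ext_ack *extack)
{
	struct my_priv *priv = netdev_priv(dev);	/* hypothetical private data */

	ring->rx_max_pending = MY_MAX_RX_DESC;		/* hypothetical hardware limits */
	ring->tx_max_pending = MY_MAX_TX_DESC;
	ring->rx_pending = priv->rx_desc_count;
	ring->tx_pending = priv->tx_desc_count;

	/* extended, netlink-only parameters travel via kernel_ring */
	kernel_ring->tx_push = priv->tx_push_enabled;
	kernel_ring->rx_push = priv->rx_push_enabled;
}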
* @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
* @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
* @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
+ * @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
*/
enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
ETHTOOL_RING_USE_CQE_SIZE = BIT(1),
ETHTOOL_RING_USE_TX_PUSH = BIT(2),
+ ETHTOOL_RING_USE_RX_PUSH = BIT(3),
};
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
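A driver opts in to the new knob by setting the corresponding bit in
ethtool_ops->supported_ring_params; otherwise the core rejects
ETHTOOL_A_RINGS_RX_PUSH with -EOPNOTSUPP in the validate step shown further
below. A minimal sketch, with hypothetical callback names:

static const struct ethtool_ops my_ethtool_ops = {
	.supported_ring_params	= ETHTOOL_RING_USE_TX_PUSH |
				  ETHTOOL_RING_USE_RX_PUSH,
	.get_ringparam		= my_get_ringparam,	/* hypothetical callbacks */
	.set_ringparam		= my_set_ringparam,
};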
ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */
ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */
ETHTOOL_A_RINGS_TX_PUSH, /* u8 */
+ ETHTOOL_A_RINGS_RX_PUSH, /* u8 */
/* add new constants above here */
__ETHTOOL_A_RINGS_CNT,
extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
-extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX_PUSH + 1];
+extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_PUSH + 1];
extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */
nla_total_size(sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */
- nla_total_size(sizeof(u32) + /* _RINGS_CQE_SIZE */
- nla_total_size(sizeof(u8))); /* _RINGS_TX_PUSH */
+ nla_total_size(sizeof(u32)) + /* _RINGS_CQE_SIZE */
+ nla_total_size(sizeof(u8)) + /* _RINGS_TX_PUSH */
+ nla_total_size(sizeof(u8)); /* _RINGS_RX_PUSH */
}
static int rings_fill_reply(struct sk_buff *skb,
kr->tcp_data_split))) ||
(kr->cqe_size &&
(nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
- nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push))
+ nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
+ nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push))
return -EMSGSIZE;
return 0;
[ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1),
[ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1),
[ETHTOOL_A_RINGS_TX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
+ [ETHTOOL_A_RINGS_RX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
};
static int
return -EOPNOTSUPP;
}
+ if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
+ !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_RINGS_RX_PUSH],
+ "setting rx push not supported");
+ return -EOPNOTSUPP;
+ }
+
return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}
tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
ethnl_update_u8(&kernel_ringparam.tx_push,
tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
+ ethnl_update_u8(&kernel_ringparam.rx_push,
+ tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
if (!mod)
return 0;
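After ethnl_update_u8() has folded the request into kernel_ringparam, the core
passes the merged values to the driver's .set_ringparam callback. A hedged
sketch of how a driver might consume them (my_priv, its fields and
my_reconfigure_rings are hypothetical):

static int my_set_ringparam(struct net_device *dev,
			    struct ethtool_ringparam *ring,
			    struct kernel_ethtool_ringparam *kernel_ring,
			    struct netlink_ext_ack *extack)
{
	struct my_priv *priv = netdev_priv(dev);	/* hypothetical */

	priv->rx_desc_count = ring->rx_pending;
	priv->tx_desc_count = ring->tx_pending;
	priv->tx_push_enabled = kernel_ring->tx_push;
	priv->rx_push_enabled = kernel_ring->rx_push;

	/* a real driver would typically tear down and rebuild its rings here */
	return my_reconfigure_rings(priv);		/* hypothetical helper */
}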