From 45db81e1590c82ddc735ccd33f8adab02528b3e3 Mon Sep 17 00:00:00 2001
From: Giuseppe CAVALLARO
Date: Tue, 18 Oct 2011 01:39:55 +0000
Subject: [PATCH] stmmac: limit max_mtu in case of 4KiB and use
 __netdev_alloc_skb (V2)

The problem with using a big MTU around 4096 bytes is that you end up
allocating (4096 + NET_SKB_PAD + NET_IP_ALIGN +
sizeof(struct skb_shared_info)) bytes -> 8192 bytes, i.e. order-1 pages.

It is better to limit the MTU to SKB_MAX_HEAD(NET_SKB_PAD), so that no
more than one page is used per skb.

The patch also replaces the netdev_alloc_skb_ip_align() call in
init_dma_desc_rings() with a variant that allows GFP_KERNEL
allocations, so the driver can load even under memory pressure.

Reported-by: Eric Dumazet
Signed-off-by: Giuseppe Cavallaro
Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5eccd996cde0..aeaa15b451de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -474,11 +474,13 @@ static void init_dma_desc_rings(struct net_device *dev)
 	for (i = 0; i < rxsize; i++) {
 		struct dma_desc *p = priv->dma_rx + i;
 
-		skb = netdev_alloc_skb_ip_align(dev, bfsize);
+		skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN,
+					 GFP_KERNEL);
 		if (unlikely(skb == NULL)) {
 			pr_err("%s: Rx init fails; skb is NULL\n", __func__);
 			break;
 		}
+		skb_reserve(skb, NET_IP_ALIGN);
 		priv->rx_skbuff[i] = skb;
 		priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
 						bfsize, DMA_FROM_DEVICE);
@@ -1401,7 +1403,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 	if (priv->plat->enh_desc)
 		max_mtu = JUMBO_LEN;
 	else
-		max_mtu = BUF_SIZE_4KiB;
+		max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
 
 	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
 		pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
-- 
2.11.0
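
Editor's note: below is a small userspace sketch of the size arithmetic the
commit message describes. The macros mirror the spirit of the 3.x-era
include/linux/skbuff.h definitions, and the concrete constants (4 KiB pages,
64-byte cache lines, NET_SKB_PAD = 64, NET_IP_ALIGN = 2, a 320-byte
struct skb_shared_info) are typical x86-64 assumptions, not values taken from
this patch; the real allocation also goes through kmalloc(), which rounds the
request up to the next power of two.

/* Hedged sketch: approximates the skb head-size arithmetic discussed above.
 * All constants are assumed typical x86-64 values, not authoritative. */
#include <stdio.h>

#define PAGE_SIZE        4096UL
#define SMP_CACHE_BYTES  64UL
#define NET_SKB_PAD      64UL   /* headroom reserved by __netdev_alloc_skb() */
#define NET_IP_ALIGN     2UL    /* extra headroom to align the IP header */
#define SHINFO_SIZE      320UL  /* assumed sizeof(struct skb_shared_info) */

/* Userspace mirrors of the skbuff.h helpers (same formulas). */
#define SKB_DATA_ALIGN(x)     (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(x)  ((x) - SKB_DATA_ALIGN(SHINFO_SIZE))
#define SKB_MAX_HEAD(x)       SKB_WITH_OVERHEAD(PAGE_SIZE - (x))

int main(void)
{
	/* A 4096-byte receive buffer plus headroom and skb_shared_info no
	 * longer fits in one page, so kmalloc() falls back to the next
	 * power-of-two slab: 8192 bytes, i.e. order-1 pages. */
	unsigned long need = SKB_DATA_ALIGN(NET_SKB_PAD + NET_IP_ALIGN + 4096UL)
			     + SKB_DATA_ALIGN(SHINFO_SIZE);
	printf("4 KiB buffer needs %lu bytes -> kmalloc rounds up to 8192 (order-1)\n",
	       need);

	/* Largest payload that still fits in a single page once the headroom
	 * and skb_shared_info overhead are subtracted; this is the bound the
	 * patch uses for max_mtu on non-enhanced descriptors. */
	printf("one-page limit: SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN) = %lu\n",
	       SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN));
	return 0;
}

Under these assumed constants the first printf reports 4544 bytes (rounded by
kmalloc to 8192) and the second reports 3710, which is roughly where the new
one-page max_mtu cap lands; the exact kernel value depends on the real
sizeof(struct skb_shared_info) and cache-line size for the build.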