net: mhi-net: Add re-aggregation of fragmented packets
author Loic Poulain <loic.poulain@linaro.org>
Thu, 4 Feb 2021 08:40:00 +0000 (09:40 +0100)
committer Jakub Kicinski <kuba@kernel.org>
Sat, 6 Feb 2021 19:28:45 +0000 (11:28 -0800)
When the device-side MTU is larger than the host-side MTU, packets
(typically rmnet packets) are split over multiple MHI transfers.
In that case, the fragments must be re-aggregated to recover the
packet before forwarding it to the upper layer.

A fragmented packet results in an -EOVERFLOW MHI transaction status
for each of its fragments, except the final one. Such transfers were
previously considered errors and the fragments were simply dropped.

This change adds a re-aggregation mechanism using skb chaining via
the skb frag_list.
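
For reference, here is a minimal consumer-side sketch (not part of this
patch; the function name is hypothetical) of how an skb aggregated
through frag_list can be inspected with the standard skb_walk_frags()
helper:

    #include <linux/skbuff.h>

    /* Count the buffers making up an aggregated packet: the head skb
     * plus every fragment chained on skb_shinfo(head)->frag_list.
     */
    static unsigned int skbagg_count_frags(struct sk_buff *head)
    {
            struct sk_buff *iter;
            unsigned int count = 1; /* the head itself */

            skb_walk_frags(head, iter)
                    count++;

            return count;
    }

Upper layers do not need anything special here: the aggregated skb
handed to netif_rx() is a regular non-linear skb, and can be flattened
with skb_linearize() if contiguous data is ever required.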

A warning is printed (once), since this behavior usually comes from
a misconfiguration of the device (e.g. modem MTU).

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Link: https://lore.kernel.org/r/1612428002-12333-1-git-send-email-loic.poulain@linaro.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/mhi_net.c

index 4f51253..8800991 100644
@@ -32,6 +32,8 @@ struct mhi_net_stats {
 struct mhi_net_dev {
        struct mhi_device *mdev;
        struct net_device *ndev;
+       struct sk_buff *skbagg_head;
+       struct sk_buff *skbagg_tail;
        struct delayed_work rx_refill;
        struct mhi_net_stats stats;
        u32 rx_queue_sz;
@@ -132,6 +134,32 @@ static void mhi_net_setup(struct net_device *ndev)
        ndev->tx_queue_len = 1000;
 }
 
+static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
+                                      struct sk_buff *skb)
+{
+       struct sk_buff *head = mhi_netdev->skbagg_head;
+       struct sk_buff *tail = mhi_netdev->skbagg_tail;
+
+       /* This is non-paged skb chaining using frag_list */
+       if (!head) {
+               mhi_netdev->skbagg_head = skb;
+               return skb;
+       }
+
+       if (!skb_shinfo(head)->frag_list)
+               skb_shinfo(head)->frag_list = skb;
+       else
+               tail->next = skb;
+
+       head->len += skb->len;
+       head->data_len += skb->len;
+       head->truesize += skb->truesize;
+
+       mhi_netdev->skbagg_tail = skb;
+
+       return mhi_netdev->skbagg_head;
+}
+
 static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
                                struct mhi_result *mhi_res)
 {
@@ -142,19 +170,42 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
        free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
 
        if (unlikely(mhi_res->transaction_status)) {
-               dev_kfree_skb_any(skb);
-
-               /* MHI layer stopping/resetting the DL channel */
-               if (mhi_res->transaction_status == -ENOTCONN)
+               switch (mhi_res->transaction_status) {
+               case -EOVERFLOW:
+                       /* Packet can not fit in one MHI buffer and has been
+                        * split over multiple MHI transfers, do re-aggregation.
+                        * That usually means the device side MTU is larger than
+                        * the host side MTU/MRU. Since this is not optimal,
+                        * print a warning (once).
+                        */
+                       netdev_warn_once(mhi_netdev->ndev,
+                                        "Fragmented packets received, fix MTU?\n");
+                       skb_put(skb, mhi_res->bytes_xferd);
+                       mhi_net_skb_agg(mhi_netdev, skb);
+                       break;
+               case -ENOTCONN:
+                       /* MHI layer stopping/resetting the DL channel */
+                       dev_kfree_skb_any(skb);
                        return;
-
-               u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
-               u64_stats_inc(&mhi_netdev->stats.rx_errors);
-               u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+               default:
+                       /* Unknown error, simply drop */
+                       dev_kfree_skb_any(skb);
+                       u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+                       u64_stats_inc(&mhi_netdev->stats.rx_errors);
+                       u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+               }
        } else {
+               skb_put(skb, mhi_res->bytes_xferd);
+
+               if (mhi_netdev->skbagg_head) {
+                       /* Aggregate the final fragment */
+                       skb = mhi_net_skb_agg(mhi_netdev, skb);
+                       mhi_netdev->skbagg_head = NULL;
+               }
+
                u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
                u64_stats_inc(&mhi_netdev->stats.rx_packets);
-               u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
+               u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
                u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
 
                switch (skb->data[0] & 0xf0) {
@@ -169,7 +220,6 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
                        break;
                }
 
-               skb_put(skb, mhi_res->bytes_xferd);
                netif_rx(skb);
        }
 
@@ -267,6 +317,7 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
        dev_set_drvdata(dev, mhi_netdev);
        mhi_netdev->ndev = ndev;
        mhi_netdev->mdev = mhi_dev;
+       mhi_netdev->skbagg_head = NULL;
        SET_NETDEV_DEV(ndev, &mhi_dev->dev);
        SET_NETDEV_DEVTYPE(ndev, &wwan_type);
 
@@ -301,6 +352,9 @@ static void mhi_net_remove(struct mhi_device *mhi_dev)
 
        mhi_unprepare_from_transfer(mhi_netdev->mdev);
 
+       if (mhi_netdev->skbagg_head)
+               kfree_skb(mhi_netdev->skbagg_head);
+
        free_netdev(mhi_netdev->ndev);
 }
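
As a side note to the remove() hunk above, freeing the aggregation head
is sufficient to drop a partially re-assembled packet, since kfree_skb()
also releases the skbs chained on frag_list. A minimal sketch (the
helper name is hypothetical, not part of this patch):

    #include <linux/skbuff.h>

    /* Drop a partially re-aggregated packet, e.g. on channel teardown.
     * Freeing the head frees the fragments chained on
     * skb_shinfo(head)->frag_list as well.
     */
    static void mhi_net_drop_agg(struct mhi_net_dev *mhi_netdev)
    {
            if (!mhi_netdev->skbagg_head)
                    return;

            kfree_skb(mhi_netdev->skbagg_head);
            mhi_netdev->skbagg_head = NULL;
            mhi_netdev->skbagg_tail = NULL;
    }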