
net/ibmvnic: prevent more than one thread from running in reset
author     Juliet Kim <julietk@linux.vnet.ibm.com>
           Fri, 20 Sep 2019 20:11:23 +0000 (16:11 -0400)
committer  David S. Miller <davem@davemloft.net>
           Wed, 25 Sep 2019 11:41:41 +0000 (13:41 +0200)
The current code allows more than one thread to run in reset, which can
corrupt data in the adapter structure. Check adapter->resetting before
performing a reset; if another reset is already running, delay (100 msec)
before trying again.
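
In outline, the patch turns adapter->resetting into a single-bit lock: the
reset worker claims bit 0 with test_and_set_bit_lock() before touching
adapter state, defers itself via delayed work if another reset already holds
the bit, and releases the bit with clear_bit_unlock() when it is done. A
condensed sketch of that flow, taken from the hunks below (unrelated reset
handling elided):

    static void __ibmvnic_reset(struct work_struct *work)
    {
            struct ibmvnic_adapter *adapter;

            adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);

            /* Bit 0 of adapter->resetting acts as a lock: if another thread
             * already holds it, retry after IBMVNIC_RESET_DELAY instead of
             * running a second reset concurrently.
             */
            if (test_and_set_bit_lock(0, &adapter->resetting)) {
                    schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
                                          IBMVNIC_RESET_DELAY);
                    return;
            }

            /* ... drain the rwi list and perform the reset ... */

            /* Release the bit with release semantics when the reset ends. */
            clear_bit_unlock(0, &adapter->resetting);
    }

Fast-path readers (xmit, NAPI poll, CRQ handling) only test the flag, so they
change from reading the old bool to test_bit(0, &adapter->resetting).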

Signed-off-by: Juliet Kim <julietk@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h

diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index d7db5cc..2b073a3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1207,7 +1207,7 @@ static void ibmvnic_cleanup(struct net_device *netdev)
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
        /* ensure that transmissions are stopped if called by do_reset */
-       if (adapter->resetting)
+       if (test_bit(0, &adapter->resetting))
                netif_tx_disable(netdev);
        else
                netif_tx_stop_all_queues(netdev);
@@ -1428,7 +1428,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        u8 proto = 0;
        netdev_tx_t ret = NETDEV_TX_OK;
 
-       if (adapter->resetting) {
+       if (test_bit(0, &adapter->resetting)) {
                if (!netif_subqueue_stopped(netdev, skb))
                        netif_stop_subqueue(netdev, queue_num);
                dev_kfree_skb_any(skb);
@@ -2054,6 +2054,12 @@ static void __ibmvnic_reset(struct work_struct *work)
 
        adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
 
+       if (test_and_set_bit_lock(0, &adapter->resetting)) {
+               schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
+                                     IBMVNIC_RESET_DELAY);
+               return;
+       }
+
        reset_state = adapter->state;
 
        rwi = get_next_rwi(adapter);
@@ -2095,6 +2101,10 @@ static void __ibmvnic_reset(struct work_struct *work)
                        break;
 
                rwi = get_next_rwi(adapter);
+
+               if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
+                           rwi->reset_reason == VNIC_RESET_MOBILITY))
+                       adapter->force_reset_recovery = true;
        }
 
        if (adapter->wait_for_reset) {
@@ -2107,7 +2117,16 @@ static void __ibmvnic_reset(struct work_struct *work)
                free_all_rwi(adapter);
        }
 
-       adapter->resetting = false;
+       clear_bit_unlock(0, &adapter->resetting);
+}
+
+static void __ibmvnic_delayed_reset(struct work_struct *work)
+{
+       struct ibmvnic_adapter *adapter;
+
+       adapter = container_of(work, struct ibmvnic_adapter,
+                              ibmvnic_delayed_reset.work);
+       __ibmvnic_reset(&adapter->ibmvnic_reset);
 }
 
 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@@ -2162,7 +2181,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
        rwi->reset_reason = reason;
        list_add_tail(&rwi->list, &adapter->rwi_list);
        spin_unlock_irqrestore(&adapter->rwi_lock, flags);
-       adapter->resetting = true;
        netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
        schedule_work(&adapter->ibmvnic_reset);
 
@@ -2207,7 +2225,7 @@ restart_poll:
                u16 offset;
                u8 flags = 0;
 
-               if (unlikely(adapter->resetting &&
+               if (unlikely(test_bit(0, &adapter->resetting) &&
                             adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
                        enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
                        napi_complete_done(napi, frames_processed);
@@ -2858,7 +2876,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
                return 1;
        }
 
-       if (adapter->resetting &&
+       if (test_bit(0, &adapter->resetting) &&
            adapter->reset_reason == VNIC_RESET_MOBILITY) {
                u64 val = (0xff000000) | scrq->hw_irq;
 
@@ -3408,7 +3426,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
        if (rc) {
                if (rc == H_CLOSED) {
                        dev_warn(dev, "CRQ Queue closed\n");
-                       if (adapter->resetting)
+                       if (test_bit(0, &adapter->resetting))
                                ibmvnic_reset(adapter, VNIC_RESET_FATAL);
                }
 
@@ -4484,7 +4502,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
        case IBMVNIC_CRQ_XPORT_EVENT:
                netif_carrier_off(netdev);
                adapter->crq.active = false;
-               if (adapter->resetting)
+               if (test_bit(0, &adapter->resetting))
                        adapter->force_reset_recovery = true;
                if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
                        dev_info(dev, "Migrated, re-enabling adapter\n");
@@ -4822,7 +4840,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
                return -1;
        }
 
-       if (adapter->resetting && !adapter->wait_for_reset &&
+       if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
            adapter->reset_reason != VNIC_RESET_MOBILITY) {
                if (adapter->req_rx_queues != old_num_rx_queues ||
                    adapter->req_tx_queues != old_num_tx_queues) {
@@ -4934,10 +4952,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        spin_lock_init(&adapter->stats_lock);
 
        INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
+       INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
+                         __ibmvnic_delayed_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
        spin_lock_init(&adapter->rwi_lock);
        init_completion(&adapter->init_done);
-       adapter->resetting = false;
+       clear_bit(0, &adapter->resetting);
 
        do {
                rc = init_crq_queue(adapter);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 9d3d35c..ebc3924 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -39,6 +39,8 @@
 #define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
 #define IBMVNIC_BUFFER_HLEN 500
 
+#define IBMVNIC_RESET_DELAY 100
+
 static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
 #define IBMVNIC_USE_SERVER_MAXES 0x1
        "use-server-maxes"
@@ -1077,7 +1079,8 @@ struct ibmvnic_adapter {
        spinlock_t rwi_lock;
        struct list_head rwi_list;
        struct work_struct ibmvnic_reset;
-       bool resetting;
+       struct delayed_work ibmvnic_delayed_reset;
+       unsigned long resetting;
        bool napi_enabled, from_passive_init;
 
        bool failover_pending;