OSDN Git Service

net: hns3: change hclge/hclgevf workqueue to WQ_UNBOUND mode
authorYufeng Mo <moyufeng@huawei.com>
Wed, 27 Oct 2021 12:11:44 +0000 (20:11 +0800)
committerDavid S. Miller <davem@davemloft.net>
Wed, 27 Oct 2021 13:47:33 +0000 (14:47 +0100)
Currently, the workqueue of hclge/hclgevf is executed on
the CPU that initiates the scheduling request by default. In
stress scenarios, that CPU may be busy, so the scheduled work
may not run for a long time. To avoid this situation and get
proper scheduling, use the WQ_UNBOUND mode instead. In this
way, the work can be dispatched to a relatively idle CPU.

Signed-off-by: Yufeng Mo <moyufeng@huawei.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c

index c6b9806..3dbde04 100644 (file)
@@ -2847,33 +2847,28 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
 {
        if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
            !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
-               mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-                                   hclge_wq, &hdev->service_task, 0);
+               mod_delayed_work(hclge_wq, &hdev->service_task, 0);
 }
 
 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
 {
        if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
            !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
-               mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-                                   hclge_wq, &hdev->service_task, 0);
+               mod_delayed_work(hclge_wq, &hdev->service_task, 0);
 }
 
 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
 {
        if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
            !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
-               mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-                                   hclge_wq, &hdev->service_task, 0);
+               mod_delayed_work(hclge_wq, &hdev->service_task, 0);
 }
 
 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
 {
        if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
            !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
-               mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
-                                   hclge_wq, &hdev->service_task,
-                                   delay_time);
+               mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
 }
 
 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
@@ -3491,33 +3486,14 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
        hdev->num_msi_used += 1;
 }
 
-static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
-                                     const cpumask_t *mask)
-{
-       struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
-                                             affinity_notify);
-
-       cpumask_copy(&hdev->affinity_mask, mask);
-}
-
-static void hclge_irq_affinity_release(struct kref *ref)
-{
-}
-
 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
 {
        irq_set_affinity_hint(hdev->misc_vector.vector_irq,
                              &hdev->affinity_mask);
-
-       hdev->affinity_notify.notify = hclge_irq_affinity_notify;
-       hdev->affinity_notify.release = hclge_irq_affinity_release;
-       irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
-                                 &hdev->affinity_notify);
 }
 
 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
 {
-       irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
        irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
 }
 
@@ -13082,7 +13058,7 @@ static int hclge_init(void)
 {
        pr_info("%s is initializing\n", HCLGE_NAME);
 
-       hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
+       hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
        if (!hclge_wq) {
                pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
                return -ENOMEM;
index de6afbc..69cd8f8 100644 (file)
@@ -944,7 +944,6 @@ struct hclge_dev {
 
        /* affinity mask and notify for misc interrupt */
        cpumask_t affinity_mask;
-       struct irq_affinity_notify affinity_notify;
        struct hclge_ptp *ptp;
        struct devlink *devlink;
 };
index bef6b98..5efa542 100644 (file)
@@ -3899,7 +3899,7 @@ static int hclgevf_init(void)
 {
        pr_info("%s is initializing\n", HCLGEVF_NAME);
 
-       hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
+       hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
        if (!hclgevf_wq) {
                pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
                return -ENOMEM;