nvme-tcp: have queue prod/cons send list become a llist
author     Sagi Grimberg <sagi@grimberg.me>
Fri, 19 Jun 2020 00:30:22 +0000 (17:30 -0700)
committer  Christoph Hellwig <hch@lst.de>
Wed, 8 Jul 2020 14:16:18 +0000 (16:16 +0200)
Queue processing will splice the pending requests to a queue-local
list. This should alleviate some contention on the send_list lock, and
it also prepares us for the next patch, where we look at these lists
for a network stack flag optimization.

Remove the queue lock as it is not used anymore.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
[hch: simplified a loop]
Signed-off-by: Christoph Hellwig <hch@lst.de>
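
The producer side of the change is the llist_add() call in
nvme_tcp_queue_request(): a single lock-free push that also reports
whether the list was empty beforehand, which is what lets the submitter
try an inline send. As a rough userspace analogue of that
push-and-was-empty pattern (a sketch only; lnode, lhead and lpush are
made-up names, not the kernel llist API), the idea looks like this with
C11 atomics:

  /*
   * Userspace sketch of the push-and-was-empty pattern (hypothetical
   * names, not the kernel llist API): a lock-free LIFO where the
   * producer learns whether it was the first entry, mirroring the
   * "empty" check in nvme_tcp_queue_request().
   */
  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stddef.h>

  struct lnode {
          struct lnode *next;
  };

  struct lhead {
          _Atomic(struct lnode *) first;
  };

  /*
   * Push one node; return true if the list was empty before the push,
   * so the caller knows it may try to send directly instead of only
   * kicking io_work.
   */
  static bool lpush(struct lhead *h, struct lnode *n)
  {
          struct lnode *old = atomic_load_explicit(&h->first,
                                                   memory_order_relaxed);

          do {
                  n->next = old;
          } while (!atomic_compare_exchange_weak_explicit(&h->first, &old, n,
                          memory_order_release, memory_order_relaxed));

          return old == NULL;
  }

The patch itself relies on the kernel's llist_add()/llist_del_all()
pair, where llist_add() likewise returns whether the list was empty
prior to the add.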
drivers/nvme/host/tcp.c

index 7006aca..4788685 100644
@@ -46,6 +46,7 @@ struct nvme_tcp_request {
        u32                     pdu_sent;
        u16                     ttag;
        struct list_head        entry;
+       struct llist_node       lentry;
        __le32                  ddgst;
 
        struct bio              *curr_bio;
@@ -75,8 +76,8 @@ struct nvme_tcp_queue {
        struct work_struct      io_work;
        int                     io_cpu;
 
-       spinlock_t              lock;
        struct mutex            send_mutex;
+       struct llist_head       req_list;
        struct list_head        send_list;
 
        /* recv state */
@@ -266,10 +267,8 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
        struct nvme_tcp_queue *queue = req->queue;
        bool empty;
 
-       spin_lock(&queue->lock);
-       empty = list_empty(&queue->send_list) && !queue->request;
-       list_add_tail(&req->entry, &queue->send_list);
-       spin_unlock(&queue->lock);
+       empty = llist_add(&req->lentry, &queue->req_list) &&
+               list_empty(&queue->send_list) && !queue->request;
 
        /*
         * if we're the first on the send_list and we can try to send
@@ -285,18 +284,33 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
        }
 }
 
+static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
+{
+       struct nvme_tcp_request *req;
+       struct llist_node *node;
+
+       for (node = llist_del_all(&queue->req_list); node; node = node->next) {
+               req = llist_entry(node, struct nvme_tcp_request, lentry);
+               list_add(&req->entry, &queue->send_list);
+       }
+}
+
 static inline struct nvme_tcp_request *
 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
 {
        struct nvme_tcp_request *req;
 
-       spin_lock(&queue->lock);
        req = list_first_entry_or_null(&queue->send_list,
                        struct nvme_tcp_request, entry);
-       if (req)
-               list_del(&req->entry);
-       spin_unlock(&queue->lock);
+       if (!req) {
+               nvme_tcp_process_req_list(queue);
+               req = list_first_entry_or_null(&queue->send_list,
+                               struct nvme_tcp_request, entry);
+               if (unlikely(!req))
+                       return NULL;
+       }
 
+       list_del(&req->entry);
        return req;
 }
 
@@ -1344,8 +1358,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
        int ret, rcv_pdu_size;
 
        queue->ctrl = ctrl;
+       init_llist_head(&queue->req_list);
        INIT_LIST_HEAD(&queue->send_list);
-       spin_lock_init(&queue->lock);
        mutex_init(&queue->send_mutex);
        INIT_WORK(&queue->io_work, nvme_tcp_io_work);
        queue->queue_size = queue_size;
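
On the consume side, llist_del_all() detaches the whole chain with one
atomic exchange, and nvme_tcp_process_req_list() head-inserts each
entry into send_list with list_add(); since llist_add() pushes LIFO,
that second reversal hands requests back in submission order. A minimal
userspace sketch of the take-all-and-reverse step (again with made-up
names, not the kernel API):

  /*
   * Userspace sketch of the consumer side (hypothetical names): one
   * atomic exchange detaches everything the producers queued, and
   * reversing the LIFO chain restores submission (FIFO) order, which
   * is the effect of list_add() onto send_list in
   * nvme_tcp_process_req_list().
   */
  #include <stdatomic.h>
  #include <stddef.h>

  struct lnode {
          struct lnode *next;
  };

  struct lhead {
          _Atomic(struct lnode *) first;
  };

  /* Analogous to llist_del_all(): grab the whole pending list at once. */
  static struct lnode *ltake_all(struct lhead *h)
  {
          return atomic_exchange_explicit(&h->first, NULL,
                                          memory_order_acquire);
  }

  /* Reverse the detached LIFO chain into FIFO order for consumption. */
  static struct lnode *lreverse(struct lnode *node)
  {
          struct lnode *fifo = NULL;

          while (node) {
                  struct lnode *next = node->next;

                  node->next = fifo;
                  fifo = node;
                  node = next;
          }

          return fifo;
  }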