nvme-auth: guarantee dhchap buffers under memory pressure
author		Sagi Grimberg <sagi@grimberg.me>
		Tue, 15 Nov 2022 16:08:06 +0000 (17:08 +0100)
committer	Christoph Hellwig <hch@lst.de>
		Wed, 16 Nov 2022 07:36:35 +0000 (08:36 +0100)
We want to guarantee that dhchap buffers are available when a
controller reconnects under memory pressure. Add a mempool
dedicated to that purpose.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/host/auth.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
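
For context, a mempool pre-allocates a fixed reserve of objects at creation
time; mempool_alloc() first tries the backing allocator and only then dips
into the reserve, and with GFP_KERNEL it sleeps until an element is freed
back rather than failing, which is what provides the forward-progress
guarantee this patch relies on. The following is a minimal, self-contained
sketch of the same pattern under hypothetical names (an example module, not
part of the patch):

/*
 * Hypothetical standalone module illustrating the pattern the patch
 * applies: a mempool backed by a dedicated slab cache, so that a
 * minimum number of buffers is reserved up front and allocations can
 * always make forward progress under memory pressure.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>

#define BUF_SIZE 4096
#define MIN_NR   16	/* buffers reserved at pool creation */

static struct kmem_cache *buf_cache;
static mempool_t *buf_pool;

static int __init buf_pool_init(void)
{
	buf_cache = kmem_cache_create("example-buf-cache", BUF_SIZE, 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	if (!buf_cache)
		return -ENOMEM;

	/* Pre-allocates MIN_NR objects from buf_cache as the reserve. */
	buf_pool = mempool_create(MIN_NR, mempool_alloc_slab,
				  mempool_free_slab, buf_cache);
	if (!buf_pool) {
		kmem_cache_destroy(buf_cache);
		return -ENOMEM;
	}

	return 0;
}

static void __exit buf_pool_exit(void)
{
	mempool_destroy(buf_pool);	/* returns reserved objects to the cache */
	kmem_cache_destroy(buf_cache);
}

module_init(buf_pool_init);
module_exit(buf_pool_exit);
MODULE_LICENSE("GPL");

Callers then allocate with mempool_alloc(buf_pool, GFP_KERNEL) and release
with mempool_free(buf, buf_pool), exactly the pairing the auth.c hunk below
introduces.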

diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 0812eb9..1b44676 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -14,6 +14,8 @@
 #include <linux/nvme-auth.h>
 
 #define CHAP_BUF_SIZE 4096
+static struct kmem_cache *nvme_chap_buf_cache;
+static mempool_t *nvme_chap_buf_pool;
 
 struct nvme_dhchap_queue_context {
        struct list_head entry;
@@ -675,7 +677,7 @@ static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
        chap->transaction = 0;
        memset(chap->c1, 0, sizeof(chap->c1));
        memset(chap->c2, 0, sizeof(chap->c2));
-       kfree(chap->buf);
+       mempool_free(chap->buf, nvme_chap_buf_pool);
        chap->buf = NULL;
 }
 
@@ -701,7 +703,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
         * Allocate a large enough buffer for the entire negotiation:
         * 4k is enough to ffdhe8192.
         */
-       chap->buf = kmalloc(CHAP_BUF_SIZE, GFP_KERNEL);
+       chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
        if (!chap->buf) {
                chap->error = -ENOMEM;
                return;
@@ -1029,3 +1031,27 @@ void nvme_auth_free(struct nvme_ctrl *ctrl)
        }
 }
 EXPORT_SYMBOL_GPL(nvme_auth_free);
+
+int __init nvme_init_auth(void)
+{
+       nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
+                               CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+       if (!nvme_chap_buf_cache)
+               return -ENOMEM;
+
+       nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
+                       mempool_free_slab, nvme_chap_buf_cache);
+       if (!nvme_chap_buf_pool)
+               goto err_destroy_chap_buf_cache;
+
+       return 0;
+err_destroy_chap_buf_cache:
+       kmem_cache_destroy(nvme_chap_buf_cache);
+       return -ENOMEM;
+}
+
+void __exit nvme_exit_auth(void)
+{
+       mempool_destroy(nvme_chap_buf_pool);
+       kmem_cache_destroy(nvme_chap_buf_cache);
+}
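
Two properties of the mempool API keep the converted call sites above
simple. mempool_free(), like kfree(), is a no-op when passed a NULL
pointer, so nvme_auth_reset_dhchap() needs no extra NULL check. And because
the pool pre-allocates its reserve, mempool_alloc() with GFP_KERNEL sleeps
until an element is returned rather than failing in process context, so the
existing -ENOMEM fallback in nvme_queue_auth_work() remains only as a
safety net.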
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index bad55fe..cb5e6d0 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -5347,8 +5347,13 @@ static int __init nvme_core_init(void)
                goto unregister_generic_ns;
        }
 
+       result = nvme_init_auth();
+       if (result)
+               goto destroy_ns_chr;
        return 0;
 
+destroy_ns_chr:
+       class_destroy(nvme_ns_chr_class);
 unregister_generic_ns:
        unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
 destroy_subsys_class:
@@ -5369,6 +5374,7 @@ out:
 
 static void __exit nvme_core_exit(void)
 {
+       nvme_exit_auth();
        class_destroy(nvme_ns_chr_class);
        class_destroy(nvme_subsys_class);
        class_destroy(nvme_class);
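
Note the teardown order: nvme_exit_auth() is called first in
nvme_core_exit() because auth is the last thing set up in nvme_core_init(),
mirroring the LIFO unwind of the goto error labels above.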
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 47f96ab..ebd67e7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1018,6 +1018,8 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
 }
 
 #ifdef CONFIG_NVME_AUTH
+int __init nvme_init_auth(void);
+void __exit nvme_exit_auth(void);
 int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
 void nvme_auth_stop(struct nvme_ctrl *ctrl);
 int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
@@ -1029,6 +1031,13 @@ static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
 {
        return 0;
 }
+static inline int __init nvme_init_auth(void)
+{
+       return 0;
+}
+static inline void __exit nvme_exit_auth(void)
+{
+}
 static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {};
 static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
 {