RDMA/hfi1: don't pass bogus GFP_ flags to dma_alloc_coherent
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Wed, 9 Nov 2022 07:15:00 +0000 (08:15 +0100)
Commit:     Christoph Hellwig <hch@lst.de>
CommitDate: Mon, 21 Nov 2022 08:35:52 +0000 (09:35 +0100)

dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags
for allocation context control.  Don't pass GFP_USER, which doesn't make
sense for a kernel DMA allocation, or __GFP_COMP, which makes no sense
for an allocation that can't in any way be converted to a page pointer.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Dean Luick <dean.luick@cornelisnetworks.com>
Tested-by: Dean Luick <dean.luick@cornelisnetworks.com>
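
For context, dma_alloc_coherent() treats its gfp argument purely as
allocation-context control (may the caller sleep, may it do I/O); placement
is up to the DMA layer, and the returned buffer may not be page-backed at
all, so page-oriented flags are meaningless.  A minimal sketch of the
idiomatic call, with a hypothetical helper name and device:

	#include <linux/dma-mapping.h>

	/*
	 * Hypothetical example: allocate a DMA-coherent receive ring.
	 * Only the allocation context matters for the gfp argument --
	 * GFP_KERNEL if the caller may sleep, GFP_ATOMIC if not.  Flags
	 * such as GFP_USER or __GFP_COMP have no meaning here because
	 * the returned memory need not be backed by struct page at all.
	 */
	static void *alloc_rx_ring(struct device *dev, size_t size,
				   dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
	}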
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 436372b..24c0f0d 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1761,17 +1761,11 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
        unsigned amt;
 
        if (!rcd->rcvhdrq) {
-               gfp_t gfp_flags;
-
                amt = rcvhdrq_size(rcd);
 
-               if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
-                       gfp_flags = GFP_KERNEL;
-               else
-                       gfp_flags = GFP_USER;
                rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
                                                  &rcd->rcvhdrq_dma,
-                                                 gfp_flags | __GFP_COMP);
+                                                 GFP_KERNEL);
 
                if (!rcd->rcvhdrq) {
                        dd_dev_err(dd,
@@ -1785,7 +1779,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
                        rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
                                                                    PAGE_SIZE,
                                                                    &rcd->rcvhdrqtailaddr_dma,
-                                                                   gfp_flags);
+                                                                   GFP_KERNEL);
                        if (!rcd->rcvhdrtail_kvaddr)
                                goto bail_free;
                }
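
With the conditional gone, the receive header queue is always a plain
GFP_KERNEL coherent allocation.  Each dma_alloc_coherent() must be paired
with dma_free_coherent() using the same device, size, CPU address and DMA
handle; the helper below is an illustrative sketch (the function name is
hypothetical, though the field names mirror the hunk above):

	/*
	 * Illustrative teardown (hypothetical helper; field names mirror
	 * the hunk above).  Every dma_alloc_coherent() is undone by
	 * dma_free_coherent() with the same device, size, CPU address
	 * and DMA handle.
	 */
	static void free_rcvhdrq(struct hfi1_devdata *dd,
				 struct hfi1_ctxtdata *rcd)
	{
		if (rcd->rcvhdrq) {
			dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
					  rcd->rcvhdrq, rcd->rcvhdrq_dma);
			rcd->rcvhdrq = NULL;
		}
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}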
@@ -1821,20 +1815,11 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 {
        struct hfi1_devdata *dd = rcd->dd;
        u32 max_entries, egrtop, alloced_bytes = 0;
-       gfp_t gfp_flags;
        u16 order, idx = 0;
        int ret = 0;
        u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
 
        /*
-        * GFP_USER, but without GFP_FS, so buffer cache can be
-        * coalesced (we hope); otherwise, even at order 4,
-        * heavy filesystem activity makes these fail, and we can
-        * use compound pages.
-        */
-       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
-       /*
         * The minimum size of the eager buffers is a group of MTU-sized
         * buffers.
         * The global eager_buffer_size parameter is checked against the
@@ -1864,7 +1849,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
                        dma_alloc_coherent(&dd->pcidev->dev,
                                           rcd->egrbufs.rcvtid_size,
                                           &rcd->egrbufs.buffers[idx].dma,
-                                          gfp_flags);
+                                          GFP_KERNEL);
                if (rcd->egrbufs.buffers[idx].addr) {
                        rcd->egrbufs.buffers[idx].len =
                                rcd->egrbufs.rcvtid_size;