RDMA/rxe: Isolate mr code from atomic_reply()
author Bob Pearson <rpearsonhpe@gmail.com>
Thu, 19 Jan 2023 23:59:34 +0000 (17:59 -0600)
committer Jason Gunthorpe <jgg@nvidia.com>
Thu, 26 Jan 2023 19:04:45 +0000 (15:04 -0400)
Isolate mr-specific code from atomic_reply() in rxe_resp.c into
a subroutine rxe_mr_do_atomic_op() in rxe_mr.c.
Minor cleanups to mr_check_range() and iova_to_vaddr().
Move enum resp_states to rxe.h.

Link: https://lore.kernel.org/r/20230119235936.19728-4-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
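
For orientation, the responder path after this patch condenses to roughly the
following (excerpted and trimmed from the rxe_resp.c hunk below; replay and
resource handling omitted). Note the return convention: the helper returns 0 on
success or an enum resp_states error code, which atomic_reply() forwards
unchanged.

	u64 iova = qp->resp.va + qp->resp.offset;
	int err;

	err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
				  atmeth_comp(pkt), atmeth_swap_add(pkt),
				  &res->atomic.orig_val);
	if (err)
		return err;	/* already an enum resp_states error value */

	return RESPST_ACKNOWLEDGE;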
drivers/infiniband/sw/rxe/rxe.h
drivers/infiniband/sw/rxe/rxe_loc.h
drivers/infiniband/sw/rxe/rxe_mr.c
drivers/infiniband/sw/rxe/rxe_resp.c

diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index ab33490..2415f37 100644
 #define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device,          \
                "mw#%d %s:  " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
 
+/* responder states */
+enum resp_states {
+       RESPST_NONE,
+       RESPST_GET_REQ,
+       RESPST_CHK_PSN,
+       RESPST_CHK_OP_SEQ,
+       RESPST_CHK_OP_VALID,
+       RESPST_CHK_RESOURCE,
+       RESPST_CHK_LENGTH,
+       RESPST_CHK_RKEY,
+       RESPST_EXECUTE,
+       RESPST_READ_REPLY,
+       RESPST_ATOMIC_REPLY,
+       RESPST_ATOMIC_WRITE_REPLY,
+       RESPST_PROCESS_FLUSH,
+       RESPST_COMPLETE,
+       RESPST_ACKNOWLEDGE,
+       RESPST_CLEANUP,
+       RESPST_DUPLICATE_REQUEST,
+       RESPST_ERR_MALFORMED_WQE,
+       RESPST_ERR_UNSUPPORTED_OPCODE,
+       RESPST_ERR_MISALIGNED_ATOMIC,
+       RESPST_ERR_PSN_OUT_OF_SEQ,
+       RESPST_ERR_MISSING_OPCODE_FIRST,
+       RESPST_ERR_MISSING_OPCODE_LAST_C,
+       RESPST_ERR_MISSING_OPCODE_LAST_D1E,
+       RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
+       RESPST_ERR_RNR,
+       RESPST_ERR_RKEY_VIOLATION,
+       RESPST_ERR_INVALIDATE_RKEY,
+       RESPST_ERR_LENGTH,
+       RESPST_ERR_CQ_OVERFLOW,
+       RESPST_ERROR,
+       RESPST_RESET,
+       RESPST_DONE,
+       RESPST_EXIT,
+};
+
 void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
 
 int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
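
Moving enum resp_states into rxe.h makes these states visible outside
rxe_resp.c, which the new rxe_mr_do_atomic_op() needs in order to return them
as error codes. For context, a standalone, stripped-down sketch of the dispatch
pattern such states drive (the real loop lives in rxe_responder() in
rxe_resp.c; the toy states and handlers below are invented for illustration):

#include <stdio.h>

/* toy subset of the state space, for illustration only */
enum resp_states { RESPST_GET_REQ, RESPST_EXECUTE, RESPST_DONE, RESPST_EXIT };

static enum resp_states get_req(void) { return RESPST_EXECUTE; }
static enum resp_states execute(void) { return RESPST_DONE; }

int main(void)
{
	enum resp_states state = RESPST_GET_REQ;

	/* each handler returns the next state, as in rxe_resp.c */
	while (1) {
		switch (state) {
		case RESPST_GET_REQ: state = get_req(); break;
		case RESPST_EXECUTE: state = execute(); break;
		case RESPST_DONE:    state = RESPST_EXIT; break;
		case RESPST_EXIT:    puts("responder done"); return 0;
		}
	}
}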
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 29b6c21..bcb1bbc 100644
@@ -72,6 +72,8 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
 int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                  int sg_nents, unsigned int *sg_offset);
 void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
+int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+                       u64 compare, u64 swap_add, u64 *orig_val);
 struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
                         enum rxe_mr_lookup_type type);
 int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 229c725..df97414 100644
@@ -32,13 +32,15 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
 
        case IB_MR_TYPE_USER:
        case IB_MR_TYPE_MEM_REG:
-               if (iova < mr->ibmr.iova || length > mr->ibmr.length ||
-                   iova > mr->ibmr.iova + mr->ibmr.length - length)
-                       return -EFAULT;
+               if (iova < mr->ibmr.iova ||
+                   iova + length > mr->ibmr.iova + mr->ibmr.length) {
+                       rxe_dbg_mr(mr, "iova/length out of range");
+                       return -EINVAL;
+               }
                return 0;
 
        default:
-               rxe_dbg_mr(mr, "type (%d) not supported\n", mr->ibmr.type);
+               rxe_dbg_mr(mr, "mr type not supported\n");
                return -EINVAL;
        }
 }
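
Read with concrete numbers, the rewritten bounds check requires the access
window [iova, iova + length) to sit inside the registered region
[ibmr.iova, ibmr.iova + ibmr.length). A standalone sketch with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mr_iova = 0x1000, mr_length = 0x2000; /* region [0x1000, 0x3000) */
	uint64_t iova = 0x2ffc;
	uint64_t length = 8;	/* access [0x2ffc, 0x3004) crosses the end */

	if (iova < mr_iova || iova + length > mr_iova + mr_length)
		puts("-EINVAL: iova/length out of range");
	else
		puts("in range");
	return 0;
}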
@@ -299,37 +301,22 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 {
        size_t offset;
        int m, n;
-       void *addr;
 
-       if (mr->state != RXE_MR_STATE_VALID) {
-               rxe_dbg_mr(mr, "Not in valid state\n");
-               addr = NULL;
-               goto out;
-       }
+       if (mr->state != RXE_MR_STATE_VALID)
+               return NULL;
 
-       if (!mr->map) {
-               addr = (void *)(uintptr_t)iova;
-               goto out;
-       }
+       if (mr->ibmr.type == IB_MR_TYPE_DMA)
+               return (void *)(uintptr_t)iova;
 
-       if (mr_check_range(mr, iova, length)) {
-               rxe_dbg_mr(mr, "Range violation\n");
-               addr = NULL;
-               goto out;
-       }
+       if (mr_check_range(mr, iova, length))
+               return NULL;
 
        lookup_iova(mr, iova, &m, &n, &offset);
 
-       if (offset + length > mr->map[m]->buf[n].size) {
-               rxe_dbg_mr(mr, "Crosses page boundary\n");
-               addr = NULL;
-               goto out;
-       }
-
-       addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
+       if (offset + length > mr->map[m]->buf[n].size)
+               return NULL;
 
-out:
-       return addr;
+       return (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
 }
 
 int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length)
@@ -538,6 +525,46 @@ err1:
        return err;
 }
 
+/* Guarantee atomicity of atomic operations at the machine level. */
+static DEFINE_SPINLOCK(atomic_ops_lock);
+
+int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+                       u64 compare, u64 swap_add, u64 *orig_val)
+{
+       u64 *va;
+       u64 value;
+
+       if (mr->state != RXE_MR_STATE_VALID) {
+               rxe_dbg_mr(mr, "mr not in valid state");
+               return RESPST_ERR_RKEY_VIOLATION;
+       }
+
+       va = iova_to_vaddr(mr, iova, sizeof(u64));
+       if (!va) {
+               rxe_dbg_mr(mr, "iova out of range");
+               return RESPST_ERR_RKEY_VIOLATION;
+       }
+
+       if ((uintptr_t)va & 0x7) {
+               rxe_dbg_mr(mr, "iova not aligned");
+               return RESPST_ERR_MISALIGNED_ATOMIC;
+       }
+
+       spin_lock_bh(&atomic_ops_lock);
+       value = *orig_val = *va;
+
+       if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+               if (value == compare)
+                       *va = swap_add;
+       } else {
+               value += swap_add;
+               *va = value;
+       }
+       spin_unlock_bh(&atomic_ops_lock);
+
+       return 0;
+}
+
 int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
 {
        struct rxe_sge          *sge    = &dma->sge[dma->cur_sge];
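
To make the helper's two semantics concrete, a standalone sketch of what
rxe_mr_do_atomic_op() computes under atomic_ops_lock, with plain memory
standing in for the mr mapping and invented values (the validity, range, and
alignment checks are omitted):

#include <stdint.h>
#include <stdio.h>

static void do_atomic(uint64_t *va, int is_cmp_swap,
		      uint64_t compare, uint64_t swap_add, uint64_t *orig)
{
	*orig = *va;			/* caller always gets the original value */
	if (is_cmp_swap) {
		if (*va == compare)	/* swap only if the compare matches */
			*va = swap_add;
	} else {
		*va += swap_add;	/* fetch-and-add */
	}
}

int main(void)
{
	uint64_t mem = 5, orig;

	do_atomic(&mem, 1, 5, 100, &orig);	/* compare matches: 5 -> 100 */
	printf("cmp_swap:  orig=%llu new=%llu\n",
	       (unsigned long long)orig, (unsigned long long)mem);

	do_atomic(&mem, 0, 0, 7, &orig);	/* fetch-add: 100 -> 107 */
	printf("fetch_add: orig=%llu new=%llu\n",
	       (unsigned long long)orig, (unsigned long long)mem);
	return 0;
}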
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index c749722..9d4b4e9 100644
 #include "rxe_loc.h"
 #include "rxe_queue.h"
 
-enum resp_states {
-       RESPST_NONE,
-       RESPST_GET_REQ,
-       RESPST_CHK_PSN,
-       RESPST_CHK_OP_SEQ,
-       RESPST_CHK_OP_VALID,
-       RESPST_CHK_RESOURCE,
-       RESPST_CHK_LENGTH,
-       RESPST_CHK_RKEY,
-       RESPST_EXECUTE,
-       RESPST_READ_REPLY,
-       RESPST_ATOMIC_REPLY,
-       RESPST_ATOMIC_WRITE_REPLY,
-       RESPST_PROCESS_FLUSH,
-       RESPST_COMPLETE,
-       RESPST_ACKNOWLEDGE,
-       RESPST_CLEANUP,
-       RESPST_DUPLICATE_REQUEST,
-       RESPST_ERR_MALFORMED_WQE,
-       RESPST_ERR_UNSUPPORTED_OPCODE,
-       RESPST_ERR_MISALIGNED_ATOMIC,
-       RESPST_ERR_PSN_OUT_OF_SEQ,
-       RESPST_ERR_MISSING_OPCODE_FIRST,
-       RESPST_ERR_MISSING_OPCODE_LAST_C,
-       RESPST_ERR_MISSING_OPCODE_LAST_D1E,
-       RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
-       RESPST_ERR_RNR,
-       RESPST_ERR_RKEY_VIOLATION,
-       RESPST_ERR_INVALIDATE_RKEY,
-       RESPST_ERR_LENGTH,
-       RESPST_ERR_CQ_OVERFLOW,
-       RESPST_ERROR,
-       RESPST_RESET,
-       RESPST_DONE,
-       RESPST_EXIT,
-};
-
 static char *resp_state_name[] = {
        [RESPST_NONE]                           = "NONE",
        [RESPST_GET_REQ]                        = "GET_REQ",
@@ -725,17 +688,12 @@ static enum resp_states process_flush(struct rxe_qp *qp,
        return RESPST_ACKNOWLEDGE;
 }
 
-/* Guarantee atomicity of atomic operations at the machine level. */
-static DEFINE_SPINLOCK(atomic_ops_lock);
-
 static enum resp_states atomic_reply(struct rxe_qp *qp,
-                                        struct rxe_pkt_info *pkt)
+                                    struct rxe_pkt_info *pkt)
 {
-       u64 *vaddr;
-       enum resp_states ret;
        struct rxe_mr *mr = qp->resp.mr;
        struct resp_res *res = qp->resp.res;
-       u64 value;
+       int err;
 
        if (!res) {
                res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
@@ -743,32 +701,14 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
        }
 
        if (!res->replay) {
-               if (mr->state != RXE_MR_STATE_VALID) {
-                       ret = RESPST_ERR_RKEY_VIOLATION;
-                       goto out;
-               }
-
-               vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
-                                       sizeof(u64));
-
-               /* check vaddr is 8 bytes aligned. */
-               if (!vaddr || (uintptr_t)vaddr & 7) {
-                       ret = RESPST_ERR_MISALIGNED_ATOMIC;
-                       goto out;
-               }
-
-               spin_lock_bh(&atomic_ops_lock);
-               res->atomic.orig_val = value = *vaddr;
-
-               if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
-                       if (value == atmeth_comp(pkt))
-                               value = atmeth_swap_add(pkt);
-               } else {
-                       value += atmeth_swap_add(pkt);
-               }
+               u64 iova = qp->resp.va + qp->resp.offset;
 
-               *vaddr = value;
-               spin_unlock_bh(&atomic_ops_lock);
+               err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
+                                         atmeth_comp(pkt),
+                                         atmeth_swap_add(pkt),
+                                         &res->atomic.orig_val);
+               if (err)
+                       return err;
 
                qp->resp.msn++;
 
@@ -780,9 +720,7 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
                qp->resp.status = IB_WC_SUCCESS;
        }
 
-       ret = RESPST_ACKNOWLEDGE;
-out:
-       return ret;
+       return RESPST_ACKNOWLEDGE;
 }
 
 #ifdef CONFIG_64BIT