OSDN Git Service

svcrdma: Clean up the tracing for rw_ctx_init errors
authorChuck Lever <chuck.lever@oracle.com>
Fri, 20 Mar 2020 18:02:49 +0000 (14:02 -0400)
committerChuck Lever <chuck.lever@oracle.com>
Mon, 18 May 2020 14:21:21 +0000 (10:21 -0400)
- De-duplicate code
- Rename the tracepoint with "_err" to allow enabling via glob
- Report the sg_cnt for the failing rw_ctx
- Fix a dumb signedness issue

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
include/trace/events/rpcrdma.h
net/sunrpc/xprtrdma/svc_rdma_rw.c

index 132c3c7..f231975 100644 (file)
@@ -1583,28 +1583,32 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
 DEFINE_SVC_DMA_EVENT(dma_map_page);
 DEFINE_SVC_DMA_EVENT(dma_unmap_page);
 
-TRACE_EVENT(svcrdma_dma_map_rwctx,
+TRACE_EVENT(svcrdma_dma_map_rw_err,
        TP_PROTO(
                const struct svcxprt_rdma *rdma,
+               unsigned int nents,
                int status
        ),
 
-       TP_ARGS(rdma, status),
+       TP_ARGS(rdma, nents, status),
 
        TP_STRUCT__entry(
                __field(int, status)
+               __field(unsigned int, nents)
                __string(device, rdma->sc_cm_id->device->name)
                __string(addr, rdma->sc_xprt.xpt_remotebuf)
        ),
 
        TP_fast_assign(
                __entry->status = status;
+               __entry->nents = nents;
                __assign_str(device, rdma->sc_cm_id->device->name);
                __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
        ),
 
-       TP_printk("addr=%s device=%s status=%d",
-               __get_str(addr), __get_str(device), __entry->status
+       TP_printk("addr=%s device=%s nents=%u status=%d",
+               __get_str(addr), __get_str(device), __entry->nents,
+               __entry->status
        )
 );
 
index 23c2d3c..db70709 100644 (file)
@@ -39,7 +39,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
 struct svc_rdma_rw_ctxt {
        struct list_head        rw_list;
        struct rdma_rw_ctx      rw_ctx;
-       int                     rw_nents;
+       unsigned int            rw_nents;
        struct sg_table         rw_sg_table;
        struct scatterlist      rw_first_sgl[];
 };
@@ -107,6 +107,34 @@ void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
        }
 }
 
+/**
+ * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
+ * @rdma: controlling transport instance
+ * @ctxt: R/W context to prepare
+ * @offset: RDMA offset
+ * @handle: RDMA tag/handle
+ * @direction: I/O direction
+ *
+ * Returns on success, the number of WQEs that will be needed
+ * on the workqueue, or a negative errno.
+ */
+static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
+                               struct svc_rdma_rw_ctxt *ctxt,
+                               u64 offset, u32 handle,
+                               enum dma_data_direction direction)
+{
+       int ret;
+
+       ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
+                              ctxt->rw_sg_table.sgl, ctxt->rw_nents,
+                              0, offset, handle, direction);
+       if (unlikely(ret < 0)) {
+               svc_rdma_put_rw_ctxt(rdma, ctxt);
+               trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
+       }
+       return ret;
+}
+
 /* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
  * for all segments of one chunk.
@@ -431,12 +459,10 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
                        goto out_noctx;
 
                constructor(info, write_len, ctxt);
-               ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
-                                      rdma->sc_port_num, ctxt->rw_sg_table.sgl,
-                                      ctxt->rw_nents, 0, seg_offset,
-                                      seg_handle, DMA_TO_DEVICE);
+               ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle,
+                                          DMA_TO_DEVICE);
                if (ret < 0)
-                       goto out_initerr;
+                       return -EIO;
 
                trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset);
 
@@ -462,11 +488,6 @@ out_overflow:
 out_noctx:
        dprintk("svcrdma: no R/W ctxs available\n");
        return -ENOMEM;
-
-out_initerr:
-       svc_rdma_put_rw_ctxt(rdma, ctxt);
-       trace_svcrdma_dma_map_rwctx(rdma, ret);
-       return -EIO;
 }
 
 /* Send one of an xdr_buf's kvecs by itself. To send a Reply
@@ -646,12 +667,10 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
                        goto out_overrun;
        }
 
-       ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
-                              cc->cc_rdma->sc_port_num,
-                              ctxt->rw_sg_table.sgl, ctxt->rw_nents,
-                              0, offset, rkey, DMA_FROM_DEVICE);
+       ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
+                                  DMA_FROM_DEVICE);
        if (ret < 0)
-               goto out_initerr;
+               return -EIO;
 
        list_add(&ctxt->rw_list, &cc->cc_rwctxts);
        cc->cc_sqecount += ret;
@@ -664,11 +683,6 @@ out_noctx:
 out_overrun:
        dprintk("svcrdma: request overruns rq_pages\n");
        return -EINVAL;
-
-out_initerr:
-       trace_svcrdma_dma_map_rwctx(cc->cc_rdma, ret);
-       svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
-       return -EIO;
 }
 
 /* Walk the segments in the Read chunk starting at @p and construct