Merge branch 'for-4.2' of git://linux-nfs.org/~bfields/linux

diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index f609c1c..6b36279 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -91,7 +91,7 @@ struct svc_xprt_class svc_rdma_class = {
        .xcl_name = "rdma",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_ops,
-       .xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
+       .xcl_max_payload = RPCRDMA_MAXPAYLOAD,
        .xcl_ident = XPRT_TRANSPORT_RDMA,
 };
 
@@ -99,12 +99,8 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 {
        struct svc_rdma_op_ctxt *ctxt;
 
-       while (1) {
-               ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
-               if (ctxt)
-                       break;
-               schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-       }
+       ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep,
+                               GFP_KERNEL | __GFP_NOFAIL);
        ctxt->xprt = xprt;
        INIT_LIST_HEAD(&ctxt->dto_q);
        ctxt->count = 0;
@@ -156,12 +152,8 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 struct svc_rdma_req_map *svc_rdma_get_req_map(void)
 {
        struct svc_rdma_req_map *map;
-       while (1) {
-               map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
-               if (map)
-                       break;
-               schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-       }
+       map = kmem_cache_alloc(svc_rdma_map_cachep,
+                              GFP_KERNEL | __GFP_NOFAIL);
        map->count = 0;
        return map;
 }
@@ -175,8 +167,8 @@ void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
 static void cq_event_handler(struct ib_event *event, void *context)
 {
        struct svc_xprt *xprt = context;
-       dprintk("svcrdma: received CQ event id=%d, context=%p\n",
-               event->event, context);
+       dprintk("svcrdma: received CQ event %s (%d), context=%p\n",
+               ib_event_msg(event->event), event->event, context);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
 }
 
@@ -191,8 +183,9 @@ static void qp_event_handler(struct ib_event *event, void *context)
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
        case IB_EVENT_QP_LAST_WQE_REACHED:
-               dprintk("svcrdma: QP event %d received for QP=%p\n",
-                       event->event, event->element.qp);
+               dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
+                       ib_event_msg(event->event), event->event,
+                       event->element.qp);
                break;
        /* These are considered fatal events */
        case IB_EVENT_PATH_MIG_ERR:
@@ -201,9 +194,10 @@ static void qp_event_handler(struct ib_event *event, void *context)
        case IB_EVENT_QP_ACCESS_ERR:
        case IB_EVENT_DEVICE_FATAL:
        default:
-               dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
+               dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
                        "closing transport\n",
-                       event->event, event->element.qp);
+                       ib_event_msg(event->event), event->event,
+                       event->element.qp);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                break;
        }
@@ -402,7 +396,8 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
                for (i = 0; i < ret; i++) {
                        wc = &wc_a[i];
                        if (wc->status != IB_WC_SUCCESS) {
-                               dprintk("svcrdma: sq wc err status %d\n",
+                               dprintk("svcrdma: sq wc err status %s (%d)\n",
+                                       ib_wc_status_msg(wc->status),
                                        wc->status);
 
                                /* Close the transport */
@@ -490,18 +485,6 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
        return cma_xprt;
 }
 
-struct page *svc_rdma_get_page(void)
-{
-       struct page *page;
-
-       while ((page = alloc_page(GFP_KERNEL)) == NULL) {
-               /* If we can't get memory, wait a bit and try again */
-               printk(KERN_INFO "svcrdma: out of memory...retrying in 1s\n");
-               schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
-       }
-       return page;
-}
-
 int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 {
        struct ib_recv_wr recv_wr, *bad_recv_wr;
@@ -520,7 +503,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
                        pr_err("svcrdma: Too many sges (%d)\n", sge_no);
                        goto err_put_ctxt;
                }
-               page = svc_rdma_get_page();
+               page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
                ctxt->pages[sge_no] = page;
                pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                     page, 0, PAGE_SIZE,
@@ -616,7 +599,8 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id,
        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
-                       "event=%d\n", cma_id, cma_id->context, event->event);
+                       "event = %s (%d)\n", cma_id, cma_id->context,
+                       rdma_event_msg(event->event), event->event);
                handle_connect_req(cma_id,
                                   event->param.conn.initiator_depth);
                break;
@@ -636,7 +620,8 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id,
 
        default:
                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
-                       "event=%d\n", cma_id, event->event);
+                       "event = %s (%d)\n", cma_id,
+                       rdma_event_msg(event->event), event->event);
                break;
        }
 
@@ -669,7 +654,8 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
-                       "event=%d\n", cma_id, xprt, event->event);
+                       "event = %s (%d)\n", cma_id, xprt,
+                       rdma_event_msg(event->event), event->event);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
@@ -677,7 +663,8 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                break;
        default:
                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
-                       "event=%d\n", cma_id, event->event);
+                       "event = %s (%d)\n", cma_id,
+                       rdma_event_msg(event->event), event->event);
                break;
        }
        return 0;
@@ -848,10 +835,11 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        struct svcxprt_rdma *listen_rdma;
        struct svcxprt_rdma *newxprt = NULL;
        struct rdma_conn_param conn_param;
+       struct ib_cq_init_attr cq_attr = {};
        struct ib_qp_init_attr qp_attr;
        struct ib_device_attr devattr;
        int uninitialized_var(dma_mr_acc);
-       int need_dma_mr;
+       int need_dma_mr = 0;
        int ret;
        int i;
 
@@ -900,22 +888,22 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
        }
+       cq_attr.cqe = newxprt->sc_sq_depth;
        newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         sq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
-                                        newxprt->sc_sq_depth,
-                                        0);
+                                        &cq_attr);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                dprintk("svcrdma: error creating SQ CQ for connect request\n");
                goto errout;
        }
+       cq_attr.cqe = newxprt->sc_max_requests;
        newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         rq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
-                                        newxprt->sc_max_requests,
-                                        0);
+                                        &cq_attr);
        if (IS_ERR(newxprt->sc_rq_cq)) {
                dprintk("svcrdma: error creating RQ CQ for connect request\n");
                goto errout;
@@ -985,35 +973,26 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        /*
         * Determine if a DMA MR is required and if so, what privs are required
         */
-       switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
-       case RDMA_TRANSPORT_IWARP:
-               newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
-               if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
-                       need_dma_mr = 1;
-                       dma_mr_acc =
-                               (IB_ACCESS_LOCAL_WRITE |
-                                IB_ACCESS_REMOTE_WRITE);
-               } else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
-                       need_dma_mr = 1;
-                       dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
-               } else
-                       need_dma_mr = 0;
-               break;
-       case RDMA_TRANSPORT_IB:
-               if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
-                       need_dma_mr = 1;
-                       dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
-               } else if (!(devattr.device_cap_flags &
-                            IB_DEVICE_LOCAL_DMA_LKEY)) {
-                       need_dma_mr = 1;
-                       dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
-               } else
-                       need_dma_mr = 0;
-               break;
-       default:
+       if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+                                newxprt->sc_cm_id->port_num) &&
+           !rdma_ib_or_roce(newxprt->sc_cm_id->device,
+                            newxprt->sc_cm_id->port_num))
                goto errout;
+
+       if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
+           !(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
+               need_dma_mr = 1;
+               dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
+               if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+                                       newxprt->sc_cm_id->port_num) &&
+                   !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
+                       dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
        }
 
+       if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+                               newxprt->sc_cm_id->port_num))
+               newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
+
        /* Create the DMA MR if needed, otherwise, use the DMA LKEY */
        if (need_dma_mr) {
                /* Register all of physical memory */
@@ -1319,11 +1298,11 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
        struct ib_send_wr err_wr;
        struct page *p;
        struct svc_rdma_op_ctxt *ctxt;
-       u32 *va;
+       __be32 *va;
        int length;
        int ret;
 
-       p = svc_rdma_get_page();
+       p = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
        va = page_address(p);
 
        /* XDR encode error */