
rdma/cxgb4: Add support for 64Byte cqes
author Raju Rangoju <rajur@chelsio.com>
Thu, 5 Jul 2018 12:56:01 +0000 (18:26 +0530)
committer Jason Gunthorpe <jgg@mellanox.com>
Fri, 13 Jul 2018 17:52:55 +0000 (11:52 -0600)
This patch adds support for iw_cxgb4 to extend CQEs from the existing
32-byte size to 64 bytes.

Also adds backward compatibility support (for 32-byte CQEs) to work
with older libraries.

Signed-off-by: Raju Rangoju <rajur@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/t4.h
include/uapi/rdma/cxgb4-abi.h
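
How the 32B/64B negotiation works: a library that understands 64-byte
CQEs passes a struct c4iw_create_cq command when creating the CQ, while
an older library passes no command at all. The driver detects the latter
via udata->inlen and falls back to 32-byte CQEs; in the other direction,
it advertises 64-byte support to userspace through the C4IW_64B_CQE bit
in the response flags. A minimal sketch of the driver-side check
(hypothetical helper name, not part of the patch):

	/* Sketch: detect an old library by the size of the command it
	 * sends with CQ creation.  Old libraries predate
	 * struct c4iw_create_cq, so their inlen is smaller and they
	 * must be served 32-byte CQEs.
	 */
	static bool wants_32b_cqe(const struct ib_udata *udata)
	{
		return udata->inlen < sizeof(struct c4iw_create_cq);
	}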

diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index a3a8299..a055f9f 100644
@@ -77,6 +77,10 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
        int user = (uctx != &rdev->uctx);
        int ret;
        struct sk_buff *skb;
+       struct c4iw_ucontext *ucontext = NULL;
+
+       if (user)
+               ucontext = container_of(uctx, struct c4iw_ucontext, uctx);
 
        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
@@ -100,6 +104,16 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);
 
+       if (user && ucontext->is_32b_cqe) {
+               cq->qp_errp = &((struct t4_status_page *)
+               ((u8 *)cq->queue + (cq->size - 1) *
+                (sizeof(*cq->queue) / 2)))->qp_err;
+       } else {
+               cq->qp_errp = &((struct t4_status_page *)
+               ((u8 *)cq->queue + (cq->size - 1) *
+                sizeof(*cq->queue)))->qp_err;
+       }
+
        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;
 
@@ -132,7 +146,9 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                        FW_RI_RES_WR_IQPCIECH_V(2) |
                        FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
                        FW_RI_RES_WR_IQO_F |
-                       FW_RI_RES_WR_IQESIZE_V(1));
+                       ((user && ucontext->is_32b_cqe) ?
+                        FW_RI_RES_WR_IQESIZE_V(1) :
+                        FW_RI_RES_WR_IQESIZE_V(2)));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
 
@@ -884,6 +900,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
        int vector = attr->comp_vector;
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
+       struct c4iw_create_cq ucmd;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret, wr_len;
@@ -899,9 +916,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
        if (vector >= rhp->rdev.lldi.nciq)
                return ERR_PTR(-EINVAL);
 
+       if (ib_context) {
+               ucontext = to_c4iw_ucontext(ib_context);
+               if (udata->inlen < sizeof(ucmd))
+                       ucontext->is_32b_cqe = 1;
+       }
+
        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);
+
        chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
        if (!chp->wr_waitp) {
                ret = -ENOMEM;
@@ -916,9 +940,6 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                goto err_free_wr_wait;
        }
 
-       if (ib_context)
-               ucontext = to_c4iw_ucontext(ib_context);
-
        /* account for the status page. */
        entries++;
 
@@ -942,13 +963,15 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
        if (hwentries < 64)
                hwentries = 64;
 
-       memsize = hwentries * sizeof *chp->cq.queue;
+       memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
+                       (sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));
 
        /*
         * memsize must be a multiple of the page size if its a user cq.
         */
        if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);
+
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
        chp->cq.vector = vector;
@@ -979,6 +1002,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                if (!mm2)
                        goto err_free_mm;
 
+               memset(&uresp, 0, sizeof(uresp));
                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
@@ -988,9 +1012,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
+               /* communicate to the userspace that
+                * kernel driver supports 64B CQE
+                */
+               uresp.flags |= C4IW_64B_CQE;
+
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp,
-                                      sizeof(uresp) - sizeof(uresp.reserved));
+                                      ucontext->is_32b_cqe ?
+                                      sizeof(uresp) - sizeof(uresp.flags) :
+                                      sizeof(uresp));
                if (ret)
                        goto err_free_mm2;
 
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 3e9d8b2..8741d23 100644
@@ -70,9 +70,10 @@ static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
                CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
                CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
 
-       pr_debug("%016llx %016llx %016llx %016llx\n",
+       pr_debug("%016llx %016llx %016llx %016llx - %016llx %016llx %016llx %016llx\n",
                 be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
-                be64_to_cpu(p[3]));
+                be64_to_cpu(p[3]), be64_to_cpu(p[4]), be64_to_cpu(p[5]),
+                be64_to_cpu(p[6]), be64_to_cpu(p[7]));
 
        /*
         * Ingress WRITE and READ_RESP errors provide
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 870649f..8866bf9 100644
@@ -566,6 +566,7 @@ struct c4iw_ucontext {
        spinlock_t mmap_lock;
        struct list_head mmaps;
        struct kref kref;
+       bool is_32b_cqe;
 };
 
 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 8369c7c..838a7de 100644
@@ -179,9 +179,20 @@ struct t4_cqe {
                        __be32 wrid_hi;
                        __be32 wrid_low;
                } gen;
+               struct {
+                       __be32 stag;
+                       __be32 msn;
+                       __be32 reserved;
+                       __be32 abs_rqe_idx;
+               } srcqe;
+               struct {
+                       __be64 imm_data;
+               } imm_data_rcqe;
+
                u64 drain_cookie;
+               __be64 flits[3];
        } u;
-       __be64 reserved;
+       __be64 reserved[3];
        __be64 bits_type_ts;
 };
 
@@ -565,6 +576,7 @@ struct t4_cq {
        u16 cidx_inc;
        u8 gen;
        u8 error;
+       u8 *qp_errp;
        unsigned long flags;
 };
 
@@ -698,12 +710,12 @@ static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 
 static inline int t4_cq_in_error(struct t4_cq *cq)
 {
-       return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
+       return *cq->qp_errp;
 }
 
 static inline void t4_set_cq_in_error(struct t4_cq *cq)
 {
-       ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
+       *cq->qp_errp = 1;
 }
 #endif
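
A note on the qp_errp hookup above: the T4 status page occupies the last
slot of the CQ ring, so its offset depends on the CQE size negotiated at
create time. A worked example of the arithmetic from create_cq() (a
sketch only; is_32b stands in for the ucontext->is_32b_cqe test):

	/* Sketch: status-page placement for a cq->size of 64.
	 * 64B CQEs: offset = (64 - 1) * sizeof(struct t4_cqe)       = 4032
	 * 32B CQEs: offset = (64 - 1) * (sizeof(struct t4_cqe) / 2) = 2016
	 */
	size_t cqe_sz = is_32b ? sizeof(struct t4_cqe) / 2
			       : sizeof(struct t4_cqe);
	struct t4_status_page *sp = (struct t4_status_page *)
		((u8 *)cq->queue + (cq->size - 1) * cqe_sz);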
 
diff --git a/include/uapi/rdma/cxgb4-abi.h b/include/uapi/rdma/cxgb4-abi.h
index a159ba8..65c9eac 100644
  * In particular do not use pointer types -- pass pointers in __aligned_u64
  * instead.
  */
+
+enum {
+       C4IW_64B_CQE = (1 << 0)
+};
+
+struct c4iw_create_cq {
+       __u32 flags;
+       __u32 reserved;
+};
+
 struct c4iw_create_cq_resp {
        __aligned_u64 key;
        __aligned_u64 gts_key;
@@ -51,7 +61,7 @@ struct c4iw_create_cq_resp {
        __u32 cqid;
        __u32 size;
        __u32 qid_mask;
-       __u32 reserved; /* explicit padding (optional for i386) */
+       __u32 flags;
 };
 
 enum {
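
The backward compatibility of the response struct rests on flags reusing
the four bytes that were previously explicit reserved padding, so the
struct size does not change. For a 32B-CQE (old) library the driver
copies sizeof(uresp) - sizeof(uresp.flags) bytes, the same length the
pre-patch code copied as sizeof(uresp) - sizeof(uresp.reserved), so old
libraries never see the new field; a 64B-aware library receives the full
struct. In sketch form (mirroring the ib_copy_to_udata() call in cq.c
above):

	/* Sketch: response length by library generation.  flags merely
	 * renames the old trailing reserved field, so the truncated
	 * copy is byte-for-byte what old libraries always received.
	 */
	size_t copy_len = ucontext->is_32b_cqe
		? sizeof(uresp) - sizeof(uresp.flags)	/* old layout */
		: sizeof(uresp);			/* incl. flags */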