net/mlx5: Refactor fragmented buffer struct fields and init flow
author     Tariq Toukan <tariqt@mellanox.com>
           Wed, 12 Sep 2018 12:36:41 +0000 (15:36 +0300)
committer  Saeed Mahameed <saeedm@mellanox.com>
           Thu, 18 Oct 2018 20:13:31 +0000 (13:13 -0700)

Take struct mlx5_frag_buf out of mlx5_frag_buf_ctrl, as it is not
needed to manage and control the datapath of the fragmented buffers API.

struct mlx5_frag_buf contains control info to manage the allocation
and de-allocation of the fragmented buffer.
Its fields are not relevant to the datapath, so take them out of the datapath
struct mlx5_frag_buf_ctrl, except for the fragments array itself.

In addition, modify mlx5_fill_fbc (renamed here to mlx5_init_fbc) to
initialise the frags pointer as well. This implies that the buffer must be
allocated before the function is called.
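
For illustration, a consumer now allocates the fragmented buffer first and
only then initialises the control struct from the freshly allocated frags
array, roughly as in the wq.c hunk below (error handling omitted):

	err = mlx5_frag_buf_alloc_node(mdev,
				       wq_get_byte_sz(log_wq_sz, log_wq_stride),
				       &wq_ctrl->buf, param->buf_numa_node);

	/* fbc->frags now points into the buffer allocated just above */
	mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc);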

The set of type-specific *_get_byte_size() functions is replaced by a single
generic helper, wq_get_byte_sz().
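
The generic helper only shifts the two logarithms together; for example, a
cyclic WQ with log_wq_sz = 10 and log_wq_stride = 6 gives
wq_get_byte_sz(10, 6) = 1024 * 64 B = 64 KB, the same value the removed
mlx5_wq_cyc_get_byte_size() would have computed.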

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/net/ethernet/mellanox/mlx5/core/wq.c
include/linux/mlx5/driver.h

diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 088205d..cca1820 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -393,7 +393,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-       mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf);
+       mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
 }
 
 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -728,16 +728,11 @@ static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
                             int nent,
                             int cqe_size)
 {
-       struct mlx5_frag_buf_ctrl *c = &buf->fbc;
-       struct mlx5_frag_buf *frag_buf = &c->frag_buf;
-       u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0};
+       struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+       u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
+       u8 log_wq_sz     = ilog2(cqe_size);
        int err;
 
-       MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size));
-       MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0);
-
-       mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff);
-
        err = mlx5_frag_buf_alloc_node(dev->mdev,
                                       nent * cqe_size,
                                       frag_buf,
@@ -745,6 +740,8 @@ static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
        if (err)
                return err;
 
+       mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
+
        buf->cqe_size = cqe_size;
        buf->nent = nent;
 
@@ -934,7 +931,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 
        *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
-                cq->buf.fbc.frag_buf.npages;
+                cq->buf.frag_buf.npages;
        *cqb = kvzalloc(*inlen, GFP_KERNEL);
        if (!*cqb) {
                err = -ENOMEM;
@@ -942,11 +939,11 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
        }
 
        pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
-       mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas);
+       mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
 
        cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
        MLX5_SET(cqc, cqc, log_page_size,
-                cq->buf.fbc.frag_buf.page_shift -
+                cq->buf.frag_buf.page_shift -
                 MLX5_ADAPTER_PAGE_SHIFT);
 
        *index = dev->mdev->priv.uar->index;
@@ -1365,11 +1362,10 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                cqe_size = 64;
                err = resize_kernel(dev, cq, entries, cqe_size);
                if (!err) {
-                       struct mlx5_frag_buf_ctrl *c;
+                       struct mlx5_frag_buf *frag_buf = &cq->resize_buf->frag_buf;
 
-                       c = &cq->resize_buf->fbc;
-                       npas = c->frag_buf.npages;
-                       page_shift = c->frag_buf.page_shift;
+                       npas = frag_buf->npages;
+                       page_shift = frag_buf->page_shift;
                }
        }
 
@@ -1390,8 +1386,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
                                     pas, 0);
        else
-               mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf,
-                                         pas);
+               mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);
 
        MLX5_SET(modify_cq_in, in,
                 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
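
(Worked example for the CQ sizing above: 64 B CQEs give log_wq_stride = 6,
i.e. a 64 B stride, while 128 B CQEs give log_wq_stride = 7; the total
allocation passed to mlx5_frag_buf_alloc_node() remains nent * cqe_size
either way.)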
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 320d4df..289c18d 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -435,6 +435,7 @@ struct mlx5_ib_qp {
 
 struct mlx5_ib_cq_buf {
        struct mlx5_frag_buf_ctrl fbc;
+       struct mlx5_frag_buf    frag_buf;
        struct ib_umem          *umem;
        int                     cqe_size;
        int                     nent;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 68e7f8d..9007e91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -54,54 +54,37 @@ u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
        return (u32)wq->fbc.sz_m1 + 1;
 }
 
-static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
 {
-       return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride;
-}
-
-static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
-{
-       return mlx5_wq_cyc_get_byte_size(&wq->rq) +
-              mlx5_wq_cyc_get_byte_size(&wq->sq);
-}
-
-static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
-{
-       return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride;
-}
-
-static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
-{
-       return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride;
+       return ((u32)1 << log_sz) << log_stride;
 }
 
 int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                       void *wqc, struct mlx5_wq_cyc *wq,
                       struct mlx5_wq_ctrl *wq_ctrl)
 {
+       u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+       u8 log_wq_sz     = MLX5_GET(wq, wqc, log_wq_sz);
        struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
        int err;
 
-       mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
-                     MLX5_GET(wq, wqc, log_wq_sz),
-                     fbc);
-       wq->sz    = wq->fbc.sz_m1 + 1;
-
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
                return err;
        }
 
-       err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+       wq->db  = wq_ctrl->db.db;
+
+       err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
                                       &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
                goto err_db_free;
        }
 
-       fbc->frag_buf = wq_ctrl->buf;
-       wq->db  = wq_ctrl->db.db;
+       mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
+       wq->sz = mlx5_wq_cyc_get_size(wq);
 
        wq_ctrl->mdev = mdev;
 
@@ -113,46 +96,19 @@ err_db_free:
        return err;
 }
 
-static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
-                                struct mlx5_wq_qp *qp)
-{
-       struct mlx5_frag_buf_ctrl *sq_fbc;
-       struct mlx5_frag_buf *rqb, *sqb;
-
-       rqb  = &qp->rq.fbc.frag_buf;
-       *rqb = *buf;
-       rqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
-       rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
-
-       sq_fbc = &qp->sq.fbc;
-       sqb    = &sq_fbc->frag_buf;
-       *sqb   = *buf;
-       sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->sq);
-       sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
-       sqb->frags += rqb->npages; /* first part is for the rq */
-       if (sq_fbc->strides_offset)
-               sqb->frags--;
-}
-
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
                      struct mlx5_wq_ctrl *wq_ctrl)
 {
-       u16 sq_strides_offset;
-       u32 rq_pg_remainder;
-       int err;
+       u8 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
+       u8 log_rq_sz     = MLX5_GET(qpc, qpc, log_rq_size);
+       u8 log_sq_stride = ilog2(MLX5_SEND_WQE_BB);
+       u8 log_sq_sz     = MLX5_GET(qpc, qpc, log_sq_size);
 
-       mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
-                     MLX5_GET(qpc, qpc, log_rq_size),
-                     &wq->rq.fbc);
+       u32 rq_byte_size;
+       int err;
 
-       rq_pg_remainder   = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
-       sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
 
-       mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
-                            MLX5_GET(qpc, qpc, log_sq_size),
-                            sq_strides_offset,
-                            &wq->sq.fbc);
 
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
@@ -160,14 +116,32 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                return err;
        }
 
-       err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
+       err = mlx5_frag_buf_alloc_node(mdev,
+                                      wq_get_byte_sz(log_rq_sz, log_rq_stride) +
+                                      wq_get_byte_sz(log_sq_sz, log_sq_stride),
                                       &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
                goto err_db_free;
        }
 
-       mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
+       mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc);
+
+       rq_byte_size = wq_get_byte_sz(log_rq_sz, log_rq_stride);
+
+       if (rq_byte_size < PAGE_SIZE) {
+               /* SQ starts within the same page of the RQ */
+               u16 sq_strides_offset = rq_byte_size / MLX5_SEND_WQE_BB;
+
+               mlx5_init_fbc_offset(wq_ctrl->buf.frags,
+                                    log_sq_stride, log_sq_sz, sq_strides_offset,
+                                    &wq->sq.fbc);
+       } else {
+               u16 rq_npages = rq_byte_size >> PAGE_SHIFT;
+
+               mlx5_init_fbc(wq_ctrl->buf.frags + rq_npages,
+                             log_sq_stride, log_sq_sz, &wq->sq.fbc);
+       }
 
        wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
        wq->sq.db  = &wq_ctrl->db.db[MLX5_SND_DBR];
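
(Worked example for the RQ/SQ split above, assuming 4 KB pages and a 64 B
MLX5_SEND_WQE_BB: with log_rq_stride = 6 and log_rq_sz = 3 the RQ occupies
512 B, less than a page, so the SQ fbc reuses the same fragment array with
sq_strides_offset = 512 / 64 = 8; with log_rq_sz = 7 the RQ occupies exactly
8 KB, i.e. rq_npages = 2, so the SQ fbc starts at wq_ctrl->buf.frags + 2
with no strides offset.)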
@@ -186,17 +160,19 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                     void *cqc, struct mlx5_cqwq *wq,
                     struct mlx5_wq_ctrl *wq_ctrl)
 {
+       u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) + 6;
+       u8 log_wq_sz     = MLX5_GET(cqc, cqc, log_cq_size);
        int err;
 
-       mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);
-
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
                return err;
        }
 
-       err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+       wq->db  = wq_ctrl->db.db;
+
+       err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
                                       &wq_ctrl->buf,
                                       param->buf_numa_node);
        if (err) {
@@ -205,8 +181,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                goto err_db_free;
        }
 
-       wq->fbc.frag_buf = wq_ctrl->buf;
-       wq->db  = wq_ctrl->db.db;
+       mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc);
 
        wq_ctrl->mdev = mdev;
 
@@ -222,30 +197,29 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *wqc, struct mlx5_wq_ll *wq,
                      struct mlx5_wq_ctrl *wq_ctrl)
 {
+       u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+       u8 log_wq_sz     = MLX5_GET(wq, wqc, log_wq_sz);
        struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
        struct mlx5_wqe_srq_next_seg *next_seg;
        int err;
        int i;
 
-       mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
-                     MLX5_GET(wq, wqc, log_wq_sz),
-                     fbc);
-
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
                return err;
        }
 
-       err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+       wq->db  = wq_ctrl->db.db;
+
+       err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
                                       &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
                goto err_db_free;
        }
 
-       wq->fbc.frag_buf = wq_ctrl->buf;
-       wq->db  = wq_ctrl->db.db;
+       mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
 
        for (i = 0; i < fbc->sz_m1; i++) {
                next_seg = mlx5_wq_ll_get_wqe(wq, i);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 94ffd02..e10f61a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -357,7 +357,7 @@ struct mlx5_frag_buf {
 };
 
 struct mlx5_frag_buf_ctrl {
-       struct mlx5_frag_buf    frag_buf;
+       struct mlx5_buf_list   *frags;
        u32                     sz_m1;
        u16                     frag_sz_m1;
        u16                     strides_offset;
@@ -994,10 +994,12 @@ static inline u32 mlx5_base_mkey(const u32 key)
        return key & 0xffffff00u;
 }
 
-static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
+                                       u8 log_stride, u8 log_sz,
                                        u16 strides_offset,
                                        struct mlx5_frag_buf_ctrl *fbc)
 {
+       fbc->frags      = frags;
        fbc->log_stride = log_stride;
        fbc->log_sz     = log_sz;
        fbc->sz_m1      = (1 << fbc->log_sz) - 1;
@@ -1006,18 +1008,11 @@ static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
        fbc->strides_offset = strides_offset;
 }
 
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
+                                u8 log_stride, u8 log_sz,
                                 struct mlx5_frag_buf_ctrl *fbc)
 {
-       mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
-}
-
-static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
-                                             void *cqc)
-{
-       mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
-                     MLX5_GET(cqc, cqc, log_cq_size),
-                     fbc);
+       mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
 }
 
 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
@@ -1028,8 +1023,7 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
        ix  += fbc->strides_offset;
        frag = ix >> fbc->log_frag_strides;
 
-       return fbc->frag_buf.frags[frag].buf +
-               ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+       return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
 }
 
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
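
As a self-contained illustration of the datapath lookup in
mlx5_frag_buf_get_wqe() above, here is a small userspace sketch of the same
index math; the 4 KB fragment size and 64 B stride used below are assumptions
for the example, not values taken from this patch.

/* Standalone sketch (plain userspace C, not kernel code): models the index
 * math of mlx5_frag_buf_get_wqe(). Assumes 4 KB fragments and 64 B strides
 * purely for illustration.
 */
#include <stdio.h>

struct fbc_model {
	unsigned int frag_sz_m1;	/* strides per fragment - 1 */
	unsigned int log_stride;	/* log2(stride size in bytes) */
	unsigned int log_frag_strides;	/* log2(strides per fragment) */
	unsigned int strides_offset;	/* first stride used by this queue */
};

static void locate(const struct fbc_model *fbc, unsigned int ix)
{
	unsigned int frag, off;

	ix  += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;
	off  = (fbc->frag_sz_m1 & ix) << fbc->log_stride;
	printf("stride %u -> fragment %u, byte offset %u\n",
	       ix - fbc->strides_offset, frag, off);
}

int main(void)
{
	/* 4096 B fragment / 64 B stride = 64 strides per fragment */
	struct fbc_model fbc = {
		.frag_sz_m1       = 63,
		.log_stride       = 6,
		.log_frag_strides = 6,
		.strides_offset   = 0,
	};

	locate(&fbc, 0);	/* fragment 0, offset 0    */
	locate(&fbc, 63);	/* fragment 0, offset 4032 */
	locate(&fbc, 64);	/* fragment 1, offset 0    */
	return 0;
}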