RDMA/mlx5: Set relaxed ordering when requested
author    Michael Guralnik <michaelgur@mellanox.com>
Wed, 8 Jan 2020 18:05:40 +0000 (20:05 +0200)
committer Jason Gunthorpe <jgg@mellanox.com>
Thu, 16 Jan 2020 19:55:47 +0000 (15:55 -0400)
Enable relaxed ordering in the mkey context when requested. As relaxed
ordering is not currently supported in UMR, disable UMR usage for relaxed
ordering MRs.

Link: https://lore.kernel.org/r/1578506740-22188-11-git-send-email-yishaih@mellanox.com
Signed-off-by: Michael Guralnik <michaelgur@mellanox.com>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
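
For context, an application opts into relaxed ordering at registration time through the optional access flags. A minimal userspace sketch against the rdma-core verbs API follows; the buffer allocation and the helper name are illustrative, not part of this commit:

#include <stdlib.h>
#include <unistd.h>
#include <infiniband/verbs.h>

/* Register a buffer with relaxed ordering requested.
 * IBV_ACCESS_RELAXED_ORDERING is an optional access flag, so providers
 * that cannot honor it simply ignore it; with this patch, mlx5 honors
 * it by programming the mkey context accordingly. */
static struct ibv_mr *reg_relaxed_mr(struct ibv_pd *pd, size_t len)
{
	struct ibv_mr *mr;
	void *buf;

	if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), len))
		return NULL;

	mr = ibv_reg_mr(pd, buf, len,
			IBV_ACCESS_LOCAL_WRITE |
			IBV_ACCESS_REMOTE_WRITE |
			IBV_ACCESS_RELAXED_ORDERING);
	if (!mr)
		free(buf);
	return mr;
}
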
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c

drivers/infiniband/hw/mlx5/mlx5_ib.h
index 676462c..aa14d3c 100644
@@ -1523,7 +1523,7 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
 u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
 
 static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
-                                      bool do_modify_atomic)
+                                      bool do_modify_atomic, int access_flags)
 {
        if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
                return false;
@@ -1533,6 +1533,9 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
            MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
                return false;
 
+       if (access_flags & IB_ACCESS_RELAXED_ORDERING)
+               return false;
+
        return true;
 }
 
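Read together, the two hunks above leave the helper in the following shape. This is reassembled for readability from the diff plus the unchanged "if (do_modify_atomic &&" context line elided between the hunks; it is a convenience view, not additional code from the commit:

static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
				       bool do_modify_atomic, int access_flags)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	if (do_modify_atomic &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	/* UMR cannot currently update the relaxed ordering bits of an
	 * existing mkey, so relaxed ordering MRs must bypass UMR. */
	if (access_flags & IB_ACCESS_RELAXED_ORDERING)
		return false;

	return true;
}
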
drivers/infiniband/hw/mlx5/mr.c
index a527843..1fcae07 100644
@@ -661,12 +661,21 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
                                          struct ib_pd *pd)
 {
+       struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
        MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
        MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
        MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
        MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
        MLX5_SET(mkc, mkc, lr, 1);
 
+       if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+               MLX5_SET(mkc, mkc, relaxed_ordering_write,
+                        !!(acc & IB_ACCESS_RELAXED_ORDERING));
+       if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+               MLX5_SET(mkc, mkc, relaxed_ordering_read,
+                        !!(acc & IB_ACCESS_RELAXED_ORDERING));
+
        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET64(mkc, mkc, start_addr, start_addr);
@@ -1063,6 +1072,12 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        MLX5_SET(mkc, mkc, free, !populate);
        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+       if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+               MLX5_SET(mkc, mkc, relaxed_ordering_write,
+                        !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+       if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+               MLX5_SET(mkc, mkc, relaxed_ordering_read,
+                        !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
        MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
        MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
        MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
@@ -1251,7 +1266,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        if (err < 0)
                return ERR_PTR(err);
 
-       use_umr = mlx5_ib_can_use_umr(dev, true);
+       use_umr = mlx5_ib_can_use_umr(dev, true, access_flags);
 
        if (order <= mr_cache_max_order(dev) && use_umr) {
                mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1419,7 +1434,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                        goto err;
        }
 
-       if (!mlx5_ib_can_use_umr(dev, true) ||
+       if (!mlx5_ib_can_use_umr(dev, true, access_flags) ||
            (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
                /*
                 * UMR can't be used - MKey needs to be replaced.
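
Both mr.c call sites repeat the same capability-gated pattern: the relaxed_ordering_write and relaxed_ordering_read mkc fields are set only when the HCA advertises the corresponding capability, so mkey creation keeps working on devices that lack it. A hypothetical shared helper, not part of this commit, would capture the pattern as:

static void set_mkc_relaxed_ordering(struct mlx5_ib_dev *dev, void *mkc,
				     int access_flags)
{
	bool ro = !!(access_flags & IB_ACCESS_RELAXED_ORDERING);

	/* Program only the mkc fields the device implements. */
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
		MLX5_SET(mkc, mkc, relaxed_ordering_write, ro);
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
		MLX5_SET(mkc, mkc, relaxed_ordering_read, ro);
}
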
drivers/infiniband/hw/mlx5/odp.c
index 92da6c4..409dffb 100644
@@ -380,7 +380,7 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
        memset(caps, 0, sizeof(*caps));
 
        if (!MLX5_CAP_GEN(dev->mdev, pg) ||
-           !mlx5_ib_can_use_umr(dev, true))
+           !mlx5_ib_can_use_umr(dev, true, 0))
                return;
 
        caps->general_caps = IB_ODP_SUPPORT;
drivers/infiniband/hw/mlx5/qp.c
index 04126ef..425efa1 100644
@@ -4823,7 +4823,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
        bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
        u8 flags = 0;
 
-       if (!mlx5_ib_can_use_umr(dev, atomic)) {
+       if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
                mlx5_ib_warn(to_mdev(qp->ibqp.device),
                             "Fast update of %s for MR is disabled\n",
                             (MLX5_CAP_GEN(dev->mdev,
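
The qp.c change means a fast-register work request whose access flags include relaxed ordering is now rejected at post time, since UMR cannot program the relaxed ordering bits. A sketch of a hypothetical in-kernel caller, assuming a valid QP and MR and not taken from this commit:

#include <rdma/ib_verbs.h>

static int post_reg_mr_relaxed(struct ib_qp *qp, struct ib_mr *mr)
{
	const struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr = {
		.wr.opcode = IB_WR_REG_MR,
		.mr = mr,
		.key = mr->rkey,
		.access = IB_ACCESS_LOCAL_WRITE |
			  IB_ACCESS_RELAXED_ORDERING,
	};

	/* After this patch, mlx5's set_reg_wr() rejects the post
	 * with -EINVAL. */
	return ib_post_send(qp, &wr.wr, &bad_wr);
}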