/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
			     struct mlx4_ib_cq *recv_cq);
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
			       struct mlx4_ib_cq *recv_cq);
enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX4_IB_LINK_TYPE_IB		= 0,
	MLX4_IB_LINK_TYPE_ETH		= 1
};

enum {
	/*
	 * Largest possible UD header: send with GRH and immediate
	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
	 * biggest case)
	 */
	MLX4_IB_UD_HEADER_SIZE		= 82,
	MLX4_IB_LSO_HEADER_SPARE	= 128,
};

enum {
	MLX4_IB_IBOE_ETHERTYPE		= 0x8915
};

struct mlx4_ib_sqp {
	struct mlx4_ib_qp	qp;
	int			pkey_index;
	u32			qkey;
	u32			send_psn;
	struct ib_ud_header	ud_header;
	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
};

enum {
	MLX4_IB_MIN_SQ_STRIDE	= 6,
	MLX4_IB_CACHE_LINE_SIZE	= 64,
};

enum {
	MLX4_RAW_QP_MTU		= 7,
	MLX4_RAW_QP_MSGMAX	= 31,
};
static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_REG_MR]				= cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
	[IB_WR_BIND_MW]				= cpu_to_be32(MLX4_OPCODE_BIND_MW),
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	if (!mlx4_is_master(dev->dev))
		return 0;

	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
	       qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
	       8 * MLX4_MFUNC_MAX;
}
static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_sqp = 0;
	int real_sqp = 0;
	int i;
	/* PPF or Native -- real SQP */
	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
	if (real_sqp)
		return 1;
	/* VF or PF -- proxy SQP */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
			    qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
				proxy_sqp = 1;
				break;
			}
		}
	}
	return proxy_sqp;
}
161 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
166 /* PPF or Native -- real QP0 */
167 real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
168 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
169 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
172 /* VF or PF -- proxy QP0 */
173 if (mlx4_is_mfunc(dev->dev)) {
174 for (i = 0; i < dev->dev->caps.num_ports; i++) {
175 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}
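/*
 * Illustrative arithmetic (not driver code): WQE slots are addressed in
 * units of 1 << wqe_shift bytes.  With sq.wqe_shift == 6 (64-byte basic
 * blocks) and sq.offset == 0x1000, get_send_wqe(qp, 5) resolves to byte
 * offset 0x1000 + (5 << 6) == 0x1140 within the QP buffer.
 */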
/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with
 *     0x7FFFFFFF | (invalid_ownership_value << 31).
 *
 * When the max work request size is less than or equal to the WQE
 * basic block size, as an optimization, we can stamp all WQEs with
 * 0xffffffff, and skip the very first chunk of each WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	__be32 *wqe;
	int i;
	int s;
	int ind;
	void *buf;
	__be32 stamp;
	struct mlx4_wqe_ctrl_seg *ctrl;

	if (qp->sq_max_wqes_per_wr > 1) {
		s = roundup(size, 1U << qp->sq.wqe_shift);
		for (i = 0; i < s; i += 64) {
			ind = (i >> qp->sq.wqe_shift) + n;
			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
						       cpu_to_be32(0xffffffff);
			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
			*wqe = stamp;
		}
	} else {
		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
		s = (ctrl->fence_size & 0x3f) << 4;
		for (i = 64; i < s; i += 64) {
			wqe = buf + i;
			*wqe = cpu_to_be32(0xffffffff);
		}
	}
}
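/*
 * Worked example (illustrative): with wqe_shift == 7 (128-byte WQEs) and
 * size == 192, s rounds up to 256, so the loop stamps the leading dword
 * of the 64-byte chunks at offsets 0, 64, 128 and 192; each stamp is
 * 0x7fffffff or 0xffffffff depending on which ownership half of the
 * queue (ind & wqe_cnt) the chunk falls into.
 */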
static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_inline_seg *inl;
	void *wqe;
	int s;

	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	s = sizeof(struct mlx4_wqe_ctrl_seg);

	if (qp->ibqp.qp_type == IB_QPT_UD) {
		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
		memset(dgram, 0, sizeof *dgram);
		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
		s += sizeof(struct mlx4_wqe_datagram_seg);
	}

	/* Pad the remainder of the WQE with an inline data segment. */
	if (size > s) {
		inl = wqe + s;
		inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
	}
	ctrl->srcrb_flags = 0;
	ctrl->fence_size = size / 16;
	/*
	 * Make sure descriptor is fully written before setting ownership bit
	 * (because HW can start executing as soon as we do).
	 */
	wmb();

	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
		(n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}
/* Post NOP WQE to prevent wrap-around in the middle of WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
		ind += s;
	}
	return ind;
}
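/*
 * Worked example (illustrative): with sq.wqe_cnt == 256 and
 * sq_max_wqes_per_wr == 4, posting at ind == 254 leaves only s == 2
 * basic blocks before the wrap, so a 2-block NOP WQE is posted and the
 * returned index advances to 256, i.e. slot 0 once masked with
 * (wqe_cnt - 1).
 */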
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on QP %06x\n",
				type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) + 64;
	case MLX4_IB_QPT_TUN_SMI_OWNER:
	case MLX4_IB_QPT_TUN_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);

	case MLX4_IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_masked_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
		       ALIGN(4 +
			     sizeof (struct mlx4_wqe_inline_seg),
			     sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}
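/*
 * Worked example (illustrative, using the mlx4 ABI segment sizes): a
 * plain UD send WQE carries a 16-byte control segment plus a 48-byte
 * datagram segment, so send_wqe_overhead() == 64; with MLX4_IB_QP_LSO
 * set, another MLX4_IB_LSO_HEADER_SPARE (128) bytes are reserved for
 * the LSO header.
 */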
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
		return -EINVAL;

	if (!has_rq) {
		if (cap->max_recv_wr)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
			return -EINVAL;

		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
	}

	/* leave userspace return values as they were, so as not to break ABI */
	if (is_user) {
		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
		cap->max_recv_sge = qp->rq.max_gs;
	} else {
		cap->max_recv_wr  = qp->rq.max_post =
			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
		cap->max_recv_sge = min(qp->rq.max_gs,
					min(dev->dev->caps.max_sq_sg,
					    dev->dev->caps.max_rq_sg));
	}

	return 0;
}
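/*
 * Worked example (illustrative): a kernel caller asking for
 * max_recv_wr == 100 and max_recv_sge == 3 ends up with wqe_cnt ==
 * roundup_pow_of_two(100) == 128, max_gs == 4 and wqe_shift ==
 * ilog2(4 * sizeof(struct mlx4_wqe_data_seg)) == ilog2(64) == 6.
 */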
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
	int s;

	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * Hermon supports shrinking WQEs, such that a single work
	 * request can include multiple units of 1 << wqe_shift.  This
	 * way, work requests can differ in size, and do not have to
	 * be a power of 2 in size, saving memory and speeding up send
	 * WR posting.  Unfortunately, if we do this then the
	 * wqe_index field in CQEs can't be used to look up the WR ID
	 * anymore, so we do this only if selective signaling is off.
	 *
	 * Further, on 32-bit platforms, we can't use vmap() to make
	 * the QP buffer virtually contiguous.  Thus we have to use
	 * constant-sized WRs to make sure a WR is always fully within
	 * a single page-sized chunk.
	 *
	 * Finally, we use NOP work requests to pad the end of the
	 * work queue, to avoid wrap-around in the middle of WR.  We
	 * set NEC bit to avoid getting completions with error for
	 * these NOP WRs, but since NEC is only supported starting
	 * with firmware 2.2.232, we use constant-sized WRs for older
	 * firmware.
	 *
	 * And, since MLX QPs only support SEND, we use constant-sized
	 * WRs in this case.
	 *
	 * We look for the smallest value of wqe_shift such that the
	 * resulting number of wqes does not exceed device
	 * capabilities.
	 *
	 * We set WQE size to at least 64 bytes, this way stamping
	 * invalidates each WQE.
	 */
	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
	    type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
	    !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
		      MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
		qp->sq.wqe_shift = ilog2(64);
	else
		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	for (;;) {
		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);

		/*
		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
		 * allow HW to prefetch.
		 */
		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
						    qp->sq_max_wqes_per_wr +
						    qp->sq_spare_wqes);

		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
			break;

		if (qp->sq_max_wqes_per_wr <= 1)
			return -EINVAL;

		++qp->sq.wqe_shift;
	}

	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
			 send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr  = qp->sq.max_post =
		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
	cap->max_send_sge = min(qp->sq.max_gs,
				min(dev->dev->caps.max_sq_sg,
				    dev->dev->caps.max_rq_sg));
	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}
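/*
 * Worked example (illustrative): a UC QP with max_send_sge == 4 and no
 * inline data needs s == 4 * 16 + 32 == 96 bytes per WR (data segments
 * plus ctrl + raddr overhead).  With shrinking WQEs enabled (signaling
 * on every WR, 64-bit kernel, NEC-capable firmware) wqe_shift stays at
 * 6 and sq_max_wqes_per_wr == DIV_ROUND_UP(96, 64) == 2; otherwise
 * wqe_shift == ilog2(roundup_pow_of_two(96)) == 7 and each WR occupies
 * exactly one 128-byte WQE.
 */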
static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	/* Sanity check SQ size before proceeding */
	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
	    ucmd->log_sq_stride >
		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}
static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	qp->sqp_proxy_rcv =
		kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
			GFP_KERNEL);
	if (!qp->sqp_proxy_rcv)
		return -ENOMEM;
	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		qp->sqp_proxy_rcv[i].addr =
			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
				GFP_KERNEL);
		if (!qp->sqp_proxy_rcv[i].addr)
			goto err;
		qp->sqp_proxy_rcv[i].map =
			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
					  DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
			kfree(qp->sqp_proxy_rcv[i].addr);
			goto err;
		}
	}
	return 0;

err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
	qp->sqp_proxy_rcv = NULL;
	return -ENOMEM;
}
static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return !attr->srq;
}
static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{
	int i;
	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.qp0_proxy[i])
			return !!dev->caps.qp0_qkey[i];
	}
	return 0;
}
static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
				    struct mlx4_ib_qp *qp)
{
	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
	mlx4_counter_free(dev->dev, qp->counter_index->index);
	list_del(&qp->counter_index->list);
	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);

	kfree(qp->counter_index);
	qp->counter_index = NULL;
}
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
			    gfp_t gfp)
{
	int qpn;
	int err;
	struct mlx4_ib_sqp *sqp;
	struct mlx4_ib_qp *qp;
	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
	struct mlx4_ib_cq *mcq;
	unsigned long flags;

	/* When tunneling special qps, we use a plain UD qp */
	if (sqpn) {
		if (mlx4_is_mfunc(dev->dev) &&
		    (!mlx4_is_master(dev->dev) ||
		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
			if (init_attr->qp_type == IB_QPT_GSI)
				qp_type = MLX4_IB_QPT_PROXY_GSI;
			else {
				if (mlx4_is_master(dev->dev) ||
				    qp0_enabled_vf(dev->dev, sqpn))
					qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
				else
					qp_type = MLX4_IB_QPT_PROXY_SMI;
			}
		}
		qpn = sqpn;
		/* add extra sg entry for tunneling */
		init_attr->cap.max_recv_sge++;
	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
			container_of(init_attr,
				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
		     tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
		    !mlx4_is_master(dev->dev))
			return -EINVAL;
		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
			qp_type = MLX4_IB_QPT_TUN_GSI;
		else if (tnl_init->slave == mlx4_master_func_num(dev->dev) ||
			 mlx4_vf_smi_enabled(dev->dev, tnl_init->slave,
					     tnl_init->port))
			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
		else
			qp_type = MLX4_IB_QPT_TUN_SMI;
		/* we are definitely in the PPF here, since we are creating
		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
			+ tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
		sqpn = qpn;
	}

	if (!*caller_qp) {
		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
			if (!sqp)
				return -ENOMEM;
			qp = &sqp->qp;
			qp->pri.vid = 0xFFFF;
			qp->alt.vid = 0xFFFF;
		} else {
			qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
			if (!qp)
				return -ENOMEM;
			qp->pri.vid = 0xFFFF;
			qp->alt.vid = 0xFFFF;
		}
	} else
		qp = *caller_qp;

	qp->mlx4_ib_qp_type = qp_type;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);
	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->state = IB_QPS_RESET;
	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
	if (err)
		goto err;

	if (pd->uobject) {
		struct mlx4_ib_create_qp ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err;
		}

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(dev, qp, &ucmd);
		if (err)
			goto err;

		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
				    ilog2(qp->umem->page_size), &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (qp_has_rq(init_attr)) {
			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
						  ucmd.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
	} else {
		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
			if (dev->steering_support ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED)
				qp->flags |= MLX4_IB_QP_NETIF;
			else
				goto err;
		}

		err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
		if (err)
			goto err;

		if (qp_has_rq(init_attr)) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
		if (err)
			goto err_mtt;

		qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(u64), gfp);
		if (!qp->sq.wrid)
			qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
						gfp, PAGE_KERNEL);
		qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(u64), gfp);
		if (!qp->rq.wrid)
			qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
						gfp, PAGE_KERNEL);
		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			if (alloc_proxy_bufs(pd->device, qp)) {
				err = -ENOMEM;
				goto err_wrid;
			}
		}
	} else {
		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
		 * otherwise, the WQE BlueFlame setup flow wrongly causes
		 * VLAN insertion. */
		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
						    (init_attr->cap.max_send_wr ?
						     MLX4_RESERVE_ETH_BF_QP : 0) |
						    (init_attr->cap.max_recv_wr ?
						     MLX4_RESERVE_A0_QP : 0));
		else
			if (qp->flags & MLX4_IB_QP_NETIF)
				err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
			else
				err = mlx4_qp_reserve_range(dev->dev, 1, 1,
							    &qpn, 0);
		if (err)
			goto err_proxy;
	}

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
	if (err)
		goto err_qpn;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		qp->mqp.qpn |= (1 << 23);

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
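	/*
	 * Worked example (illustrative): for qpn == 0x000041, qpn << 8 ==
	 * 0x00004100, and swab32() turns that into 0x00410000 -- the byte
	 * order the doorbell register wants -- so post_send can write the
	 * precomputed value directly.
	 */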
	qp->mqp.event = mlx4_ib_qp_event;
	if (!*caller_qp)
		*caller_qp = qp;

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
			 to_mcq(init_attr->recv_cq));
	/* Maintain device to QPs access, needed for further handling
	 * via reset flow
	 */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Maintain CQ to QPs access, needed for further handling
	 * via reset flow
	 */
	mcq = to_mcq(init_attr->send_cq);
	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
	mcq = to_mcq(init_attr->recv_cq);
	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
			   to_mcq(init_attr->recv_cq));
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	return 0;
err_qpn:
	if (!sqpn) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qpn, 1);
		else
			mlx4_qp_release_range(dev->dev, qpn, 1);
	}
err_proxy:
	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
		free_proxy_bufs(pd->device, qp);
err_wrid:
	if (pd->uobject) {
		if (qp_has_rq(init_attr))
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
	} else {
		kvfree(qp->sq.wrid);
		kvfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && qp_has_rq(init_attr))
		mlx4_db_free(dev->dev, &qp->db);

err:
	if (!*caller_qp)
		kfree(qp);
	return err;
}
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	}
}
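/*
 * Note (illustrative): ordering the two spin locks by CQN in
 * mlx4_ib_lock_cqs() imposes a single global lock order, so one thread
 * locking CQs (X, Y) and another locking (Y, X) -- a QP whose send and
 * recv CQs are swapped -- cannot deadlock.
 */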
static void del_gid_entries(struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}
static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
	else
		return to_mpd(qp->ibqp.pd);
}
static void get_cqs(struct mlx4_ib_qp *qp,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
		*recv_cq = *send_cq;
		break;
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = *send_cq;
		break;
	default:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;
	}
}
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;
	unsigned long flags;

	if (qp->state != IB_QPS_RESET) {
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			pr_warn("modify QP %06x to RESET failed.\n",
				qp->mqp.qpn);
		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = 0;
			qp->pri.smac_port = 0;
		}
		if (qp->alt.smac) {
			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = 0;
		}
		if (qp->pri.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
			qp->pri.vid = 0xFFFF;
			qp->pri.candidate_vid = 0xFFFF;
			qp->pri.update_vid = 0;
		}
		if (qp->alt.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
			qp->alt.vid = 0xFFFF;
			qp->alt.candidate_vid = 0xFFFF;
			qp->alt.update_vid = 0;
		}
	}

	get_cqs(qp, &send_cq, &recv_cq);

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx4_ib_lock_cqs(send_cq, recv_cq);

	/* del from lists under both locks above to protect reset flow paths */
	list_del(&qp->qps_list);
	list_del(&qp->cq_send_list);
	list_del(&qp->cq_recv_list);
	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	mlx4_qp_free(dev->dev, &qp->mqp);

	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
		else
			mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
	}

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (qp->rq.wqe_cnt)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kvfree(qp->sq.wrid);
		kvfree(qp->rq.wrid);
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
			free_proxy_bufs(&dev->ib_dev, qp);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (qp->rq.wqe_cnt)
			mlx4_db_free(dev->dev, &qp->db);
	}

	del_gid_entries(qp);
}
static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{
	/* Native or PPF */
	if (!mlx4_is_mfunc(dev->dev) ||
	    (mlx4_is_master(dev->dev) &&
	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
		return dev->dev->phys_caps.base_sqpn +
			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
			attr->port_num - 1;
	}
	/* PF or VF -- creating proxies */
	if (attr->qp_type == IB_QPT_SMI)
		return dev->dev->caps.qp0_proxy[attr->port_num - 1];
	else
		return dev->dev->caps.qp1_proxy[attr->port_num - 1];
}
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = NULL;
	int err;
	int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
	u16 xrcdn = 0;
	gfp_t gfp;

	gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
		GFP_NOIO : GFP_KERNEL;
	/*
	 * We only support LSO, vendor flag1, and multicast loopback blocking,
	 * and only for kernel UD QPs.
	 */
	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
					MLX4_IB_SRIOV_TUNNEL_QP |
					MLX4_IB_SRIOV_SQP |
					MLX4_IB_QP_NETIF |
					MLX4_IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
		if (init_attr->qp_type != IB_QPT_UD)
			return ERR_PTR(-EINVAL);
	}

	if (init_attr->create_flags &&
	    ((udata && init_attr->create_flags & ~(sup_u_create_flags)) ||
	     ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
					   MLX4_IB_QP_CREATE_USE_GFP_NOIO |
					   MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)) &&
	      init_attr->qp_type != IB_QPT_UD) ||
	     ((init_attr->create_flags & MLX4_IB_SRIOV_SQP) &&
	      init_attr->qp_type > IB_QPT_GSI)))
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		pd = to_mxrcd(init_attr->xrcd)->pd;
		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
		/* fall through */
	case IB_QPT_XRC_INI:
		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
			return ERR_PTR(-ENOSYS);
		init_attr->recv_cq = init_attr->send_cq;
		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_RAW_PACKET:
		qp = kzalloc(sizeof *qp, gfp);
		if (!qp)
			return ERR_PTR(-ENOMEM);
		qp->pri.vid = 0xFFFF;
		qp->alt.vid = 0xFFFF;
		/* fall through */
	case IB_QPT_UD:
	{
		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
				       udata, 0, &qp, gfp);
		if (err)
			return ERR_PTR(err);

		qp->ibqp.qp_num = qp->mqp.qpn;
		qp->xrcdn = xrcdn;

		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Userspace is not allowed to create special QPs: */
		if (udata)
			return ERR_PTR(-EINVAL);

		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
				       get_sqp_num(to_mdev(pd->device), init_attr),
				       &qp, gfp);
		if (err)
			return ERR_PTR(err);

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);
	struct mlx4_ib_pd *pd;

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
	    dev->qp1_proxy[mqp->port - 1] == mqp) {
		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
		dev->qp1_proxy[mqp->port - 1] = NULL;
		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
	}

	if (mqp->counter_index)
		mlx4_ib_free_qp_counter(dev, mqp);

	pd = get_pd(mqp);
	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
	switch (type) {
	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
	case MLX4_IB_QPT_XRC_INI:
	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;

	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_MLX : -1);
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_TUN_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_UD : -1);
	default:			return -1;
	}
}
static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
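/*
 * Worked example (illustrative): qp_access_flags ==
 * IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE with a non-zero
 * dest_rd_atomic maps to MLX4_QP_BIT_RRE | MLX4_QP_BIT_RWE; with
 * dest_rd_atomic == 0 everything except IB_ACCESS_REMOTE_WRITE is
 * masked off first, leaving only MLX4_QP_BIT_RWE.
 */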
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
			  struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
{
	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
		IB_LINK_LAYER_ETHERNET;
	int vidx;
	int smac_index;
	int err;

	path->grh_mylmc = ah->src_path_bits & 0x7f;
	path->rlid	= cpu_to_be16(ah->dlid);
	if (ah->static_rate) {
		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;

	if (ah->ah_flags & IB_AH_GRH) {
		int real_sgid_index = mlx4_ib_gid_index_to_real_index(dev,
								      port,
								      ah->grh.sgid_index);

		if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = real_sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (is_eth) {
		if (!(ah->ah_flags & IB_AH_GRH))
			return -1;

		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 7) << 3);

		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
		if (vlan_tag < 0x1000) {
			if (smac_info->vid < 0x1000) {
				/* both valid vlan ids */
				if (smac_info->vid != vlan_tag) {
					/* different VIDs.  unreg old and reg new */
					err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
					if (err)
						return err;
					smac_info->candidate_vid = vlan_tag;
					smac_info->candidate_vlan_index = vidx;
					smac_info->candidate_vlan_port = port;
					smac_info->update_vid = 1;
					path->vlan_index = vidx;
				} else {
					path->vlan_index = smac_info->vlan_index;
				}
			} else {
				/* no current vlan tag in qp */
				err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
				if (err)
					return err;
				smac_info->candidate_vid = vlan_tag;
				smac_info->candidate_vlan_index = vidx;
				smac_info->candidate_vlan_port = port;
				smac_info->update_vid = 1;
				path->vlan_index = vidx;
			}
			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
		} else {
			/* have current vlan tag. unregister it at modify-qp success */
			if (smac_info->vid < 0x1000) {
				smac_info->candidate_vid = 0xFFFF;
				smac_info->update_vid = 1;
			}
		}

		/* get smac_index for RoCE use.
		 * If no smac was yet assigned, register one.
		 * If one was already assigned, but the new mac differs,
		 * unregister the old one and register the new one.
		 */
		if ((!smac_info->smac && !smac_info->smac_port) ||
		    smac_info->smac != smac) {
			/* register candidate now, unreg if needed, after success */
			smac_index = mlx4_register_mac(dev->dev, port, smac);
			if (smac_index >= 0) {
				smac_info->candidate_smac_index = smac_index;
				smac_info->candidate_smac = smac;
				smac_info->candidate_smac_port = port;
			} else {
				return -EINVAL;
			}
		} else {
			smac_index = smac_info->smac_index;
		}

		memcpy(path->dmac, ah->dmac, 6);
		path->ackto = MLX4_IB_LINK_TYPE_ETH;
		/* put MAC table smac index for IBoE */
		path->grh_mylmc = (u8) (smac_index) | 0x80;
	} else {
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 0xf) << 2);
	}

	return 0;
}
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
			 enum ib_qp_attr_mask qp_attr_mask,
			 struct mlx4_ib_qp *mqp,
			 struct mlx4_qp_path *path, u8 port,
			 u16 vlan_id, u8 *smac)
{
	return _mlx4_set_path(dev, &qp->ah_attr,
			      mlx4_mac_to_u64(smac),
			      vlan_id,
			      path, &mqp->pri, port);
}
static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
			     const struct ib_qp_attr *qp,
			     enum ib_qp_attr_mask qp_attr_mask,
			     struct mlx4_ib_qp *mqp,
			     struct mlx4_qp_path *path, u8 port)
{
	return _mlx4_set_path(dev, &qp->alt_ah_attr,
			      0,
			      0xffff,
			      path, &mqp->alt, port);
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
			ge->added = 1;
			ge->port = qp->port;
		}
	}
}
static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev,
				    struct mlx4_ib_qp *qp,
				    struct mlx4_qp_context *context)
{
	u64 u64_mac;
	int smac_index;

	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);

	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
	if (!qp->pri.smac && !qp->pri.smac_port) {
		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
		if (smac_index >= 0) {
			qp->pri.candidate_smac_index = smac_index;
			qp->pri.candidate_smac = u64_mac;
			qp->pri.candidate_smac_port = qp->port;
			context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
		} else {
			return -ENOENT;
		}
	}
	return 0;
}
static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct counter_index *new_counter_index;
	int err;
	u32 tmp_idx;

	if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
	    IB_LINK_LAYER_ETHERNET ||
	    !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) ||
	    !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK))
		return 0;

	err = mlx4_counter_alloc(dev->dev, &tmp_idx);
	if (err)
		return err;

	new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL);
	if (!new_counter_index) {
		mlx4_counter_free(dev->dev, tmp_idx);
		return -ENOMEM;
	}
	new_counter_index->index = tmp_idx;
	new_counter_index->allocated = 1;
	qp->counter_index = new_counter_index;

	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
	list_add_tail(&new_counter_index->list,
		      &dev->counters_table[qp->port - 1].counters_list);
	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);

	return 0;
}
static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_ib_pd *pd;
	struct mlx4_ib_cq *send_cq, *recv_cq;
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int steer_qp = 0;
	int err = -EINVAL;
	int counter_index;

	/* APM is not supported under RoCE */
	if (attr_mask & IB_QP_ALT_PATH &&
	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
	    IB_LINK_LAYER_ETHERNET)
		return -ENOTSUPP;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
	else if (ibqp->qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
					      ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			pr_err("path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
			ilog2(dev->dev->caps.max_msg_sz);
	}
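	/*
	 * Worked example (illustrative): mtu_msgmax packs the MTU enum
	 * into the top 3 bits and log2 of the maximum message size into
	 * the low 5 bits, so a plain UD QP gets (IB_MTU_4096 << 5) | 12,
	 * i.e. (5 << 5) | 12 == 0xac: 4K MTU, 4KB (2^12) message limit.
	 */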
	if (qp->rq.wqe_cnt)
		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
	context->rq_size_stride |= qp->rq.wqe_shift - 4;

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (new_state == IB_QPS_RESET && qp->counter_index)
		mlx4_ib_free_qp_counter(dev, qp);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
			context->param3 |= cpu_to_be32(1 << 30);
	}

	if (qp->ibqp.uobject)
		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
	else
		context->usr_page = cpu_to_be32(dev->priv_uar.index);

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		err = create_qp_lb_counter(dev, qp);
		if (err)
			goto out;

		counter_index =
			dev->counters_table[qp->port - 1].default_counter;
		if (qp->counter_index)
			counter_index = qp->counter_index->index;

		if (counter_index != -1) {
			context->pri_path.counter_index = counter_index;
			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
			if (qp->counter_index) {
				context->pri_path.fl |=
					MLX4_FL_ETH_SRC_CHECK_MC_LB;
				context->pri_path.vlan_control |=
					MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
			}
		} else
			context->pri_path.counter_index =
				MLX4_SINK_COUNTER_INDEX(dev->dev);

		if (qp->flags & MLX4_IB_QP_NETIF) {
			mlx4_ib_steer_qp_reg(dev, qp, 1);
			steer_qp = 1;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
			context->pri_path.disable_pkey_check = 0x40;
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
			attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		union ib_gid gid;
		struct ib_gid_attr gid_attr;
		u16 vlan = 0xffff;
		u8 smac[ETH_ALEN];
		int status = 0;

		if (rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
		    attr->ah_attr.ah_flags & IB_AH_GRH) {
			int index = attr->ah_attr.grh.sgid_index;

			status = ib_get_cached_gid(ibqp->device, port_num,
						   index, &gid, &gid_attr);
			if (!status && !memcmp(&gid, &zgid, sizeof(gid)))
				status = -ENOENT;
			if (!status && gid_attr.ndev) {
				vlan = rdma_vlan_dev_vlan_id(gid_attr.ndev);
				memcpy(smac, gid_attr.ndev->dev_addr, ETH_ALEN);
				dev_put(gid_attr.ndev);
			}
		}
		if (status)
			goto out;

		if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
				  port_num, vlan, smac))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto |= attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
				      &context->alt_path,
				      attr->alt_port_num))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);
	context->pd       = cpu_to_be32(pd->pdn);
	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
	context->params1  = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	/* Set "fast registration enabled" for all kernel QPs */
	if (!qp->ibqp.uobject)
		context->params1 |= cpu_to_be32(1 << 11);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (ibqp->srq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
	}
	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	/* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
	if (attr_mask & IB_QP_QKEY) {
		if (qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))
			context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
		else {
			if (mlx4_is_mfunc(dev->dev) &&
			    !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
			    (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
			    MLX4_RESERVED_QKEY_BASE) {
				pr_err("Cannot use reserved QKEY 0x%x (range 0xffff0000..0xffffffff is reserved)\n",
				       attr->qkey);
				err = -EINVAL;
				goto out;
			}
			context->qkey = cpu_to_be32(attr->qkey);
		}
		optpar |= MLX4_QP_OPTPAR_Q_KEY;
	}

	if (ibqp->srq)
		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_INIT &&
	    new_state == IB_QPS_RTR  &&
	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	     ibqp->qp_type == IB_QPT_UD ||
	     ibqp->qp_type == IB_QPT_RAW_PACKET)) {
		context->pri_path.sched_queue = (qp->port - 1) << 6;
		if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
		    qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
			if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
				context->pri_path.fl = 0x80;
		} else {
			if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
				context->pri_path.fl = 0x80;
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
		}
		if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
		    IB_LINK_LAYER_ETHERNET) {
			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
				context->pri_path.feup = 1 << 7; /* don't fsm */
			/* handle smac_index */
			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
				err = handle_eth_ud_smac_index(dev, qp, context);
				if (err) {
					err = -EINVAL;
					goto out;
				}
				if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
					dev->qp1_proxy[qp->port - 1] = qp;
			}
		}
	}

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
					MLX4_IB_LINK_TYPE_ETH;
		if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
			/* set QP to receive both tunneled & non-tunneled packets */
			if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
				context->srqn = cpu_to_be32(7 << 28);
		}
	}

	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
		int is_eth = rdma_port_get_link_layer(
				&dev->ib_dev, qp->port) ==
				IB_LINK_LAYER_ETHERNET;
		if (is_eth) {
			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
		}
	}

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->rlkey |= (1 << 4);

	/*
	 * Before passing a kernel QP to the HW, make sure that the
	 * ownership bits of the send queue are set and the SQ
	 * headroom is stamped so that the hardware doesn't start
	 * processing stale work requests.
	 */
	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		struct mlx4_wqe_ctrl_seg *ctrl;
		int i;

		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
			ctrl = get_send_wqe(qp, i);
			ctrl->owner_opcode = cpu_to_be32(1 << 31);
			if (qp->sq_max_wqes_per_wr == 1)
				ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);

			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
		}
	}
	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
			     to_mlx4_state(new_state), context, optpar,
			     sqd_event, &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		qp->port = attr->port_num;
		update_mcg_macs(dev, qp);
	}
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_sqp_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
			if (mlx4_INIT_PORT(dev->dev, qp->port))
				pr_warn("INIT_PORT failed for port %d\n",
					qp->port);

		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
			mlx4_CLOSE_PORT(dev->dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET) {
		if (!ibqp->uobject) {
			mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
					 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
			if (send_cq != recv_cq)
				mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

			qp->rq.head = 0;
			qp->rq.tail = 0;
			qp->sq.head = 0;
			qp->sq.tail = 0;
			qp->sq_next_wqe = 0;
			if (qp->rq.wqe_cnt)
				*qp->db.db = 0;
			if (qp->flags & MLX4_IB_QP_NETIF)
				mlx4_ib_steer_qp_reg(dev, qp, 0);
		}
		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = 0;
			qp->pri.smac_port = 0;
		}
		if (qp->alt.smac) {
			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = 0;
		}
		if (qp->pri.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
			qp->pri.vid = 0xFFFF;
			qp->pri.candidate_vid = 0xFFFF;
			qp->pri.update_vid = 0;
		}
		if (qp->alt.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
			qp->alt.vid = 0xFFFF;
			qp->alt.candidate_vid = 0xFFFF;
			qp->alt.update_vid = 0;
		}
	}
out:
	if (err && qp->counter_index)
		mlx4_ib_free_qp_counter(dev, qp);
	if (err && steer_qp)
		mlx4_ib_steer_qp_reg(dev, qp, 0);
	kfree(context);
	if (qp->pri.candidate_smac ||
	    (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
		if (err) {
			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
		} else {
			if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = qp->pri.candidate_smac;
			qp->pri.smac_index = qp->pri.candidate_smac_index;
			qp->pri.smac_port = qp->pri.candidate_smac_port;
		}
		qp->pri.candidate_smac = 0;
		qp->pri.candidate_smac_index = 0;
		qp->pri.candidate_smac_port = 0;
	}
	if (qp->alt.candidate_smac) {
		if (err) {
			mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
		} else {
			if (qp->alt.smac)
				mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = qp->alt.candidate_smac;
			qp->alt.smac_index = qp->alt.candidate_smac_index;
			qp->alt.smac_port = qp->alt.candidate_smac_port;
		}
		qp->alt.candidate_smac = 0;
		qp->alt.candidate_smac_index = 0;
		qp->alt.candidate_smac_port = 0;
	}

	if (qp->pri.update_vid) {
		if (err) {
			if (qp->pri.candidate_vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
						     qp->pri.candidate_vid);
		} else {
			if (qp->pri.vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
						     qp->pri.vid);
			qp->pri.vid = qp->pri.candidate_vid;
			qp->pri.vlan_port = qp->pri.candidate_vlan_port;
			qp->pri.vlan_index = qp->pri.candidate_vlan_index;
		}
		qp->pri.candidate_vid = 0xFFFF;
		qp->pri.update_vid = 0;
	}

	if (qp->alt.update_vid) {
		if (err) {
			if (qp->alt.candidate_vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
						     qp->alt.candidate_vid);
		} else {
			if (qp->alt.vid < 0x1000)
				mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
						     qp->alt.vid);
			qp->alt.vid = qp->alt.candidate_vid;
			qp->alt.vlan_port = qp->alt.candidate_vlan_port;
			qp->alt.vlan_index = qp->alt.candidate_vlan_index;
		}
		qp->alt.candidate_vid = 0xFFFF;
		qp->alt.update_vid = 0;
	}

	return err;
}
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;
	int ll;
	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ll = IB_LINK_LAYER_UNSPECIFIED;
	} else {
		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, ll)) {
		pr_debug("qpn 0x%x: invalid attribute mask specified "
			 "for transition %d to %d. qp_type %d,"
			 " attr_mask 0x%x\n",
			 ibqp->qp_num, cur_state, new_state,
			 ibqp->qp_type, attr_mask);
		goto out;
	}

	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
		if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
			if ((ibqp->qp_type == IB_QPT_RC) ||
			    (ibqp->qp_type == IB_QPT_UD) ||
			    (ibqp->qp_type == IB_QPT_UC) ||
			    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
			    (ibqp->qp_type == IB_QPT_XRC_INI)) {
				attr->port_num = mlx4_ib_bond_next_port(dev);
			}
		} else {
			/* no sense in changing port_num
			 * when ports are bonded */
			attr_mask &= ~IB_QP_PORT;
		}
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
		pr_debug("qpn 0x%x: invalid port number (%d) specified "
			 "for transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->port_num, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
	     IB_LINK_LAYER_ETHERNET))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
			pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
				 "for transition %d to %d. qp_type %d\n",
				 ibqp->qp_num, attr->pkey_index, cur_state,
				 new_state, ibqp->qp_type);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
		pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
			 "Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
		pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
			 "Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
		attr->port_num = 1;

out:
	mutex_unlock(&qp->mutex);
	return err;
}

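/*
 * Look up the qkey a VF should use for its proxy/tunnel QP0 by matching
 * the QP number against the per-port proxy and tunnel QP0 numbers
 * cached in the device capabilities.
 */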
static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
{
	int i;
	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.qp0_proxy[i] ||
		    qpn == dev->caps.qp0_tunnel[i]) {
			*qkey = dev->caps.qp0_qkey[i];
			return 0;
		}
	}
	return -EINVAL;
}

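/*
 * Build the MLX-format header for a QP0 (SMI) packet sent through an
 * SR-IOV proxy or tunnel QP: a forced-loopback LRH/BTH/DETH is packed
 * with ib_ud_header_pack() and copied into the WQE as inline data,
 * split across two inline segments if it would cross a 64-byte
 * boundary.
 */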
static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
				  struct ib_ud_wr *wr,
				  void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
	struct ib_device *ib_dev = &mdev->ib_dev;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->ah);
	u16 pkey;
	u32 qkey;
	int send_size;
	int header_size;
	int spc;
	int i;

	if (wr->wr.opcode != IB_WR_SEND)
		return -EINVAL;

	send_size = 0;

	for (i = 0; i < wr->wr.num_sge; ++i)
		send_size += wr->wr.sg_list[i].length;

	/* for proxy-qp0 sends, need to add in size of tunnel header */
	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
		send_size += sizeof (struct mlx4_ib_tunnel_header);

	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header);

	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
		sqp->ud_header.lrh.source_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	/* force loopback */
	mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
	mlx->rlid = sqp->ud_header.lrh.destination_lid;

	sqp->ud_header.lrh.virtual_lane = 0;
	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
	ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
	else
		sqp->ud_header.bth.destination_qpn =
			cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);

	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	if (mlx4_is_master(mdev->dev)) {
		if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
			return -EINVAL;
	} else {
		if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
			return -EINVAL;
	}
	sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);

	sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
	sqp->ud_header.immediate_present = 0;

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
	      ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}

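/* Expand a MAC address stored in the low 48 bits of a u64 into a byte
 * array in network order (dst_mac[0] is the most significant byte). */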
static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
{
	int i;

	for (i = ETH_ALEN; i; i--) {
		dst_mac[i - 1] = src_mac & 0xff;
		src_mac >>= 8;
	}
}

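/*
 * Build the MLX-format header for a send on QP0/QP1: LRH for IB links
 * or an Ethernet (optionally VLAN-tagged) header for RoCE, plus an
 * optional GRH, followed by BTH and DETH.  The packed header is copied
 * into the WQE as inline data.
 */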
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{
	struct ib_device *ib_dev = sqp->qp.ibqp.device;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_ctrl_seg *ctrl = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->ah);
	union ib_gid sgid;
	u16 pkey;
	int send_size;
	int header_size;
	int spc;
	int i;
	int err = 0;
	u16 vlan = 0xffff;
	bool is_eth;
	bool is_vlan = false;
	bool is_grh;

	send_size = 0;
	for (i = 0; i < wr->wr.num_sge; ++i)
		send_size += wr->wr.sg_list[i].length;

	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
	is_grh = mlx4_ib_ah_grh_present(ah);
	if (is_eth) {
		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
							   be32_to_cpu(ah->av.ib.port_pd) >> 24,
							   ah->av.ib.gid_index, &sgid.raw[0]);
			if (err)
				return err;
		} else {
			err = ib_get_cached_gid(ib_dev,
						be32_to_cpu(ah->av.ib.port_pd) >> 24,
						ah->av.ib.gid_index, &sgid,
						NULL);
			if (!err && !memcmp(&sgid, &zgid, sizeof(sgid)))
				err = -ENOENT;
			if (err)
				return err;
		}

		if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
			is_vlan = 1;
		}
	}
	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);

	if (!is_eth) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	if (is_grh) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label =
			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
		if (is_eth)
			memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
		else {
			if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
				/* When multi-function is enabled, the ib_core gid
				 * indexes don't necessarily match the hw ones, so
				 * we must use our own cache
				 */
				sqp->ud_header.grh.source_gid.global.subnet_prefix =
					cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
								    demux[sqp->qp.port - 1].
								    subnet_prefix)));
				sqp->ud_header.grh.source_gid.global.interface_id =
					to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
						guid_cache[ah->av.ib.gid_index];
			} else
				ib_get_cached_gid(ib_dev,
						  be32_to_cpu(ah->av.ib.port_pd) >> 24,
						  ah->av.ib.gid_index,
						  &sqp->ud_header.grh.source_gid, NULL);
		}
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.ib.dgid, 16);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (!is_eth) {
		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
					  (sqp->ud_header.lrh.destination_lid ==
					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
					  (sqp->ud_header.lrh.service_level << 8));
		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
		mlx->rlid = sqp->ud_header.lrh.destination_lid;
	}

	switch (wr->wr.opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	if (is_eth) {
		struct in6_addr in6;

		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;

		mlx->sched_prio = cpu_to_be16(pcp);

		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
		/* FIXME: cache smac value? */
		memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
		memcpy(&in6, sgid.raw, sizeof(in6));

		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
			u8 smac[ETH_ALEN];

			mlx4_u64_to_smac(smac, mac);
			memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
		} else {
			/* use the src mac of the tunnel */
			memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
		}

		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
		if (!is_vlan) {
			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
		} else {
			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
		}
	} else {
		sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	}
	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	if (0) {
		pr_err("built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				pr_err("  [%02x] ", i * 4);
			pr_cont(" %08x",
				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				pr_cont("\n");
		}
		pr_err("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}

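/*
 * Check whether posting nreq more work requests would overflow the
 * work queue.  The lockless head/tail test is cheap but may be stale,
 * so an apparent overflow is re-checked under the CQ lock, which
 * serializes against completions that advance the tail.
 */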
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

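/* Translate IB access flags into hardware FMR/bind permission bits;
 * local read access is always granted. */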
static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}

static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
			struct ib_reg_wr *wr)
{
	struct mlx4_ib_mr *mr = to_mmr(wr->mr);

	fseg->flags = convert_access(wr->access);
	fseg->mem_key = cpu_to_be32(wr->key);
	fseg->buf_list = cpu_to_be64(mr->page_map);
	fseg->start_addr = cpu_to_be64(mr->ibmr.iova);
	fseg->reg_len = cpu_to_be64(mr->ibmr.length);
	fseg->offset = 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size = cpu_to_be32(ilog2(mr->ibmr.page_size));
	fseg->reserved[0] = 0;
	fseg->reserved[1] = 0;
}

static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg,
			 struct ib_bind_mw_wr *wr)
{
	bseg->flags1 =
		convert_access(wr->bind_info.mw_access_flags) &
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ |
			    MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
			    MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
	bseg->flags2 = 0;
	if (wr->mw->type == IB_MW_TYPE_2)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
	if (wr->bind_info.mw_access_flags & IB_ZERO_BASED)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
	bseg->new_rkey = cpu_to_be32(wr->rkey);
	bseg->lkey = cpu_to_be32(wr->bind_info.mr->lkey);
	bseg->addr = cpu_to_be64(wr->bind_info.addr);
	bseg->length = cpu_to_be64(wr->bind_info.length);
}

static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	memset(iseg, 0, sizeof(*iseg));
	iseg->mem_key = cpu_to_be32(rkey);
}

static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

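/*
 * Fill an atomic segment: for compare & swap, swap_add holds the swap
 * value and compare the comparand; for (masked) fetch & add, swap_add
 * holds the addend and compare the mask (zero for the unmasked case).
 */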
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
			   struct ib_atomic_wr *wr)
{
	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->swap);
		aseg->compare = cpu_to_be64(wr->compare_add);
	} else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->compare_add);
		aseg->compare = cpu_to_be64(wr->compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->compare_add);
		aseg->compare = 0;
	}
}

static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  struct ib_atomic_wr *wr)
{
	aseg->swap_add = cpu_to_be64(wr->swap);
	aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
	aseg->compare = cpu_to_be64(wr->compare_add);
	aseg->compare_mask = cpu_to_be64(wr->compare_add_mask);
}

static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_ud_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->remote_qpn);
	dseg->qkey = cpu_to_be32(wr->remote_qkey);
	dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
}

static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
				    struct mlx4_wqe_datagram_seg *dseg,
				    struct ib_ud_wr *wr,
				    enum mlx4_ib_qp_type qpt)
{
	union mlx4_ext_av *av = &to_mah(wr->ah)->av;
	struct mlx4_av sqp_av = {0};
	int port = *((u8 *) &av->ib.port_pd) & 0x3;

	/* force loopback */
	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
			cpu_to_be32(0xf0000000);

	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
	if (qpt == MLX4_IB_QPT_PROXY_GSI)
		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
	else
		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]);
	/* Use QKEY from the QP context, which is set by master */
	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}

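/*
 * Prepend an inline mlx4_ib_tunnel_header (original AV, remote QPN,
 * pkey index, qkey, MAC and VLAN) to a proxied special-QP packet so the
 * receiving end can reconstruct the original wire addressing.
 */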
static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	struct mlx4_ib_tunnel_header hdr;
	struct mlx4_ib_ah *ah = to_mah(wr->ah);
	int spc;
	int i;

	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
	hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
	hdr.pkey_index = cpu_to_be16(wr->pkey_index);
	hdr.qkey = cpu_to_be32(wr->remote_qkey);
	memcpy(hdr.mac, ah->av.eth.mac, 6);
	hdr.vlan = ah->av.eth.vlan;

	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (sizeof (hdr) <= spc) {
		memcpy(inl + 1, &hdr, sizeof (hdr));
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
		i = 1;
	} else {
		memcpy(inl + 1, &hdr, spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
}

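/* Append the zero-length inline segment that reserves room for the
 * hardware-generated ICRC on MLX-transport sends. */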
static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= * 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}

static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= * 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}

static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);
}

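/*
 * Copy the LSO headers into the WQE and return the 16-byte-aligned
 * segment length; *blh is set (and later merged into owner_opcode)
 * when the headers are larger than a cacheline.
 */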
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
			 __be32 *lso_hdr_sz, __be32 *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);

	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
		*blh = cpu_to_be32(1 << 6);

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->header, wr->hlen);

	*lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen);
	*lso_seg_len = halign;
	return 0;
}

static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

static void add_zero_len_inline(void *wqe)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	memset(wqe, 0, 16);
	inl->byte_count = cpu_to_be32(1 << 31);
}

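/*
 * Post a chain of send work requests.  Each WQE is laid out as
 *
 *	ctrl seg | transport seg(s) | data seg(s)
 *
 * where the transport segments depend on the QP type and opcode
 * (raddr/atomic for RC, datagram for UD, inline MLX headers for
 * QP0/QP1 and their SR-IOV proxies).  Ownership of each WQE is handed
 * to the hardware and the doorbell is rung once for the whole chain.
 */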
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	void *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	unsigned long flags;
	int nreq;
	int err = 0;
	unsigned ind;
	int uninitialized_var(stamp);
	int uninitialized_var(size);
	unsigned uninitialized_var(seglen);
	__be32 dummy;
	__be32 *lso_wqe;
	__be32 uninitialized_var(lso_hdr_sz);
	__be32 blh;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);

	spin_lock_irqsave(&qp->sq.lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	ind = qp->sq_next_wqe;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		lso_wqe = &dummy;
		blh = 0;

		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

		ctrl->srcrb_flags =
			(wr->send_flags & IB_SEND_SIGNALED ?
			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
			qp->sq_signal_bits;

		ctrl->imm = send_ieth(wr);

		wqe += sizeof *ctrl;
		size = sizeof *ctrl / 16;

		switch (qp->mlx4_ib_qp_type) {
		case MLX4_IB_QPT_RC:
		case MLX4_IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
					      atomic_wr(wr)->rkey);
				wqe += sizeof (struct mlx4_wqe_raddr_seg);

				set_atomic_seg(wqe, atomic_wr(wr));
				wqe += sizeof (struct mlx4_wqe_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;

				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
					      atomic_wr(wr)->rkey);
				wqe += sizeof (struct mlx4_wqe_raddr_seg);

				set_masked_atomic_seg(wqe, atomic_wr(wr));
				wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;

				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mlx4_wqe_raddr_seg);
				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
				break;

			case IB_WR_LOCAL_INV:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
				wqe += sizeof (struct mlx4_wqe_local_inval_seg);
				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
				break;

			case IB_WR_REG_MR:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_reg_seg(wqe, reg_wr(wr));
				wqe += sizeof(struct mlx4_wqe_fmr_seg);
				size += sizeof(struct mlx4_wqe_fmr_seg) / 16;
				break;

			case IB_WR_BIND_MW:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_bind_seg(wqe, bind_mw_wr(wr));
				wqe += sizeof(struct mlx4_wqe_bind_seg);
				size += sizeof(struct mlx4_wqe_bind_seg) / 16;
				break;
			default:
				/* No extra segments required for sends */
				break;
			}
			break;

		case MLX4_IB_QPT_TUN_SMI_OWNER:
			err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
					ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_TUN_SMI:
		case MLX4_IB_QPT_TUN_GSI:
			/* this is a UD qp used in MAD responses to slaves. */
			set_datagram_seg(wqe, ud_wr(wr));
			/* set the forced-loopback bit in the data seg av */
			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
			wqe += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			break;
		case MLX4_IB_QPT_UD:
			set_datagram_seg(wqe, ud_wr(wr));
			wqe += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

			if (wr->opcode == IB_WR_LSO) {
				err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
						&lso_hdr_sz, &blh);
				if (unlikely(err)) {
					*bad_wr = wr;
					goto out;
				}
				lso_wqe = (__be32 *) wqe;
				wqe += seglen;
				size += seglen / 16;
			}
			break;

		case MLX4_IB_QPT_PROXY_SMI_OWNER:
			err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
					ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe += seglen;
			size += seglen / 16;
			/* to start tunnel header on a cache-line boundary */
			add_zero_len_inline(wqe);
			wqe += 16;
			size++;
			build_tunnel_header(ud_wr(wr), wqe, &seglen);
			wqe += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_PROXY_SMI:
		case MLX4_IB_QPT_PROXY_GSI:
			/* If we are tunneling special qps, this is a UD qp.
			 * In this case we first add a UD segment targeting
			 * the tunnel qp, and then add a header with address
			 * information */
			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
						ud_wr(wr),
						qp->mlx4_ib_qp_type);
			wqe += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			build_tunnel_header(ud_wr(wr), wqe, &seglen);
			wqe += seglen;
			size += seglen / 16;
			break;

		case MLX4_IB_QPT_SMI:
		case MLX4_IB_QPT_GSI:
			err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
					&seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe += seglen;
			size += seglen / 16;
			break;

		default:
			break;
		}

		/*
		 * Write data segments in reverse order, so as to
		 * overwrite cacheline stamp last within each
		 * cacheline.  This avoids issues with WQE
		 * prefetching.
		 */

		dseg = wqe;
		dseg += wr->num_sge - 1;
		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);

		/* Add one more inline data segment for ICRC for MLX sends */
		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
			     qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
			     qp->mlx4_ib_qp_type &
			     (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			set_mlx_icrc_seg(dseg + 1);
			size += sizeof (struct mlx4_wqe_data_seg) / 16;
		}

		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
			set_data_seg(dseg, wr->sg_list + i);

		/*
		 * Possibly overwrite stamping in cacheline with LSO
		 * segment only after making sure all data segments
		 * are written.
		 */
		wmb();
		*lso_wqe = lso_hdr_sz;

		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
				    MLX4_WQE_CTRL_FENCE : 0) | size;

		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		wmb();

		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
			*bad_wr = wr;
			err = -EINVAL;
			goto out;
		}

		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;

		stamp = ind + qp->sq_spare_wqes;
		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);

		/*
		 * We can improve latency by not stamping the last
		 * send queue WQE until after ringing the doorbell, so
		 * only stamp here if there are still more WQEs to post.
		 *
		 * Same optimization applies to padding with NOP wqe
		 * in case of WQE shrinking (used to prevent wrap-around
		 * in the middle of WR).
		 */
		if (wr->next) {
			stamp_send_wqe(qp, stamp, size * 16);
			ind = pad_wraparound(qp, ind);
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		writel(qp->doorbell_qpn,
		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();

		stamp_send_wqe(qp, stamp, size * 16);

		ind = pad_wraparound(qp, ind);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

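/*
 * Post a chain of receive work requests.  For proxy special QPs the
 * first scatter entry of each WQE is reserved for the tunnel header
 * that the hardware delivers in front of the MAD payload.
 */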
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int max_gs;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);

	max_gs = qp->rq.max_gs;
	spin_lock_irqsave(&qp->rq.lock, flags);

	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			ib_dma_sync_single_for_device(ibqp->device,
						      qp->sqp_proxy_rcv[ind].map,
						      sizeof (struct mlx4_ib_proxy_sqp_hdr),
						      DMA_FROM_DEVICE);
			scat->byte_count =
				cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
			/* use dma lkey from upper layer entry */
			scat->lkey = cpu_to_be32(wr->sg_list->lkey);
			scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
			scat++;
			max_gs--;
		}

		for (i = 0; i < wr->num_sge; ++i)
			__set_data_seg(scat + i, wr->sg_list + i);

		if (i < max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr = 0;
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}

static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default: return -1;
	}
}

static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx4_qp_path *path)
{
	struct mlx4_dev *dev = ibdev->dev;
	int is_eth;

	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
		IB_LINK_LAYER_ETHERNET;
	if (is_eth)
		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
		((path->sched_queue & 4) << 1);
	else
		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;

	ib_ah_attr->dlid = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
	ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}

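/*
 * Query QP attributes.  Unless the QP is in RESET (no firmware context
 * to read), the QP context is fetched from the device and translated
 * back into ib_qp_attr fields.
 */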
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err) {
		err = -EINVAL;
		goto out;
	}

	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp->state = to_ib_qp_state(mlx4_state);
	qp_attr->qp_state = qp->state;
	qp_attr->path_mtu = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey = be32_to_cpu(context.qkey);
	qp_attr->rq_psn = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/*
	 * We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX4_IB_QP_LSO)
		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	if (qp->flags & MLX4_IB_QP_NETIF)
		qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;

	qp_init_attr->sq_sig_type =
		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}