/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"
#define ISCSI_ISER_MAX_CONN     8
#define ISER_MAX_RX_LEN         (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN         (ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN         (ISER_MAX_RX_LEN + ISER_MAX_TX_LEN)
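
/*
 * Note: a single CQ may end up serving up to ISCSI_ISER_MAX_CONN
 * connections, each posting at most ISER_QP_MAX_RECV_DTOS receives and
 * ISER_QP_MAX_REQ_DTOS sends, so ISER_MAX_CQ_LEN sizes the CQ for the
 * worst-case completion backlog. For example (DTO values assumed for
 * illustration), with 512 recv and 512 send DTOs per connection a shared
 * CQ needs 8 * (512 + 512) = 8192 entries.
 */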
static int iser_cq_poll_limit = 512;

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
        iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
        iser_err("got qp event %d\n", cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
                               struct ib_event *event)
{
        iser_err("async event %d on device %s port %d\n", event->event,
                 event->device->name, event->element.port_num);
}
/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
        struct ib_device_attr *dev_attr = &device->dev_attr;
        int ret, i;

        ret = ib_query_device(device->ib_device, dev_attr);
        if (ret) {
                pr_warn("Query device failed for %s\n", device->ib_device->name);
                return ret;
        }

        /* Assign function handles - based on FMR support */
        if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
            device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
                iser_info("FMR supported, using FMR for registration\n");
                device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
                device->iser_free_rdma_reg_res = iser_free_fmr_pool;
                device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
                device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
        } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                iser_info("FastReg supported, using FastReg for registration\n");
                device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
                device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
                device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
                device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
        } else {
                iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
                return -1;
        }
        device->comps_used = min(ISER_MAX_CQ,
                                 device->ib_device->num_comp_vectors);
        iser_info("using %d CQs, device %s supports %d vectors\n",
                  device->comps_used, device->ib_device->name,
                  device->ib_device->num_comp_vectors);

        device->pd = ib_alloc_pd(device->ib_device);
        if (IS_ERR(device->pd))
                goto pd_err;

        for (i = 0; i < device->comps_used; i++) {
                struct iser_comp *comp = &device->comps[i];

                comp->device = device;
                comp->cq = ib_create_cq(device->ib_device,
                                        iser_cq_callback,
                                        iser_cq_event_callback,
                                        (void *)comp,
                                        ISER_MAX_CQ_LEN, i);
                if (IS_ERR(comp->cq)) {
                        comp->cq = NULL;
                        goto cq_err;
                }

                if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP))
                        goto cq_err;

                tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
                             (unsigned long)comp);
        }

        device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
                                   IB_ACCESS_REMOTE_WRITE |
                                   IB_ACCESS_REMOTE_READ);
        if (IS_ERR(device->mr))
                goto dma_mr_err;

        INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
                              iser_event_handler);
        if (ib_register_event_handler(&device->event_handler))
                goto handler_err;

        return 0;

handler_err:
        ib_dereg_mr(device->mr);
dma_mr_err:
        for (i = 0; i < device->comps_used; i++)
                tasklet_kill(&device->comps[i].tasklet);
cq_err:
        for (i = 0; i < device->comps_used; i++) {
                struct iser_comp *comp = &device->comps[i];

                if (comp->cq)
                        ib_destroy_cq(comp->cq);
        }
        ib_dealloc_pd(device->pd);
pd_err:
        iser_err("failed to allocate an IB resource\n");
        return -1;
}
/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
        int i;

        BUG_ON(device->mr == NULL);

        for (i = 0; i < device->comps_used; i++) {
                struct iser_comp *comp = &device->comps[i];

                tasklet_kill(&comp->tasklet);
                ib_destroy_cq(comp->cq);
                comp->cq = NULL;
        }

        (void)ib_unregister_event_handler(&device->event_handler);
        (void)ib_dereg_mr(device->mr);
        (void)ib_dealloc_pd(device->pd);

        device->mr = NULL;
}
/**
 * iser_create_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
        struct iser_device *device = ib_conn->device;
        struct ib_fmr_pool_param params;
        int ret = -ENOMEM;

        ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
                                        (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
                                        GFP_KERNEL);
        if (!ib_conn->fmr.page_vec)
                return ret;

        ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);

        params.page_shift = SHIFT_4K;
        /* when the first/last SG element are not start/end *
         * page aligned, the map would be of N+1 pages */
        params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
        /* make the pool size twice the max number of SCSI commands *
         * the ML is expected to queue, watermark for unmap at 50%  */
        params.pool_size = cmds_max * 2;
        params.dirty_watermark = cmds_max;
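        /*
         * Worked example (cmds_max value assumed for illustration): for
         * cmds_max = 128 the pool holds 256 FMRs and unmapping is flushed
         * once 128 dirty entries accumulate, i.e. at 50% occupancy.
         */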
        params.cache = 0;
        params.flush_function = NULL;
        params.access = (IB_ACCESS_LOCAL_WRITE  |
                         IB_ACCESS_REMOTE_WRITE |
                         IB_ACCESS_REMOTE_READ);

        ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
        if (!IS_ERR(ib_conn->fmr.pool))
                return 0;

        /* no FMR => no need for page_vec */
        kfree(ib_conn->fmr.page_vec);
        ib_conn->fmr.page_vec = NULL;

        ret = PTR_ERR(ib_conn->fmr.pool);
        ib_conn->fmr.pool = NULL;
        if (ret != -ENOSYS) {
                iser_err("FMR allocation failed, err %d\n", ret);
                return ret;
        } else {
                iser_warn("FMRs are not supported, using unaligned mode\n");
                return 0;
        }
}
/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
        iser_info("freeing conn %p fmr pool %p\n",
                  ib_conn, ib_conn->fmr.pool);

        if (ib_conn->fmr.pool != NULL)
                ib_destroy_fmr_pool(ib_conn->fmr.pool);

        ib_conn->fmr.pool = NULL;

        kfree(ib_conn->fmr.page_vec);
        ib_conn->fmr.page_vec = NULL;
}
static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
                         bool pi_enable, struct fast_reg_descriptor *desc)
{
        int ret;

        desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
                                                      ISCSI_ISER_SG_TABLESIZE + 1);
        if (IS_ERR(desc->data_frpl)) {
                ret = PTR_ERR(desc->data_frpl);
                iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
                         ret);
                return PTR_ERR(desc->data_frpl);
        }

        desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
        if (IS_ERR(desc->data_mr)) {
                ret = PTR_ERR(desc->data_mr);
                iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
                goto fast_reg_mr_failure;
        }
        desc->reg_indicators |= ISER_DATA_KEY_VALID;
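
        /*
         * reg_indicators tracks which of this descriptor's keys currently
         * hold a valid registration; ISER_FASTREG_PROTECTED is set only on
         * registrations that have T10-PI signature checking armed (see
         * iser_check_task_pi_status()).
         */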
        if (pi_enable) {
                struct ib_mr_init_attr mr_init_attr = {0};
                struct iser_pi_context *pi_ctx = NULL;

                desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
                if (!desc->pi_ctx) {
                        iser_err("Failed to allocate pi context\n");
                        ret = -ENOMEM;
                        goto pi_ctx_alloc_failure;
                }
                pi_ctx = desc->pi_ctx;

                pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
                                                ISCSI_ISER_SG_TABLESIZE);
                if (IS_ERR(pi_ctx->prot_frpl)) {
                        ret = PTR_ERR(pi_ctx->prot_frpl);
                        iser_err("Failed to allocate prot frpl ret=%d\n",
                                 ret);
                        goto prot_frpl_failure;
                }

                pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
                                                ISCSI_ISER_SG_TABLESIZE + 1);
                if (IS_ERR(pi_ctx->prot_mr)) {
                        ret = PTR_ERR(pi_ctx->prot_mr);
                        iser_err("Failed to allocate prot frmr ret=%d\n",
                                 ret);
                        goto prot_mr_failure;
                }
                desc->reg_indicators |= ISER_PROT_KEY_VALID;

                mr_init_attr.max_reg_descriptors = 2;
                mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
                pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
                if (IS_ERR(pi_ctx->sig_mr)) {
                        ret = PTR_ERR(pi_ctx->sig_mr);
                        iser_err("Failed to allocate signature enabled mr err=%d\n",
                                 ret);
                        goto sig_mr_failure;
                }
                desc->reg_indicators |= ISER_SIG_KEY_VALID;
        }
        desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;

        iser_dbg("Create fr_desc %p page_list %p\n",
                 desc, desc->data_frpl->page_list);

        return 0;
sig_mr_failure:
        ib_dereg_mr(desc->pi_ctx->prot_mr);
prot_mr_failure:
        ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
prot_frpl_failure:
        kfree(desc->pi_ctx);
pi_ctx_alloc_failure:
        ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
        ib_free_fast_reg_page_list(desc->data_frpl);

        return ret;
}
/**
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
        struct iser_device *device = ib_conn->device;
        struct fast_reg_descriptor *desc;
        int i, ret;

        INIT_LIST_HEAD(&ib_conn->fastreg.pool);
        ib_conn->fastreg.pool_size = 0;
        for (i = 0; i < cmds_max; i++) {
                desc = kzalloc(sizeof(*desc), GFP_KERNEL);
                if (!desc) {
                        iser_err("Failed to allocate a new fast_reg descriptor\n");
                        ret = -ENOMEM;
                        goto err;
                }

                ret = iser_create_fastreg_desc(device->ib_device, device->pd,
                                               ib_conn->pi_support, desc);
                if (ret) {
                        iser_err("Failed to create fastreg descriptor err=%d\n",
                                 ret);
                        kfree(desc);
                        goto err;
                }

                list_add_tail(&desc->list, &ib_conn->fastreg.pool);
                ib_conn->fastreg.pool_size++;
        }

        return 0;

err:
        iser_free_fastreg_pool(ib_conn);
        return ret;
}
/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
        struct fast_reg_descriptor *desc, *tmp;
        int i = 0;

        if (list_empty(&ib_conn->fastreg.pool))
                return;

        iser_info("freeing conn %p fr pool\n", ib_conn);

        list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
                list_del(&desc->list);
                ib_free_fast_reg_page_list(desc->data_frpl);
                ib_dereg_mr(desc->data_mr);
                if (desc->pi_ctx) {
                        ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
                        ib_dereg_mr(desc->pi_ctx->prot_mr);
                        ib_destroy_mr(desc->pi_ctx->sig_mr);
                        kfree(desc->pi_ctx);
                }
                kfree(desc);
                ++i;
        }

        if (i < ib_conn->fastreg.pool_size)
                iser_warn("pool still has %d regions registered\n",
                          ib_conn->fastreg.pool_size - i);
}
/**
 * iser_create_ib_conn_res - creates a Queue-Pair (QP) for the connection
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
        struct iser_device *device;
        struct ib_qp_init_attr init_attr;
        int ret = -ENOMEM;
        int index, min_index = 0;

        BUG_ON(ib_conn->device == NULL);

        device = ib_conn->device;

        memset(&init_attr, 0, sizeof init_attr);

        mutex_lock(&ig.connlist_mutex);
        /* select the CQ with the minimal number of usages */
        for (index = 0; index < device->comps_used; index++) {
                if (device->comps[index].active_qps <
                    device->comps[min_index].active_qps)
                        min_index = index;
        }
        ib_conn->comp = &device->comps[min_index];
        ib_conn->comp->active_qps++;
        mutex_unlock(&ig.connlist_mutex);
        iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

        init_attr.event_handler = iser_qp_event_callback;
        init_attr.qp_context = (void *)ib_conn;
        init_attr.send_cq = ib_conn->comp->cq;
        init_attr.recv_cq = ib_conn->comp->cq;
        init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
        init_attr.cap.max_send_sge = 2;
        init_attr.cap.max_recv_sge = 1;
        init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        init_attr.qp_type = IB_QPT_RC;
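        /*
         * A T10-PI enabled QP is assumed to consume extra send WRs per
         * command (fast registration of the protection buffers plus the
         * signature WR), hence the deeper send queue below.
         */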
        if (ib_conn->pi_support) {
                init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS;
                init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
        } else {
                init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
        }

        ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
        if (ret)
                goto out_err;

        ib_conn->qp = ib_conn->cma_id->qp;
        iser_info("setting conn %p cma_id %p qp %p\n",
                  ib_conn, ib_conn->cma_id,
                  ib_conn->cma_id->qp);
        return ret;

out_err:
        iser_err("unable to alloc mem or create resource, err %d\n", ret);
        return ret;
}
/**
 * Based on the resolved device node GUID see if there is an already
 * allocated device for this device node. If there isn't, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
        struct iser_device *device;

        mutex_lock(&ig.device_list_mutex);

        list_for_each_entry(device, &ig.device_list, ig_list)
                /* find if there's a match using the node GUID */
                if (device->ib_device->node_guid == cma_id->device->node_guid)
                        goto inc_refcnt;

        device = kzalloc(sizeof *device, GFP_KERNEL);
        if (device == NULL)
                goto out;

        /* assign this cm_id's ib_device to the new iser device */
        device->ib_device = cma_id->device;
        /* init the device and link it into ig device list */
        if (iser_create_device_ib_res(device)) {
                kfree(device);
                device = NULL;
                goto out;
        }
        list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
        device->refcount++;
out:
        mutex_unlock(&ig.device_list_mutex);
        return device;
}
/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
        mutex_lock(&ig.device_list_mutex);
        device->refcount--;
        iser_info("device %p refcount %d\n", device, device->refcount);
        if (!device->refcount) {
                iser_free_device_ib_res(device);
                list_del(&device->ig_list);
                kfree(device);
        }
        mutex_unlock(&ig.device_list_mutex);
}
/**
 * Called with state mutex held
 */
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
                                     enum iser_conn_state comp,
                                     enum iser_conn_state exch)
{
        int ret;

        ret = (iser_conn->state == comp);
        if (ret)
                iser_conn->state = exch;

        return ret;
}
void iser_release_work(struct work_struct *work)
{
        struct iser_conn *iser_conn;

        iser_conn = container_of(work, struct iser_conn, release_work);

        /* Wait for conn_stop to complete */
        wait_for_completion(&iser_conn->stop_completion);
        /* Wait for IB resources cleanup to complete */
        wait_for_completion(&iser_conn->ib_completion);

        mutex_lock(&iser_conn->state_mutex);
        iser_conn->state = ISER_CONN_DOWN;
        mutex_unlock(&iser_conn->state_mutex);

        iser_conn_release(iser_conn);
}
/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy_device: indicator if we need to try to release
 *      the iser device (only iscsi shutdown and DEVICE_REMOVAL
 *      will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
                                  bool destroy_device)
{
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        iser_info("freeing conn %p cma_id %p qp %p\n",
                  iser_conn, ib_conn->cma_id, ib_conn->qp);

        iser_free_rx_descriptors(iser_conn);

        if (ib_conn->qp != NULL) {
                ib_conn->comp->active_qps--;
                rdma_destroy_qp(ib_conn->cma_id);
                ib_conn->qp = NULL;
        }

        if (destroy_device && device != NULL) {
                iser_device_try_release(device);
                ib_conn->device = NULL;
        }
}
/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
        struct ib_conn *ib_conn = &iser_conn->ib_conn;

        mutex_lock(&ig.connlist_mutex);
        list_del(&iser_conn->conn_list);
        mutex_unlock(&ig.connlist_mutex);

        mutex_lock(&iser_conn->state_mutex);
        if (iser_conn->state != ISER_CONN_DOWN)
                iser_warn("iser conn %p state %d, expected state down.\n",
                          iser_conn, iser_conn->state);
        /*
         * In case we never got to bind stage, we still need to
         * release IB resources (which is safe to call more than once).
         */
        iser_free_ib_conn_res(iser_conn, true);
        mutex_unlock(&iser_conn->state_mutex);

        if (ib_conn->cma_id != NULL) {
                rdma_destroy_id(ib_conn->cma_id);
                ib_conn->cma_id = NULL;
        }

        kfree(iser_conn);
}
/**
 * Triggers the start of the disconnect procedures and waits for them to
 * be done. Called with state mutex held.
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        int err = 0;

        /* terminate the iser conn only if the conn state is UP */
        if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
                                       ISER_CONN_TERMINATING))
                return 0;

        iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

        /* suspend queuing of new iscsi commands */
        if (iser_conn->iscsi_conn)
                iscsi_suspend_queue(iser_conn->iscsi_conn);

        /*
         * In case we didn't already clean up the cma_id (peer initiated
         * a disconnection), we need to cause the CMA to change the QP
         * state to ERROR.
         */
        if (ib_conn->cma_id) {
                err = rdma_disconnect(ib_conn->cma_id);
                if (err)
                        iser_err("Failed to disconnect, conn: 0x%p err %d\n",
                                 iser_conn, err);

                wait_for_completion(&ib_conn->flush_comp);
        }

        return 1;
}
/**
 * Called with state mutex held
 */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
        struct iser_conn *iser_conn;

        iser_conn = (struct iser_conn *)cma_id->context;
        iser_conn->state = ISER_CONN_DOWN;
}
/**
 * Called with state mutex held
 */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
        struct iser_device *device;
        struct iser_conn *iser_conn;
        struct ib_conn *ib_conn;
        int ret;

        iser_conn = (struct iser_conn *)cma_id->context;
        if (iser_conn->state != ISER_CONN_PENDING)
                /* bailout */
                return;

        ib_conn = &iser_conn->ib_conn;
        device = iser_device_find_by_ib_device(cma_id);
        if (!device) {
                iser_err("device lookup/creation failed\n");
                iser_connect_error(cma_id);
                return;
        }

        ib_conn->device = device;

        /* connection T10-PI support */
        if (iser_pi_enable) {
                if (!(device->dev_attr.device_cap_flags &
                      IB_DEVICE_SIGNATURE_HANDOVER)) {
                        iser_warn("T10-PI requested but not supported on %s, "
                                  "continue without T10-PI\n",
                                  ib_conn->device->ib_device->name);
                        ib_conn->pi_support = false;
                } else {
                        ib_conn->pi_support = true;
                }
        }

        ret = rdma_resolve_route(cma_id, 1000);
        if (ret) {
                iser_err("resolve route failed: %d\n", ret);
                iser_connect_error(cma_id);
                return;
        }
}
/**
 * Called with state mutex held
 */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
        struct rdma_conn_param conn_param;
        int ret;
        struct iser_cm_hdr req_hdr;
        struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        if (iser_conn->state != ISER_CONN_PENDING)
                /* bailout */
                return;

        ret = iser_create_ib_conn_res(ib_conn);
        if (ret)
                goto failure;

        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
        conn_param.initiator_depth = 1;
        conn_param.retry_count = 7;
        conn_param.rnr_retry_count = 6;
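        /*
         * retry_count and rnr_retry_count are 3-bit fields on the wire, so
         * 7 is the maximum; an RNR retry count of 7 would mean "retry
         * forever", which is presumably why 6 is used here.
         */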

        memset(&req_hdr, 0, sizeof(req_hdr));
        req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
                         ISER_SEND_W_INV_NOT_SUPPORTED);
        conn_param.private_data = (void *)&req_hdr;
        conn_param.private_data_len = sizeof(struct iser_cm_hdr);
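        /*
         * The CM REQ private data carries the iSER header: this initiator
         * declares that it supports neither zero-based virtual addressing
         * nor send-with-invalidate, so the target must not rely on either.
         */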
        ret = rdma_connect(cma_id, &conn_param);
        if (ret) {
                iser_err("failure connecting: %d\n", ret);
                goto failure;
        }

        return;
failure:
        iser_connect_error(cma_id);
}
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
        struct iser_conn *iser_conn;
        struct ib_qp_attr attr;
        struct ib_qp_init_attr init_attr;

        iser_conn = (struct iser_conn *)cma_id->context;
        if (iser_conn->state != ISER_CONN_PENDING)
                /* bailout */
                return;

        (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
        iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

        iser_conn->state = ISER_CONN_UP;
        complete(&iser_conn->up_completion);
}
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
        struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

        if (iser_conn_terminate(iser_conn)) {
                if (iser_conn->iscsi_conn)
                        iscsi_conn_failure(iser_conn->iscsi_conn,
                                           ISCSI_ERR_CONN_FAILED);
                else
                        iser_err("iscsi_iser connection isn't bound\n");
        }
}
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
                                 bool destroy_device)
{
        struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

        /*
         * We are not guaranteed that we visited disconnected_handler
         * by now, call it here to be safe that we handle CM drep
         * and flush errors.
         */
        iser_disconnected_handler(cma_id);
        iser_free_ib_conn_res(iser_conn, destroy_device);
        complete(&iser_conn->ib_completion);
}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        struct iser_conn *iser_conn;
        int ret = 0;

        iser_conn = (struct iser_conn *)cma_id->context;
        iser_info("event %d status %d conn %p id %p\n",
                  event->event, event->status, cma_id->context, cma_id);

        mutex_lock(&iser_conn->state_mutex);
        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
                iser_addr_handler(cma_id);
                break;
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                iser_route_handler(cma_id);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                iser_connected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_ADDR_ERROR:
        case RDMA_CM_EVENT_ROUTE_ERROR:
        case RDMA_CM_EVENT_CONNECT_ERROR:
        case RDMA_CM_EVENT_UNREACHABLE:
        case RDMA_CM_EVENT_REJECTED:
                iser_connect_error(cma_id);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
        case RDMA_CM_EVENT_ADDR_CHANGE:
                iser_disconnected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                /*
                 * we *must* destroy the device as we cannot rely
                 * on iscsid to be around to initiate error handling.
                 * also implicitly destroy the cma_id.
                 */
                iser_cleanup_handler(cma_id, true);
                iser_conn->ib_conn.cma_id = NULL;
                ret = 1;
                break;
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
                iser_cleanup_handler(cma_id, false);
                break;
        default:
                iser_err("Unexpected RDMA CM event (%d)\n", event->event);
                break;
        }
        mutex_unlock(&iser_conn->state_mutex);

        return ret;
}
void iser_conn_init(struct iser_conn *iser_conn)
{
        iser_conn->state = ISER_CONN_INIT;
        iser_conn->ib_conn.post_recv_buf_count = 0;
        atomic_set(&iser_conn->ib_conn.post_send_buf_count, 0);
        init_completion(&iser_conn->ib_conn.flush_comp);
        init_completion(&iser_conn->stop_completion);
        init_completion(&iser_conn->ib_completion);
        init_completion(&iser_conn->up_completion);
        INIT_LIST_HEAD(&iser_conn->conn_list);
        spin_lock_init(&iser_conn->ib_conn.lock);
        mutex_init(&iser_conn->state_mutex);
}
/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn *iser_conn,
                 struct sockaddr *src_addr,
                 struct sockaddr *dst_addr,
                 int non_blocking)
{
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        int err = 0;

        mutex_lock(&iser_conn->state_mutex);

        sprintf(iser_conn->name, "%pISp", dst_addr);

        iser_info("connecting to: %s\n", iser_conn->name);

        /* the device is known only --after-- address resolution */
        ib_conn->device = NULL;

        iser_conn->state = ISER_CONN_PENDING;

        ib_conn->cma_id = rdma_create_id(iser_cma_handler,
                                         (void *)iser_conn,
                                         RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(ib_conn->cma_id)) {
                err = PTR_ERR(ib_conn->cma_id);
                iser_err("rdma_create_id failed: %d\n", err);
                goto id_failure;
        }

        err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
        if (err) {
                iser_err("rdma_resolve_addr failed: %d\n", err);
                goto addr_failure;
        }

        if (!non_blocking) {
                wait_for_completion_interruptible(&iser_conn->up_completion);

                if (iser_conn->state != ISER_CONN_UP) {
                        err = -EIO;
                        goto connect_failure;
                }
        }
        mutex_unlock(&iser_conn->state_mutex);

        mutex_lock(&ig.connlist_mutex);
        list_add(&iser_conn->conn_list, &ig.connlist);
        mutex_unlock(&ig.connlist_mutex);
        return 0;

id_failure:
        ib_conn->cma_id = NULL;
addr_failure:
        iser_conn->state = ISER_CONN_DOWN;
connect_failure:
        mutex_unlock(&iser_conn->state_mutex);
        iser_conn_release(iser_conn);
        return err;
}
/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct ib_conn *ib_conn,
                      struct iser_page_vec *page_vec,
                      struct iser_mem_reg *mem_reg)
{
        struct ib_pool_fmr *mem;
        u64 io_addr;
        u64 *page_list;
        int status;

        page_list = page_vec->pages;
        io_addr = page_list[0];
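
        /*
         * The first 4K page address doubles as the base I/O address of the
         * FMR mapping; mem_reg->va is adjusted below by the byte offset of
         * the data within that first page.
         */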
        mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
                                   page_list,
                                   page_vec->length,
                                   io_addr);

        if (IS_ERR(mem)) {
                status = (int)PTR_ERR(mem);
                iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
                return status;
        }

        mem_reg->lkey = mem->fmr->lkey;
        mem_reg->rkey = mem->fmr->rkey;
        mem_reg->len = page_vec->length * SIZE_4K;
        mem_reg->va = io_addr;
        mem_reg->is_mr = 1;
        mem_reg->mem_h = (void *)mem;

        mem_reg->va += page_vec->offset;
        mem_reg->len = page_vec->data_size;

        iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
                 "entry[0]: (0x%08lx,%ld)] -> "
                 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
                 page_vec, page_vec->length,
                 (unsigned long)page_vec->pages[0],
                 (unsigned long)page_vec->data_size,
                 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
                 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);

        return 0;
}
/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
                        enum iser_data_dir cmd_dir)
{
        struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
        int ret;

        if (!reg->is_mr)
                return;

        iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

        ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
        if (ret)
                iser_err("ib_fmr_pool_unmap failed %d\n", ret);

        reg->mem_h = NULL;
}
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
                            enum iser_data_dir cmd_dir)
{
        struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
        struct iser_conn *iser_conn = iser_task->iser_conn;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct fast_reg_descriptor *desc = reg->mem_h;

        if (!reg->is_mr)
                return;

        reg->mem_h = NULL;
        reg->is_mr = 0;
        spin_lock_bh(&ib_conn->lock);
        list_add_tail(&desc->list, &ib_conn->fastreg.pool);
        spin_unlock_bh(&ib_conn->lock);
}
int iser_post_recvl(struct iser_conn *iser_conn)
{
        struct ib_recv_wr rx_wr, *rx_wr_failed;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct ib_sge sge;
        int ib_ret;

        sge.addr = iser_conn->login_resp_dma;
        sge.length = ISER_RX_LOGIN_SIZE;
        sge.lkey = ib_conn->device->mr->lkey;

        rx_wr.wr_id = (unsigned long)iser_conn->login_resp_buf;
        rx_wr.sg_list = &sge;
        rx_wr.num_sge = 1;
        rx_wr.next = NULL;

        ib_conn->post_recv_buf_count++;
        ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
        if (ib_ret) {
                iser_err("ib_post_recv failed ret=%d\n", ib_ret);
                ib_conn->post_recv_buf_count--;
        }

        return ib_ret;
}
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
        struct ib_recv_wr *rx_wr, *rx_wr_failed;
        int i, ib_ret;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        unsigned int my_rx_head = iser_conn->rx_desc_head;
        struct iser_rx_desc *rx_desc;

        for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
                rx_desc = &iser_conn->rx_descs[my_rx_head];
                rx_wr->wr_id = (unsigned long)rx_desc;
                rx_wr->sg_list = &rx_desc->rx_sg;
                rx_wr->num_sge = 1;
                rx_wr->next = rx_wr + 1;
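                /*
                 * The recv ring holds a power-of-two number of descriptors,
                 * so masking wraps the head index for free: e.g. (assuming
                 * 512 descriptors) (511 + 1) & 511 == 0.
                 */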
                my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
        }

        rx_wr--;
        rx_wr->next = NULL; /* mark end of work requests list */

        ib_conn->post_recv_buf_count += count;
        ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
        if (ib_ret) {
                iser_err("ib_post_recv failed ret=%d\n", ib_ret);
                ib_conn->post_recv_buf_count -= count;
        } else
                iser_conn->rx_desc_head = my_rx_head;

        return ib_ret;
}
/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
        int ib_ret;
        struct ib_send_wr send_wr, *send_wr_failed;

        ib_dma_sync_single_for_device(ib_conn->device->ib_device,
                                      tx_desc->dma_addr, ISER_HEADERS_LEN,
                                      DMA_TO_DEVICE);

        send_wr.next = NULL;
        send_wr.wr_id = (unsigned long)tx_desc;
        send_wr.sg_list = tx_desc->tx_sg;
        send_wr.num_sge = tx_desc->num_sge;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        atomic_inc(&ib_conn->post_send_buf_count);

        ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
        if (ib_ret) {
                iser_err("ib_post_send failed, ret:%d\n", ib_ret);
                atomic_dec(&ib_conn->post_send_buf_count);
        }

        return ib_ret;
}
/**
 * is_iser_tx_desc - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @iser_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id)
{
        void *start = iser_conn->rx_descs;
        int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs);

        if (wr_id >= start && wr_id < start + len)
                return false;

        return true;
}
/**
 * iser_handle_comp_error() - Handle error completion
 * @ib_conn: connection RDMA resources
 * @wc: work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify iscsi layer that
 *        connection is failed (in case we passed bind stage).
 */
static void
iser_handle_comp_error(struct ib_conn *ib_conn,
                       struct ib_wc *wc)
{
        struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
                                                   ib_conn);

        if (wc->status != IB_WC_WR_FLUSH_ERR)
                if (iser_conn->iscsi_conn)
                        iscsi_conn_failure(iser_conn->iscsi_conn,
                                           ISCSI_ERR_CONN_FAILED);

        if (is_iser_tx_desc(iser_conn, (void *)wc->wr_id)) {
                struct iser_tx_desc *desc = (struct iser_tx_desc *)wc->wr_id;

                atomic_dec(&ib_conn->post_send_buf_count);
                if (desc->type == ISCSI_TX_DATAOUT)
                        kmem_cache_free(ig.desc_cache, desc);
        } else {
                ib_conn->post_recv_buf_count--;
        }
}
/**
 * iser_handle_wc - handle a single work completion
 * @wc: work completion
 *
 * Soft-IRQ context, work completion can be either
 * SEND or RECV, and can turn out successful or
 * with error (or flush error).
 */
static void iser_handle_wc(struct ib_wc *wc)
{
        struct ib_conn *ib_conn;
        struct iser_tx_desc *tx_desc;
        struct iser_rx_desc *rx_desc;

        ib_conn = wc->qp->qp_context;
        if (wc->status == IB_WC_SUCCESS) {
                if (wc->opcode == IB_WC_RECV) {
                        rx_desc = (struct iser_rx_desc *)wc->wr_id;
                        iser_rcv_completion(rx_desc, wc->byte_len,
                                            ib_conn);
                } else if (wc->opcode == IB_WC_SEND) {
                        tx_desc = (struct iser_tx_desc *)wc->wr_id;
                        iser_snd_completion(tx_desc, ib_conn);
                        atomic_dec(&ib_conn->post_send_buf_count);
                } else {
                        iser_err("Unknown wc opcode %d\n", wc->opcode);
                }
        } else {
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        iser_err("wr id %llx status %d vend_err %x\n",
                                 wc->wr_id, wc->status, wc->vendor_err);
                else
                        iser_dbg("flush error: wr id %llx\n", wc->wr_id);

                if (wc->wr_id != ISER_FASTREG_LI_WRID)
                        iser_handle_comp_error(ib_conn, wc);

                /* complete in case all flush errors were consumed */
                if (ib_conn->post_recv_buf_count == 0 &&
                    atomic_read(&ib_conn->post_send_buf_count) == 0)
                        complete(&ib_conn->flush_comp);
        }
}
/**
 * iser_cq_tasklet_fn - iSER completion polling loop
 * @data: iSER completion context
 *
 * Soft-IRQ context, polling connection CQ until
 * either CQ was empty or we exhausted polling budget
 */
static void iser_cq_tasklet_fn(unsigned long data)
{
        struct iser_comp *comp = (struct iser_comp *)data;
        struct ib_cq *cq = comp->cq;
        struct ib_wc wc;
        int completed = 0;

        while (ib_poll_cq(cq, 1, &wc) == 1) {
                iser_handle_wc(&wc);

                if (++completed >= iser_cq_poll_limit)
                        break;
        }

        /*
         * It is assumed here that arming CQ only once its empty
         * would not cause interrupts to be missed.
         */
        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

        iser_dbg("got %d completions\n", completed);
}

static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
        struct iser_comp *comp = cq_context;

        tasklet_schedule(&comp->tasklet);
}
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
                             enum iser_data_dir cmd_dir, sector_t *sector)
{
        struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
        struct fast_reg_descriptor *desc = reg->mem_h;
        unsigned long sector_size = iser_task->sc->device->sector_size;
        struct ib_mr_status mr_status;
        int ret;

        if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
                desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
                ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
                                         IB_MR_CHECK_SIG_STATUS, &mr_status);
                if (ret) {
                        pr_err("ib_check_mr_status failed, ret %d\n", ret);
                        goto err;
                }

                if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
                        sector_t sector_off = mr_status.sig_err.sig_err_offset;
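
                        /*
                         * sig_err_offset counts bytes over the interleaved
                         * data+PI stream, where each sector is followed by
                         * an 8-byte DIF tuple; e.g. with 512-byte sectors,
                         * byte 1040 maps to sector 1040 / (512 + 8) = 2.
                         */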
                        do_div(sector_off, sector_size + 8);
                        *sector = scsi_get_lba(iser_task->sc) + sector_off;

                        pr_err("PI error found type %d at sector %llx "
                               "expected %x vs actual %x\n",
                               mr_status.sig_err.err_type,
                               (unsigned long long)*sector,
                               mr_status.sig_err.expected,
                               mr_status.sig_err.actual);

                        switch (mr_status.sig_err.err_type) {
                        case IB_SIG_BAD_GUARD:
                                return 0x1;
                        case IB_SIG_BAD_REFTAG:
                                return 0x3;
                        case IB_SIG_BAD_APPTAG:
                                return 0x2;
                        }
                }
        }

        return 0;
err:
        /* Not a lot we can do here, return ambiguous guard error */
        return 0x1;
}