/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"
#define	ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)
int isert_debug_level = 0;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default: 0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	isert_err("isert_qp_event_callback event: %d\n", e->event);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}
static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		isert_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}
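/*
 * Pick the least-loaded completion context on the device (under
 * device_list_mutex) and build an RC QP on top of it, sharing a single
 * CQ for both send and receive work requests.
 */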
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	struct isert_comp *comp;
	int ret, i, min = 0;

	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->cq;
	attr.recv_cq = comp->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	isert_dbg("isert_conn_setup_qp cma_id->device: %p\n",
		  cma_id->device);
	isert_dbg("isert_conn_setup_qp conn_pd->device: %p\n",
		  isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		goto err;
	}
	isert_conn->conn_qp = cma_id->qp;
	isert_dbg("rdma_create_qp() returned success\n");

	return 0;
err:
	mutex_lock(&device_list_mutex);
	comp->active_qps--;
	mutex_unlock(&device_list_mutex);

	return ret;
}
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	isert_dbg("isert_cq_event_callback event: %d\n", e->event);
}
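/*
 * Allocate the connection's ring of RX descriptors and DMA-map each one
 * for receive; on a mapping failure, unwind the descriptors mapped so far.
 */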
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;

	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}
static void isert_cq_work(struct work_struct *);
static void isert_cq_callback(struct ib_cq *, void *);
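/*
 * Set up per-device resources: choose fastreg vs. dma_mr RDMA handlers
 * based on device capabilities, and create one completion context (CQ
 * plus work item) per completion vector, bounded by the online CPU count.
 */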
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct ib_device_attr *dev_attr;
	int ret = 0, i;
	int max_cqe;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
					device->ib_device->num_comp_vectors));
	isert_info("Using %d CQs, %s supports %d vectors, "
		   "fastreg %d pi_capable %d\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors, device->use_fastreg,
		   device->pi_capable);

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps) {
		isert_err("Unable to allocate completion contexts\n");
		return -ENOMEM;
	}

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		INIT_WORK(&comp->work, isert_cq_work);
		comp->cq = ib_create_cq(device->ib_device,
					isert_cq_callback,
					isert_cq_event_callback,
					(void *)comp,
					max_cqe, i);
		if (IS_ERR(comp->cq)) {
			ret = PTR_ERR(comp->cq);
			comp->cq = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;

out_cq:
	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		if (comp->cq) {
			cancel_work_sync(&comp->work);
			ib_destroy_cq(comp->cq);
		}
	}
	kfree(device->comps);

	return ret;
}
static void
isert_free_device_ib_res(struct isert_device *device)
{
	int i;

	isert_info("device %p\n", device);

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		cancel_work_sync(&comp->work);
		ib_destroy_cq(comp->cq);
		comp->cq = NULL;
	}
	kfree(device->comps);
}
static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
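/*
 * Look up an existing isert_device by node GUID under device_list_mutex,
 * taking a reference; otherwise allocate and initialize a new one and
 * add it to the global device list.
 */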
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}
static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->conn_fr_pool))
		return;

	isert_dbg("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->conn_fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);
		}
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->conn_fr_pool_size)
		isert_warn("Pool still has %d regions registered\n",
			isert_conn->conn_fr_pool_size - i);
}
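/*
 * Allocate the protection-information context for a fastreg descriptor:
 * a fast registration page list and MR for protection data, plus a
 * signature-enabled MR used for T10-PI offload.
 */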
static int
isert_create_pi_ctx(struct fast_reg_descriptor *desc,
		    struct ib_device *device,
		    struct ib_pd *pd)
{
	struct ib_mr_init_attr mr_init_attr;
	struct pi_context *pi_ctx;
	int ret;

	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!pi_ctx) {
		isert_err("Failed to allocate pi context\n");
		return -ENOMEM;
	}

	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
					    ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_frpl)) {
		isert_err("Failed to allocate prot frpl err=%ld\n",
			  PTR_ERR(pi_ctx->prot_frpl));
		ret = PTR_ERR(pi_ctx->prot_frpl);
		goto err_pi_ctx;
	}

	pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_mr)) {
		isert_err("Failed to allocate prot frmr err=%ld\n",
			  PTR_ERR(pi_ctx->prot_mr));
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto err_prot_frpl;
	}
	desc->ind |= ISERT_PROT_KEY_VALID;

	memset(&mr_init_attr, 0, sizeof(mr_init_attr));
	mr_init_attr.max_reg_descriptors = 2;
	mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
	pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
	if (IS_ERR(pi_ctx->sig_mr)) {
		isert_err("Failed to allocate signature enabled mr err=%ld\n",
			  PTR_ERR(pi_ctx->sig_mr));
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto err_prot_mr;
	}

	desc->pi_ctx = pi_ctx;
	desc->ind |= ISERT_SIG_KEY_VALID;
	desc->ind &= ~ISERT_PROTECTED;

	return 0;

err_prot_mr:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
err_prot_frpl:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
err_pi_ctx:
	kfree(desc->pi_ctx);

	return ret;
}
static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	int ret;

	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		isert_err("Failed to allocate data frpl err=%ld\n",
			  PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		isert_err("Failed to allocate data frmr err=%ld\n",
			  PTR_ERR(fr_desc->data_mr));
		ret = PTR_ERR(fr_desc->data_mr);
		goto err_data_frpl;
	}
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

	isert_dbg("Created fr_desc %p\n", fr_desc);

	return 0;

err_data_frpl:
	ib_free_fast_reg_page_list(fr_desc->data_frpl);

	return ret;
}
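/*
 * Populate the connection's fastreg descriptor pool. The pool is sized
 * from the session tag count so each outstanding command can claim a
 * registration descriptor without blocking.
 */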
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->conn_device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->conn_fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			isert_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   isert_conn->conn_pd, fr_desc);
		if (ret) {
			isert_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		isert_conn->conn_fr_pool_size++;
	}

	isert_dbg("Creating conn %p fastreg pool size=%d",
		 isert_conn, isert_conn->conn_fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}
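/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocate and initialize the
 * isert_conn, set up the login buffers and their DMA mappings, locate
 * the isert_device, create PD/MR/QP, post the first login receive and
 * accept the connection.
 */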
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		isert_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->conn_wait);
	kref_init(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);
	spin_lock_init(&isert_conn->conn_lock);
	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);

	isert_conn->conn_cm_id = cma_id;

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		isert_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					(void *)isert_conn->login_rsp_buf,
					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	isert_conn->conn_device = device;
	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
	if (IS_ERR(isert_conn->conn_pd)) {
		ret = PTR_ERR(isert_conn->conn_pd);
		isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
			  isert_conn, ret);
		goto out_pd;
	}

	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
					   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(isert_conn->conn_mr)) {
		ret = PTR_ERR(isert_conn->conn_mr);
		isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
			  isert_conn, ret);
		goto out_mr;
	}

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	isert_info("np %p: Allow accept_np to continue\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	ib_dereg_mr(isert_conn->conn_mr);
out_mr:
	ib_dealloc_pd(isert_conn->conn_pd);
out_pd:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;

	isert_dbg("Entering isert_connect_release()\n");

	if (device && device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->conn_qp) {
		struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;

		isert_dbg("dec completion context %p active_qps\n", comp);
		mutex_lock(&device_list_mutex);
		comp->active_qps--;
		mutex_unlock(&device_list_mutex);

		ib_destroy_qp(isert_conn->conn_qp);
	}

	ib_dereg_mr(isert_conn->conn_mr);
	ib_dealloc_pd(isert_conn->conn_pd);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	isert_dbg("Leaving isert_connect_release\n");
}
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_info("conn %p\n", isert_conn);

	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
		isert_warn("conn %p connect_release is running\n", isert_conn);
		return;
	}

	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
		isert_conn->state = ISER_CONN_UP;
	mutex_unlock(&isert_conn->conn_mutex);
}
static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	isert_dbg("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with conn_mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		isert_info("Terminating conn %p state %d\n",
			   isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->conn_cm_id);
		if (err)
			isert_warn("Failed rdma_disconnect isert_conn %p\n",
				   isert_conn);
		break;
	default:
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
}
static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("isert np %p, handling event %d\n", isert_np, event);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->np_cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->np_cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->np_cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->np_cm_id));
			isert_np->np_cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}
static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;

	if (isert_np->np_cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	isert_info("conn %p completing conn_wait\n", isert_conn);
	complete(&isert_conn->conn_wait);

	return 0;
}
static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_put_conn(isert_conn);

	return -1;
}
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	isert_dbg("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
				event->event, ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
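/*
 * Post @count receive work requests from the circular conn_rx_descs
 * ring, chaining them into a single ib_post_recv() call.
 */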
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id	= (uintptr_t)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}
static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	= NULL;
	send_wr.wr_id	= (uintptr_t)tx_desc;
	send_wr.sg_list	= tx_desc->tx_sg;
	send_wr.num_sge	= tx_desc->num_sge;
	send_wr.opcode	= IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}
static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr	= tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	isert_dbg("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		  " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		  tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
}
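/*
 * Post a single receive for the dedicated login request buffer; used
 * during the login phase before the RX descriptor ring exists.
 */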
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		isert_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	isert_dbg("ib_post_recv() returned success\n");
	return ret;
}
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr	= isert_conn->login_rsp_dma;
		tx_dsg->length	= length;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->conn_device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					isert_err("Conn: %p failed to create"
					       " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->conn_mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->conn_mutex);
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min	= login_req->min_version;
		login->version_max	= login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
		login->init_task_tag	= login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid		= be16_to_cpu(login_req->cid);
		login->tsih		= be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		  size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->conn_login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}
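/*
 * Allocate an iscsi_cmd together with its isert_cmd private area, and
 * wire up the back-pointers to the connection.
 */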
static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;

	return cmd;
}
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		  sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		  unsol_data_len, cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		  sg_start, sg_off, sg_nents, &rx_desc->data[0],
		  unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */
	return iscsit_process_nop_out(conn, cmd, hdr);
}
static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
		isert_err("Unable to allocate text_in of payload_length: %u\n",
			  payload_length);
		return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			" ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}
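/*
 * Parse the iSER header: extract the remote read/write STags and VAs
 * when the RSV/WSV bits are set, then dispatch on the iSCSI opcode.
 */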
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    u32 xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		isert_dbg("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		isert_dbg("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->conn_mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->conn_mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	isert_dbg("iSERT: Decremented post_recv_buf_count: %d\n",
		  isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			isert_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}
static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
					  ISCSI_ISER_SG_TABLESIZE);
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
					PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		isert_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

	isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		  isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
}
static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
	memset(data, 0, sizeof(*data));
}
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("isert_unmap_cmd: %p\n", isert_cmd);

	if (wr->data.sg) {
		isert_dbg("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	if (wr->send_wr) {
		isert_dbg("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
		kfree(wr->send_wr);
		wr->send_wr = NULL;
	}

	if (wr->ib_sge) {
		isert_dbg("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	LIST_HEAD(unmap_list);

	isert_dbg("unreg_fastreg_cmd: %p\n", isert_cmd);

	if (wr->fr_desc) {
		isert_dbg("unreg_fastreg_cmd: %p free fr_desc %p\n",
			 isert_cmd, wr->fr_desc);
		if (wr->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &wr->prot);
			wr->fr_desc->ind &= ~ISERT_PROTECTED;
		}
		spin_lock_bh(&isert_conn->conn_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_bh(&isert_conn->conn_lock);
		wr->fr_desc = NULL;
	}

	if (wr->data.sg) {
		isert_dbg("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	wr->ib_sge = NULL;
	wr->send_wr = NULL;
}
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->conn_device;

	isert_dbg("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}
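/*
 * Query the signature MR status after a protected transfer and map any
 * guard/ref-tag/app-tag error to the matching TCM sense code, computing
 * the failing sector from the reported error offset.
 */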
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("isert: PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}
static void
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	wr->send_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, cmd);
}
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	wr->send_wr_num = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		target_execute_cmd(se_cmd);
}
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		isert_dbg("Calling iscsit_tmr_post_handler\n");

		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_REJECT:
		isert_dbg("Got isert_do_control_comp ISTATE_SEND_REJECT\n");

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		isert_dbg("Calling iscsit_logout_post_handler\n");

		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	default:
		isert_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		isert_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		isert_dbg("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		isert_dbg("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
		isert_dbg("isert_send_completion: Got ISER_IB_RDMA_READ\n");

		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		isert_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}
/**
 * is_isert_tx_desc() - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @isert_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
	void *start = isert_conn->conn_rx_descs;
	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}
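/*
 * Handle a flush/error completion. ISER_BEACON_WRID marks the beacon
 * posted at teardown: its completion signals that all earlier work
 * requests on the QP have been flushed.
 */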
static void
isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
	if (wc->wr_id == ISER_BEACON_WRID) {
		isert_info("conn %p completing conn_wait_comp_err\n",
			   isert_conn);
		complete(&isert_conn->conn_wait_comp_err);
	} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct isert_cmd *isert_cmd;
		struct iser_tx_desc *desc;

		desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
		isert_cmd = desc->isert_cmd;
		if (!isert_cmd)
			isert_unmap_tx_desc(desc, ib_dev);
		else
			isert_completion_put(desc, isert_cmd, ib_dev, true);
	} else {
		isert_conn->post_recv_buf_count--;
		if (!isert_conn->post_recv_buf_count)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}
}
static void
isert_handle_wc(struct ib_wc *wc)
{
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	isert_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
		} else {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			isert_send_completion(tx_desc, isert_conn);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			isert_err("wr id %llx status %d vend_err %x\n",
				  wc->wr_id, wc->status, wc->vendor_err);
		else
			isert_dbg("flush error: wr id %llx\n", wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID)
			isert_cq_comp_err(isert_conn, wc);
	}
}
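/*
 * CQ work handler: drain completions up to a fixed poll budget so one
 * busy CQ cannot starve the shared workqueue, then re-arm the CQ.
 */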
static void
isert_cq_work(struct work_struct *work)
{
	enum { isert_poll_budget = 65536 };
	struct isert_comp *comp = container_of(work, struct isert_comp,
					       work);
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			isert_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= isert_poll_budget)
			break;
	}

	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
}
static void
isert_cq_callback(struct ib_cq *cq, void *context)
{
	struct isert_comp *comp = context;

	queue_work(isert_comp_wq, &comp->work);
}
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}
	return ret;
}
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= pdu_len;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response IB_WR_SEND\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);

	device->unreg_rdma_mem(isert_cmd, isert_conn);
}
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting NOPIN Response IB_WR_SEND\n");

	return isert_post_response(isert_conn, isert_cmd);
}
2188 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2190 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2191 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2192 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2194 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2195 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
2196 &isert_cmd->tx_desc.iscsi_header);
2197 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2198 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
	isert_dbg("Posting Logout Response IB_WR_SEND\n");
2202 return isert_post_response(isert_conn, isert_cmd);
2206 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2208 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2209 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2210 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2212 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2213 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
2214 &isert_cmd->tx_desc.iscsi_header);
2215 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2216 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
	isert_dbg("Posting Task Management Response IB_WR_SEND\n");
2220 return isert_post_response(isert_conn, isert_cmd);
2224 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2226 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2227 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2228 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2229 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2230 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2231 struct iscsi_reject *hdr =
2232 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
2234 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2235 iscsit_build_reject(cmd, conn, hdr);
2236 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2238 hton24(hdr->dlength, ISCSI_HDR_LEN);
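	/*
	 * A Reject PDU carries the complete 48-byte header of the rejected
	 * PDU as its data segment: dlength above is ISCSI_HDR_LEN and the
	 * saved header (cmd->buf_ptr) is mapped below as that payload.
	 */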
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
2242 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
2243 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2244 tx_dsg->length = ISCSI_HDR_LEN;
2245 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2246 isert_cmd->tx_desc.num_sge = 2;
2248 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
	isert_dbg("Posting Reject IB_WR_SEND\n");
2252 return isert_post_response(isert_conn, isert_cmd);
2256 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2258 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2259 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2260 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	if (txt_rsp_len) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2276 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2277 void *txt_rsp_buf = cmd->buf_ptr;
2279 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2280 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2282 isert_cmd->pdu_buf_len = txt_rsp_len;
2283 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2284 tx_dsg->length = txt_rsp_len;
2285 tx_dsg->lkey = isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting Text Response IB_WR_SEND\n");
2292 return isert_post_response(isert_conn, isert_cmd);
2296 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2297 struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
2298 u32 data_left, u32 offset)
2300 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2301 struct scatterlist *sg_start, *tmp_sg;
2302 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2303 u32 sg_off, page_off;
2304 int i = 0, sg_nents;
2306 sg_off = offset / PAGE_SIZE;
2307 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2308 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
2309 page_off = offset % PAGE_SIZE;
2311 send_wr->sg_list = ib_sge;
2312 send_wr->num_sge = sg_nents;
2313 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
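	/*
	 * wr_id carries the tx_desc pointer so the completion handler can
	 * recover the owning isert_cmd from the work completion cookie.
	 */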
	/*
	 * Map each TCM scatterlist entry onto an ib_sge dma_addr.
	 */
2317 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2318 isert_dbg("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
2319 (unsigned long long)tmp_sg->dma_address,
2320 tmp_sg->length, page_off);
2322 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2323 ib_sge->length = min_t(u32, data_left,
2324 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
2325 ib_sge->lkey = isert_conn->conn_mr->lkey;
2327 isert_dbg("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
2328 ib_sge->addr, ib_sge->length, ib_sge->lkey);
		data_left -= ib_sge->length;
		ib_sge++;
		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
		page_off = 0;
	}

	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		  send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}
2342 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2343 struct isert_rdma_wr *wr)
2345 struct se_cmd *se_cmd = &cmd->se_cmd;
2346 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2347 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2348 struct isert_data_buf *data = &wr->data;
2349 struct ib_send_wr *send_wr;
2350 struct ib_sge *ib_sge;
2351 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2352 int ret = 0, i, ib_sge_cnt;
2354 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2356 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2357 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2358 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;
2363 data_left = data->len;
2364 offset = data->offset;
	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		isert_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	wr->ib_sge = ib_sge;
2374 wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		isert_dbg("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	wr->isert_cmd = isert_cmd;
2384 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
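	/*
	 * Each work request can carry at most max_sge scatter entries, so
	 * the payload is split into chunks of at most max_sge * PAGE_SIZE
	 * bytes per posted RDMA WRITE/READ.
	 */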
2386 for (i = 0; i < wr->send_wr_num; i++) {
2387 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2388 data_len = min(data_left, rdma_write_max);
2390 send_wr->send_flags = 0;
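		/*
		 * Chain the WRs: each RDMA WRITE points at its successor and
		 * the last one at the response SEND, while the last RDMA READ
		 * is signaled instead so its completion can resume the
		 * data-out path.
		 */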
2391 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2392 send_wr->opcode = IB_WR_RDMA_WRITE;
2393 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2394 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->next = &isert_cmd->tx_desc.send_wr;
			else
				send_wr->next = &wr->send_wr[i + 1];
		} else {
			send_wr->opcode = IB_WR_RDMA_READ;
2401 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2402 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->send_flags = IB_SEND_SIGNALED;
			else
				send_wr->next = &wr->send_wr[i + 1];
		}
2409 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2410 send_wr, data_len, offset);
2411 ib_sge += ib_sge_cnt;
		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;

unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}
2426 isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		isert_dbg("SGL[%d] dma_addr: 0x%16llx len: %u\n",
			  i, (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length);

		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			isert_dbg("Mapped page_list[%d] page_addr: 0x%16llx\n",
				  n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}
2465 isert_fast_reg_mr(struct isert_conn *isert_conn,
2466 struct fast_reg_descriptor *fr_desc,
2467 struct isert_data_buf *mem,
2468 enum isert_indicator ind,
2471 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2473 struct ib_fast_reg_page_list *frpl;
2474 struct ib_send_wr fr_wr, inv_wr;
2475 struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, pagelist_len;
	u32 page_off;
	u8 key;
2480 if (mem->dma_nents == 1) {
2481 sge->lkey = isert_conn->conn_mr->lkey;
2482 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
2483 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		isert_dbg("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
			  __func__, __LINE__, sge->addr, sge->length,
			  sge->lkey);
		return 0;
	}
2490 if (ind == ISERT_DATA_KEY_VALID) {
2491 /* Registering data buffer */
2492 mr = fr_desc->data_mr;
		frpl = fr_desc->data_frpl;
	} else {
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;
		frpl = fr_desc->pi_ctx->prot_frpl;
	}
2500 page_off = mem->offset % PAGE_SIZE;
2502 isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
2503 fr_desc, mem->nents, mem->offset);
2505 pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
2506 &frpl->page_list[0]);
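	/*
	 * The scatterlist is collapsed into a page list the HCA consumes
	 * for the fast registration: pagelist_len counts the PAGE_SIZE
	 * pages backing mem->len bytes starting at page_off.
	 */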
2508 if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
2509 memset(&inv_wr, 0, sizeof(inv_wr));
2510 inv_wr.wr_id = ISER_FASTREG_LI_WRID;
2511 inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(mr, ++key);
	}
2519 /* Prepare FASTREG WR */
2520 memset(&fr_wr, 0, sizeof(fr_wr));
2521 fr_wr.wr_id = ISER_FASTREG_LI_WRID;
2522 fr_wr.opcode = IB_WR_FAST_REG_MR;
2523 fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
2524 fr_wr.wr.fast_reg.page_list = frpl;
2525 fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2526 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2527 fr_wr.wr.fast_reg.length = mem->len;
2528 fr_wr.wr.fast_reg.rkey = mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;
2543 sge->lkey = mr->lkey;
2544 sge->addr = frpl->page_list[0] + page_off;
2545 sge->length = mem->len;
	isert_dbg("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
		  __func__, __LINE__, sge->addr, sge->length,
		  sge->lkey);

	return 0;
}
2555 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2556 struct ib_sig_domain *domain)
2558 domain->sig_type = IB_SIG_TYPE_T10_DIF;
2559 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2560 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2561 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
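	/*
	 * pi_interval is the span covered by each guard tag (the logical
	 * block size) and ref_tag is seeded from the command's reftag_seed
	 * for the reference-tag check configured below.
	 */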
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
2568 domain->sig.dif.app_escape = true;
2569 domain->sig.dif.ref_escape = true;
2570 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2571 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2572 domain->sig.dif.ref_remap = true;
2576 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2578 switch (se_cmd->prot_op) {
2579 case TARGET_PROT_DIN_INSERT:
2580 case TARGET_PROT_DOUT_STRIP:
2581 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
2584 case TARGET_PROT_DOUT_INSERT:
2585 case TARGET_PROT_DIN_STRIP:
2586 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
2589 case TARGET_PROT_DIN_PASS:
2590 case TARGET_PROT_DOUT_PASS:
2591 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}
2603 isert_set_prot_checks(u8 prot_checks)
2605 return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2606 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2607 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2611 isert_reg_sig_mr(struct isert_conn *isert_conn,
2612 struct se_cmd *se_cmd,
2613 struct isert_rdma_wr *rdma_wr,
2614 struct fast_reg_descriptor *fr_desc)
2616 struct ib_send_wr sig_wr, inv_wr;
2617 struct ib_send_wr *bad_wr, *wr = NULL;
2618 struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;
	u8 key;
2623 memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		return ret;
2628 sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
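	/*
	 * The layout follows isert_set_prot_checks() above: 0xc0 enables the
	 * guard check and 0x30/0x0f the ref-tag checks (both gated on
	 * TARGET_DIF_CHECK_REFTAG); the app tag is never checked here, as
	 * apptag_check_mask is hard coded in isert_set_dif_domain().
	 */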
2630 if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
2631 memset(&inv_wr, 0, sizeof(inv_wr));
2632 inv_wr.opcode = IB_WR_LOCAL_INV;
2633 inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
	}
2641 memset(&sig_wr, 0, sizeof(sig_wr));
2642 sig_wr.opcode = IB_WR_REG_SIG_MR;
2643 sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
	sig_wr.num_sge = 1;
2646 sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
2647 sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
2648 sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
2649 if (se_cmd->t_prot_sg)
		sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2664 rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
2665 rdma_wr->ib_sg[SIG].addr = 0;
2666 rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
2667 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2668 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
2675 isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
2676 rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
		  rdma_wr->ib_sg[SIG].lkey);

	return 0;
}
2683 isert_handle_prot_cmd(struct isert_conn *isert_conn,
2684 struct isert_cmd *isert_cmd,
2685 struct isert_rdma_wr *wr)
2687 struct isert_device *device = isert_conn->conn_device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;
2691 if (!wr->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(wr->fr_desc,
					  device->ib_device,
					  isert_conn->conn_pd);
		if (ret) {
			isert_err("conn %p failed to allocate pi_ctx\n",
				  isert_conn);
			return ret;
		}
	}
2702 if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0, wr->iser_ib_op, &wr->prot);
		if (ret) {
			isert_err("conn %p failed to map protection buffer\n",
				  isert_conn);
			return ret;
		}
2714 memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
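		/*
		 * The protection scatterlist gets its own fast registration;
		 * the resulting ib_sg[PROT] is then bundled together with
		 * ib_sg[DATA] under the signature MR below.
		 */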
2715 ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
		if (ret) {
			isert_err("conn %p failed to fast reg mr\n",
				  isert_conn);
			goto unmap_prot_cmd;
		}
	}
2724 ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
	if (ret) {
		isert_err("conn %p failed to fast reg mr\n",
			  isert_conn);
		goto unmap_prot_cmd;
	}
	wr->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
2735 if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);

	return ret;
}
2742 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2743 struct isert_rdma_wr *wr)
2745 struct se_cmd *se_cmd = &cmd->se_cmd;
2746 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2747 struct isert_conn *isert_conn = conn->context;
2748 struct fast_reg_descriptor *fr_desc = NULL;
2749 struct ib_send_wr *send_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
2753 unsigned long flags;
2755 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2757 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2758 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2759 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;
2764 if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
2765 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2766 fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2767 struct fast_reg_descriptor, list);
2768 list_del(&fr_desc->list);
2769 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;
2778 if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}
2788 memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
2789 wr->ib_sge = &wr->s_ib_sge;
2790 wr->send_wr_num = 1;
2791 memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2792 wr->send_wr = &wr->s_send_wr;
2793 wr->isert_cmd = isert_cmd;
2795 send_wr = &isert_cmd->rdma_wr.s_send_wr;
2796 send_wr->sg_list = &wr->s_ib_sge;
2797 send_wr->num_sge = 1;
2798 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
2799 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2800 send_wr->opcode = IB_WR_RDMA_WRITE;
2801 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2802 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2803 send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
				      0 : IB_SEND_SIGNALED;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
2807 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2808 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}
2826 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2828 struct se_cmd *se_cmd = &cmd->se_cmd;
2829 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2830 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2831 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2832 struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;
2836 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
2837 isert_cmd, se_cmd->data_length);
2838 wr->iser_ib_op = ISER_IB_RDMA_WRITE;
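	/*
	 * Data-in is an RDMA WRITE from the target's point of view: the
	 * READ payload is pushed to the initiator and, for non-PI commands,
	 * the SCSI response SEND is chained behind the last WRITE WR.
	 */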
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}
2845 if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 * the initial SEND to the first WR.
		 */
2849 isert_create_send_desc(isert_conn, isert_cmd,
2850 &isert_cmd->tx_desc);
2851 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2852 &isert_cmd->tx_desc.iscsi_header);
2853 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2854 isert_init_send_wr(isert_conn, isert_cmd,
2855 &isert_cmd->tx_desc.send_wr);
2856 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		wr->send_wr_num += 1;
	}
	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
	if (!isert_prot_cmd(isert_conn, se_cmd))
		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			  "READ\n", isert_cmd);
	else
		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			  isert_cmd);

	return 1;
}
2875 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2877 struct se_cmd *se_cmd = &cmd->se_cmd;
2878 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2879 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2880 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2881 struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;
2885 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2886 isert_cmd, se_cmd->data_length, cmd->write_data_done);
2887 wr->iser_ib_op = ISER_IB_RDMA_READ;
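	/*
	 * Data-out payload is pulled from the initiator with RDMA READ;
	 * write_data_done is non-zero when restarting a partially received
	 * transfer (the recovery case).
	 */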
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}
	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);

	return 0;
}
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret = 0;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending a non-GOOD SCSI status from TX
		 * thread context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
2963 isert_setup_id(struct isert_np *isert_np)
2965 struct iscsi_np *np = isert_np->np;
2966 struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;
2970 sa = (struct sockaddr *)&np->np_sockaddr;
2971 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
2973 id = rdma_create_id(isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
2980 isert_dbg("id %p context %p\n", id, id->context);
	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}
	ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}
3002 isert_setup_np(struct iscsi_np *np,
3003 struct __kernel_sockaddr_storage *ksockaddr)
3005 struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;
	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
3014 sema_init(&isert_np->np_sem, 0);
3015 mutex_init(&isert_np->np_accept_mutex);
3016 INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code.
	 */
3024 memcpy(&np->np_sockaddr, ksockaddr,
3025 sizeof(struct __kernel_sockaddr_storage));
3027 isert_lid = isert_setup_id(isert_np);
3028 if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}
3045 isert_rdma_accept(struct isert_conn *isert_conn)
3047 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;
3051 memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
3054 cp.rnr_retry_count = 7;
	isert_dbg("Before rdma_accept\n");
	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	isert_dbg("After rdma_accept\n");

	return 0;
}
3070 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;
3075 isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
3082 reinit_completion(&isert_conn->login_req_comp);
	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);
3095 isert_info("before conn_login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}
3106 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
3107 struct isert_conn *isert_conn)
3109 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3110 struct rdma_route *cm_route = &cm_id->route;
3111 struct sockaddr_in *sock_in;
3112 struct sockaddr_in6 *sock_in6;
3114 conn->login_family = np->np_sockaddr.ss_family;
3116 if (np->np_sockaddr.ss_family == AF_INET6) {
3117 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
3118 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
3119 &sock_in6->sin6_addr.in6_u);
3120 conn->login_port = ntohs(sock_in6->sin6_port);
3122 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
3123 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
3124 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
3127 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
3128 sprintf(conn->login_ip, "%pI4",
3129 &sock_in->sin_addr.s_addr);
3130 conn->login_port = ntohs(sock_in->sin_port);
3132 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
3133 sprintf(conn->local_ip, "%pI4",
3134 &sock_in->sin_addr.s_addr);
3135 conn->local_port = ntohs(sock_in->sin_port);
3140 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3142 struct isert_np *isert_np = (struct isert_np *)np->np_context;
3143 struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = down_interruptible(&isert_np->np_sem);
	if (ret || max_accept > 5)
		return -ENODEV;
3151 spin_lock_bh(&np->np_thread_lock);
3152 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
3153 spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d for isert_accept_np\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);
3164 mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
3170 isert_conn = list_first_entry(&isert_np->np_accept_list,
3171 struct isert_conn, conn_accept_node);
3172 list_del_init(&isert_conn->conn_accept_node);
3173 mutex_unlock(&isert_np->np_accept_mutex);
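	/*
	 * Each up(&isert_np->np_sem) from the connect-request handler
	 * corresponds to one queued connection, so a successful down()
	 * above should normally find a non-empty np_accept_list.
	 */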
3175 conn->context = isert_conn;
3176 isert_conn->conn = conn;
3179 isert_set_conn_info(np, conn, isert_conn);
3181 isert_dbg("Processing isert_conn: %p\n", isert_conn);
3187 isert_free_np(struct iscsi_np *np)
3189 struct isert_np *isert_np = (struct isert_np *)np->np_context;
3190 struct isert_conn *isert_conn, *n;
3192 if (isert_np->np_cm_id)
3193 rdma_destroy_id(isert_np->np_cm_id);
	/*
	 * FIXME: At this point we don't have a good way to ensure that
	 * there are no hanging connections which completed RDMA
	 * establishment but never started the iscsi login process. So
	 * work around this by cleaning up whatever piled up in
	 * np_accept_list.
	 */
3202 mutex_lock(&isert_np->np_accept_mutex);
3203 if (!list_empty(&isert_np->np_accept_list)) {
3204 isert_info("Still have isert connections, cleaning up...\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->np_accept_list,
					 conn_accept_node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
3209 isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->np_accept_mutex);

	np->np_context = NULL;
	kfree(isert_np);
}
3219 static void isert_release_work(struct work_struct *work)
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);
3227 wait_for_completion(&isert_conn->conn_wait);
3229 mutex_lock(&isert_conn->conn_mutex);
3230 isert_conn->state = ISER_CONN_DOWN;
3231 mutex_unlock(&isert_conn->conn_mutex);
3233 isert_info("Destroying conn %p\n", isert_conn);
3234 isert_put_conn(isert_conn);
3238 isert_wait4logout(struct isert_conn *isert_conn)
3240 struct iscsi_conn *conn = isert_conn->conn;
3242 if (isert_conn->logout_posted) {
3243 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}

static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}
static void
isert_wait4flush(struct isert_conn *isert_conn)
{
	struct ib_recv_wr *bad_wr;
3263 init_completion(&isert_conn->conn_wait_comp_err);
3264 isert_conn->beacon.wr_id = ISER_BEACON_WRID;
3265 /* post an indication that all flush errors were consumed */
	if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
		isert_err("conn %p failed to post beacon\n", isert_conn);
		return;
	}

	wait_for_completion(&isert_conn->conn_wait_comp_err);
}
3274 static void isert_wait_conn(struct iscsi_conn *conn)
3276 struct isert_conn *isert_conn = conn->context;
3278 isert_dbg("isert_wait_conn: Starting\n");
	mutex_lock(&isert_conn->conn_mutex);
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase.
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
3290 mutex_unlock(&isert_conn->conn_mutex);
3292 isert_wait4cmds(conn);
3293 isert_wait4flush(isert_conn);
3294 isert_wait4logout(isert_conn);
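	/*
	 * The wait order above matters: quiesce outstanding se_cmds first,
	 * then drain the QP via the beacon, and only then wait for a posted
	 * logout response before scheduling the final release.
	 */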
3296 INIT_WORK(&isert_conn->release_work, isert_release_work);
3297 queue_work(isert_release_wq, &isert_conn->release_work);
3300 static void isert_free_conn(struct iscsi_conn *conn)
3302 struct isert_conn *isert_conn = conn->context;
3304 isert_put_conn(isert_conn);
3307 static struct iscsit_transport iser_target_transport = {
3309 .transport_type = ISCSI_INFINIBAND,
3310 .priv_size = sizeof(struct isert_cmd),
3311 .owner = THIS_MODULE,
3312 .iscsit_setup_np = isert_setup_np,
3313 .iscsit_accept_np = isert_accept_np,
3314 .iscsit_free_np = isert_free_np,
3315 .iscsit_wait_conn = isert_wait_conn,
3316 .iscsit_free_conn = isert_free_conn,
3317 .iscsit_get_login_rx = isert_get_login_rx,
3318 .iscsit_put_login_tx = isert_put_login_tx,
3319 .iscsit_immediate_queue = isert_immediate_queue,
3320 .iscsit_response_queue = isert_response_queue,
3321 .iscsit_get_dataout = isert_get_dataout,
3322 .iscsit_queue_data_in = isert_put_datain,
3323 .iscsit_queue_status = isert_put_response,
3324 .iscsit_aborted_task = isert_aborted_task,
3325 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
static int __init isert_init(void)
{
	int ret;
3332 isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
3333 if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}
3339 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
3340 WQ_UNBOUND_MAX_ACTIVE);
3341 if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}
3347 iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}
3358 static void __exit isert_exit(void)
3360 flush_scheduled_work();
3361 destroy_workqueue(isert_release_wq);
3362 destroy_workqueue(isert_comp_wq);
3363 iscsit_unregister_transport(&iser_target_transport);
	isert_dbg("iSER_TARGET[0] - Released iser_target_transport\n");
}
3367 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
3368 MODULE_VERSION("0.1");
3369 MODULE_AUTHOR("nab@Linux-iSCSI.org");
3370 MODULE_LICENSE("GPL");
3372 module_init(isert_init);
3373 module_exit(isert_exit);