/*
 * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "ib_srpt.h"

/* Name of this kernel module. */
#define DRV_NAME		"ib_srpt"
#define DRV_VERSION		"2.0.0"
#define DRV_RELDATE		"2011-02-14"

#define SRPT_ID_STRING	"Linux SRP target"

#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * Global Variables
 */

static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */

static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
		 "Maximum size of SRP request messages in bytes.");

static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
		 "Shared receive queue (SRQ) size.");

static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
MODULE_PARM_DESC(srpt_service_guid,
		 "Use this value for ioc_guid, id_ext, and cm_listen_id"
		 " instead of using the node_guid of the first HCA.");

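/*
 * Example (hypothetical GUID value):
 *
 *   modprobe ib_srpt srpt_service_guid=0x0002c90300a0b0c0
 *
 * All three parameters above are registered with mode 0444, so their current
 * values can be read back under /sys/module/ib_srpt/parameters/.
 */
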
static struct ib_client srpt_client;
static void srpt_release_channel(struct srpt_rdma_ch *ch);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);

/**
 * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
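 *
 * Note: the data direction in a struct se_cmd describes the transfer as seen
 * by the initiator (DMA_FROM_DEVICE == READ), while ib_dma_map_sg() needs
 * the direction of the local DMA transfer on the target side, which is the
 * mirror image. Hence the swap performed by this helper.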
 */
static enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:	return DMA_FROM_DEVICE;
	case DMA_FROM_DEVICE:	return DMA_TO_DEVICE;
	default:		return dir;
	}
}

/**
 * srpt_sdev_name() - Return the name associated with the HCA.
 *
 * Examples are ib0, ib1, ...
 */
static inline const char *srpt_sdev_name(struct srpt_device *sdev)
{
	return sdev->device->name;
}

static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
{
	unsigned long flags;
	enum rdma_ch_state state;

	spin_lock_irqsave(&ch->spinlock, flags);
	state = ch->state;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return state;
}

static enum rdma_ch_state
srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
{
	unsigned long flags;
	enum rdma_ch_state prev;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	ch->state = new_state;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return prev;
}

/**
 * srpt_test_and_set_ch_state() - Test and set the channel state.
 *
 * Returns true if and only if the channel state has been set to the new state.
 */
static bool
srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
			   enum rdma_ch_state new)
{
	unsigned long flags;
	enum rdma_ch_state prev;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (prev == old)
		ch->state = new;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return prev == old;
}

/**
 * srpt_event_handler() - Asynchronous IB event callback function.
 *
 * Callback function called by the InfiniBand core when an asynchronous IB
 * event occurs. This callback may occur in interrupt context. See also
 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
 * Architecture Specification.
 */
static void srpt_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;

	sdev = ib_get_client_data(event->device, &srpt_client);
	if (!sdev || sdev->device != event->device)
		return;

	pr_debug("ASYNC event= %d on device= %s\n", event->event,
		 srpt_sdev_name(sdev));

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			sport->lid = 0;
			sport->sm_lid = 0;
		}
		break;
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
	case IB_EVENT_GID_CHANGE:
		/* Refresh port data asynchronously. */
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			if (!sport->lid && !sport->sm_lid)
				schedule_work(&sport->work);
		}
		break;
	default:
		pr_err("received unrecognized IB event %d\n",
		       event->event);
		break;
	}
}

/**
 * srpt_srq_event() - SRQ event callback function.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
	pr_info("SRQ event %d\n", event->event);
}

/**
 * srpt_qp_event() - QP event callback function.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
	pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
		 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		ib_cm_notify(ch->cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
					       CH_RELEASING))
			srpt_release_channel(ch);
		else
			pr_debug("%s: state %d - ignored LAST_WQE.\n",
				 ch->sess_name, srpt_get_ch_state(ch));
		break;
	default:
		pr_err("received unrecognized IB QP event %d\n", event->event);
		break;
	}
}

/**
 * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
 *
 * @slot:  one-based slot number.
 * @value: four-bit value.
 *
 * Copies the lowest four bits of value into element slot of the array of
 * four-bit elements called c_list (controller list). The index slot is
 * one-based.
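 *
 * For example, slot 1 occupies the upper nibble of c_list[0], slot 2 the
 * lower nibble of c_list[0], slot 3 the upper nibble of c_list[1], and so on.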
 */
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
	u16 id;
	u8 tmp;

	id = (slot - 1) / 2;
	if (slot & 0x1) {
		tmp = c_list[id] & 0xf;
		c_list[id] = (value << 4) | tmp;
	} else {
		tmp = c_list[id] & 0xf0;
		c_list[id] = (value & 0xf) | tmp;
	}
}

/**
 * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
	struct ib_class_port_info *cif;

	cif = (struct ib_class_port_info *)mad->data;
	memset(cif, 0, sizeof *cif);
	cif->base_version = 1;
	cif->class_version = 1;
	cif->resp_time_value = 20;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_iou() - Write IOUnitInfo to a management datagram.
 *
 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
 */
static void srpt_get_iou(struct ib_dm_mad *mad)
{
	struct ib_dm_iou_info *ioui;
	u8 slot;
	int i;

	ioui = (struct ib_dm_iou_info *)mad->data;
	ioui->change_id = cpu_to_be16(1);
	ioui->max_controllers = 16;

	/* set present for slot 1 and empty for the rest */
	srpt_set_ioc(ioui->controller_list, 1, 1);
	for (i = 1, slot = 2; i < 16; i++, slot++)
		srpt_set_ioc(ioui->controller_list, slot, 0);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
 *
 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
 * r16a document.
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
			 struct ib_dm_mad *mad)
{
	struct srpt_device *sdev = sport->sdev;
	struct ib_dm_ioc_profile *iocp;

	iocp = (struct ib_dm_ioc_profile *)mad->data;

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	memset(iocp, 0, sizeof *iocp);
	strcpy(iocp->id_string, SRPT_ID_STRING);
	iocp->guid = cpu_to_be64(srpt_service_guid);
	iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
	iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
	iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->subsys_device_id = 0x0;
	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
	iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
	iocp->rdma_read_depth = 4;
	iocp->send_size = cpu_to_be32(srp_max_req_size);
	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
					  1U << 24));
	iocp->num_svc_entries = 1;
	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
 *
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
	struct ib_dm_svc_entries *svc_entries;

	WARN_ON(!ioc_guid);

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2 || lo > hi || hi > 1) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	svc_entries = (struct ib_dm_svc_entries *)mad->data;
	memset(svc_entries, 0, sizeof *svc_entries);
	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
	snprintf(svc_entries->service_entries[0].name,
		 sizeof(svc_entries->service_entries[0].name),
		 "%s%016llx",
		 SRP_SERVICE_NAME_PREFIX,
		 ioc_guid);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_mgmt_method_get() - Process a received management datagram.
 * @sp:      source port through which the MAD has been received.
 * @rq_mad:  received MAD.
 * @rsp_mad: response MAD.
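 *
 * Note: for DM_ATTR_SVC_ENTRIES the 32-bit attribute modifier encodes the
 * requested range: bits 31..16 contain the slot number, bits 15..8 the
 * highest and bits 7..0 the lowest service entry index (see the decoding in
 * the switch statement below).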
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
				 struct ib_dm_mad *rsp_mad)
{
	u16 attr_id;
	u32 slot;
	u8 hi, lo;

	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
	switch (attr_id) {
	case DM_ATTR_CLASS_PORT_INFO:
		srpt_get_class_port_info(rsp_mad);
		break;
	case DM_ATTR_IOU_INFO:
		srpt_get_iou(rsp_mad);
		break;
	case DM_ATTR_IOC_PROFILE:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		srpt_get_ioc(sp, slot, rsp_mad);
		break;
	case DM_ATTR_SVC_ENTRIES:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		hi = (u8) ((slot >> 8) & 0xff);
		lo = (u8) (slot & 0xff);
		slot = (u16) ((slot >> 16) & 0xffff);
		srpt_get_svc_entries(srpt_service_guid,
				     slot, hi, lo, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	}
}

/**
 * srpt_mad_send_handler() - Post MAD-send callback function.
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
{
	ib_destroy_ah(mad_wc->send_buf->ah);
	ib_free_send_mad(mad_wc->send_buf);
}

/**
 * srpt_mad_recv_handler() - MAD reception callback function.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_buf *send_buf,
				  struct ib_mad_recv_wc *mad_wc)
{
	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct ib_dm_mad *dm_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto err;

	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				 GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;

	dm_mad = rsp->mad;
	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	dm_mad->mad_hdr.status = 0;

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
		break;
	case IB_MGMT_METHOD_SET:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	default:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
		break;
	}

	if (!ib_post_send_mad(rsp, NULL)) {
		ib_free_recv_mad(mad_wc);
		/* will destroy_ah & free_send_mad in send completion */
		return;
	}

	ib_free_send_mad(rsp);

err_rsp:
	ib_destroy_ah(ah);
err:
	ib_free_recv_mad(mad_wc);
}

/**
 * srpt_refresh_port() - Configure a HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */
static int srpt_refresh_port(struct srpt_port *sport)
{
	struct ib_mad_reg_req reg_req;
	struct ib_port_modify port_modify;
	struct ib_port_attr port_attr;
	int ret;

	memset(&port_modify, 0, sizeof port_modify);
	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	port_modify.clr_port_cap_mask = 0;

	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
	if (ret)
		goto err_mod_port;

	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
	if (ret)
		goto err_query_port;

	sport->sm_lid = port_attr.sm_lid;
	sport->lid = port_attr.lid;

	ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid,
			   NULL);
	if (ret)
		goto err_query_port;

	if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof reg_req);
		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
							 sport->port,
							 IB_QPT_GSI,
							 &reg_req, 0,
							 srpt_mad_send_handler,
							 srpt_mad_recv_handler,
							 sport, 0);
		if (IS_ERR(sport->mad_agent)) {
			ret = PTR_ERR(sport->mad_agent);
			sport->mad_agent = NULL;
			goto err_query_port;
		}
	}

	return 0;

err_query_port:

	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);

err_mod_port:

	return ret;
}

/**
 * srpt_unregister_mad_agent() - Unregister MAD callback functions.
 *
 * Note: It is safe to call this function more than once for the same device.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	};
	struct srpt_port *sport;
	int i;

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
			pr_err("disabling MAD processing failed.\n");
		if (sport->mad_agent) {
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
		}
	}
}

/**
 * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
					   int ioctx_size, int dma_size,
					   enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx;

	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
	if (!ioctx)
		goto err;

	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
	if (!ioctx->buf)
		goto err_free_ioctx;

	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;

	return ioctx;

err_free_buf:
	kfree(ioctx->buf);
err_free_ioctx:
	kfree(ioctx);
err:
	return NULL;
}

/**
 * srpt_free_ioctx() - Free an SRPT I/O context structure.
 */
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
			    int dma_size, enum dma_data_direction dir)
{
	if (!ioctx)
		return;

	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
	kfree(ioctx->buf);
	kfree(ioctx);
}

/**
 * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
 * @sdev:       Device to allocate the I/O context ring for.
 * @ring_size:  Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @dma_size:   DMA buffer size.
 * @dir:        DMA data direction.
 */
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
				int dma_size, enum dma_data_direction dir)
{
	struct srpt_ioctx **ring;
	int i;

	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
		&& ioctx_size != sizeof(struct srpt_send_ioctx));

	ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
	if (!ring)
		goto out;
	for (i = 0; i < ring_size; ++i) {
		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
		if (!ring[i])
			goto err;
		ring[i]->index = i;
	}
	goto out;

err:
	while (--i >= 0)
		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
	kfree(ring);
	ring = NULL;
out:
	return ring;
}

/**
 * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
 */
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
				 int dma_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ring_size; ++i)
		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
	kfree(ioctx_ring);
}

/**
 * srpt_get_cmd_state() - Get the state of a SCSI command.
 */
static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return state;
}

/**
 * srpt_set_cmd_state() - Set the state of a SCSI command.
 *
 * Does not modify the state of aborted commands. Returns the previous command
 * state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
						  enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous != SRPT_STATE_DONE)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	return previous;
}

/**
 * srpt_test_and_set_cmd_state() - Test and set the state of a command.
 *
 * Returns true if and only if the previous command state was equal to 'old'.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
					enum srpt_command_state old,
					enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	WARN_ON(!ioctx);
	WARN_ON(old == SRPT_STATE_DONE);
	WARN_ON(new == SRPT_STATE_NEW);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous == old)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return previous == old;
}

/**
 * srpt_post_recv() - Post an IB receive request.
 */
static int srpt_post_recv(struct srpt_device *sdev,
			  struct srpt_recv_ioctx *ioctx)
{
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;

	BUG_ON(!sdev);
	list.addr = ioctx->ioctx.dma;
	list.length = srp_max_req_size;
	list.lkey = sdev->pd->local_dma_lkey;

	ioctx->ioctx.cqe.done = srpt_recv_done;
	wr.wr_cqe = &ioctx->ioctx.cqe;
	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
}

/**
 * srpt_post_send() - Post an IB send request.
 *
 * Returns zero upon success and a non-zero value upon failure.
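 *
 * Note: every response posted through this function returns one request
 * credit to the initiator via the req_lim_delta field of the response, so
 * ch->req_lim is incremented up front and rolled back together with
 * ch->sq_wr_avail if posting fails.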
 */
static int srpt_post_send(struct srpt_rdma_ch *ch,
			  struct srpt_send_ioctx *ioctx, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct srpt_device *sdev = ch->sport->sdev;
	int ret;

	atomic_inc(&ch->req_lim);

	ret = -ENOMEM;
	if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
		pr_warn("IB send queue full (needed 1)\n");
		goto out;
	}

	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
				      DMA_TO_DEVICE);

	list.addr = ioctx->ioctx.dma;
	list.length = len;
	list.lkey = sdev->pd->local_dma_lkey;

	ioctx->ioctx.cqe.done = srpt_send_done;
	wr.next = NULL;
	wr.wr_cqe = &ioctx->ioctx.cqe;
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ch->qp, &wr, &bad_wr);

out:
	if (ret < 0) {
		atomic_inc(&ch->sq_wr_avail);
		atomic_dec(&ch->req_lim);
	}
	return ret;
}

/**
 * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
 * @ioctx: Pointer to the I/O context associated with the request.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *   written.
 * @data_len: Pointer to the variable to which the total data length of all
 *   descriptors in the SRP_CMD request will be written.
 *
 * This function initializes ioctx->n_rbuf and ioctx->rbufs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
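 *
 * For example, an SRP_CMD with buf_fmt == 0x10 (a single direct DATA-OUT
 * descriptor) results in *dir == DMA_TO_DEVICE and *data_len being taken
 * from the direct buffer descriptor that follows the CDB.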
 */
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
			     struct srp_cmd *srp_cmd,
			     enum dma_data_direction *dir, u64 *data_len)
{
	struct srp_indirect_buf *idb;
	struct srp_direct_buf *db;
	unsigned add_cdb_offset;
	int ret;

	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
		     && !__same_type(srp_cmd->add_data[0], (u8)0));

	BUG_ON(!dir);
	BUG_ON(!data_len);

	ret = 0;
	*data_len = 0;

	/*
	 * The lower four bits of the buffer format field contain the DATA-IN
	 * buffer descriptor format, and the highest four bits contain the
	 * DATA-OUT buffer descriptor format.
	 */
	*dir = DMA_NONE;
	if (srp_cmd->buf_fmt & 0xf)
		/* DATA-IN: transfer data from target to initiator (read). */
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		/* DATA-OUT: transfer data from initiator to target (write). */
		*dir = DMA_TO_DEVICE;

	/*
	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
	 * CDB LENGTH' field are reserved and the size in bytes of this field
	 * is four times the value specified in bits 3..7. Hence the "& ~3".
	 */
	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		ioctx->n_rbuf = 1;
		ioctx->rbufs = &ioctx->single_rbuf;

		db = (struct srp_direct_buf *)(srp_cmd->add_data
					       + add_cdb_offset);
		memcpy(ioctx->rbufs, db, sizeof *db);
		*data_len = be32_to_cpu(db->len);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
						  + add_cdb_offset);

		ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;

		if (ioctx->n_rbuf >
		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
			pr_err("received unsupported SRP_CMD request"
			       " type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
			       sizeof(*db));
			ioctx->n_rbuf = 0;
			ret = -EINVAL;
			goto out;
		}

		if (ioctx->n_rbuf == 1)
			ioctx->rbufs = &ioctx->single_rbuf;
		else {
			ioctx->rbufs =
				kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
			if (!ioctx->rbufs) {
				ioctx->n_rbuf = 0;
				ret = -ENOMEM;
				goto out;
			}
		}

		db = idb->desc_list;
		memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
		*data_len = be32_to_cpu(idb->len);
	}
out:
	return ret;
}

/**
 * srpt_init_ch_qp() - Initialize queue pair attributes.
 *
 * Initializes the attributes of queue pair 'qp' by allowing local write,
 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kzalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
	    IB_ACCESS_REMOTE_WRITE;
	attr->port_num = ch->sport->port;
	attr->pkey_index = 0;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
			   IB_QP_PKEY_INDEX);

	kfree(attr);
	return ret;
}

/**
 * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_dest_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}

/**
 * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
 */
static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				    struct srpt_send_ioctx *ioctx)
{
	struct scatterlist *sg;
	enum dma_data_direction dir;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	BUG_ON(ioctx->n_rdma && !ioctx->rdma_wrs);

	while (ioctx->n_rdma)
		kfree(ioctx->rdma_wrs[--ioctx->n_rdma].wr.sg_list);

	kfree(ioctx->rdma_wrs);
	ioctx->rdma_wrs = NULL;

	if (ioctx->mapped_sg_count) {
		sg = ioctx->sg;
		WARN_ON(!sg);
		dir = ioctx->cmd.data_direction;
		BUG_ON(dir == DMA_NONE);
		ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
				opposite_dma_dir(dir));
		ioctx->mapped_sg_count = 0;
	}
}

/**
 * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
 */
static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				 struct srpt_send_ioctx *ioctx)
{
	struct ib_device *dev = ch->sport->sdev->device;
	struct se_cmd *cmd;
	struct scatterlist *sg, *sg_orig;
	int sg_cnt;
	enum dma_data_direction dir;
	struct ib_rdma_wr *riu;
	struct srp_direct_buf *db;
	dma_addr_t dma_addr;
	struct ib_sge *sge;
	u64 raddr;
	u32 rsize;
	u32 tsize;
	u32 dma_len;
	int count, nrdma;
	int i, j, k;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	cmd = &ioctx->cmd;
	dir = cmd->data_direction;
	BUG_ON(dir == DMA_NONE);

	ioctx->sg = sg = sg_orig = cmd->t_data_sg;
	ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;

	count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
			      opposite_dma_dir(dir));
	if (unlikely(!count))
		return -EAGAIN;

	ioctx->mapped_sg_count = count;

	if (ioctx->rdma_wrs && ioctx->n_rdma_wrs)
		nrdma = ioctx->n_rdma_wrs;
	else {
		nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
			+ ioctx->n_rbuf;

		ioctx->rdma_wrs = kcalloc(nrdma, sizeof(*ioctx->rdma_wrs),
					  GFP_KERNEL);
		if (!ioctx->rdma_wrs)
			goto free_mem;

		ioctx->n_rdma_wrs = nrdma;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	dma_len = ib_sg_dma_len(dev, &sg[0]);
	riu = ioctx->rdma_wrs;

	/*
	 * For each remote desc - calculate the #ib_sge.
	 * If #ib_sge < SRPT_DEF_SG_PER_WQE per rdma operation then
	 *      each remote desc needs one rdma wr;
	 * else
	 *      we need to allocate extra rdma_iu to carry extra #ib_sge in
	 *      another rdma wr
	 */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		raddr = be64_to_cpu(db->va);
		riu->remote_addr = raddr;
		riu->rkey = be32_to_cpu(db->key);
		riu->wr.num_sge = 0;

		/* calculate how many sge required for this remote_buf */
		while (rsize > 0 && tsize > 0) {

			if (rsize >= dma_len) {
				tsize -=
				    (tsize > dma_len) ? dma_len : tsize;
				rsize -= dma_len;
				raddr += dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = ib_sg_dma_len(
								dev, sg);
					}
				}
			} else {
				tsize -= (tsize > rsize) ? rsize : tsize;
				dma_len -= rsize;
				rsize = 0;
			}

			++riu->wr.num_sge;

			if (rsize > 0 &&
			    riu->wr.num_sge == SRPT_DEF_SG_PER_WQE) {
				++ioctx->n_rdma;
				riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
						sizeof(*riu->wr.sg_list),
						GFP_KERNEL);
				if (!riu->wr.sg_list)
					goto free_mem;

				++riu;
				riu->wr.num_sge = 0;
				riu->remote_addr = raddr;
				riu->rkey = be32_to_cpu(db->key);
			}
		}

		++ioctx->n_rdma;
		riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
					sizeof(*riu->wr.sg_list),
					GFP_KERNEL);
		if (!riu->wr.sg_list)
			goto free_mem;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	riu = ioctx->rdma_wrs;
	sg = sg_orig;
	dma_len = ib_sg_dma_len(dev, &sg[0]);
	dma_addr = ib_sg_dma_address(dev, &sg[0]);

	/* this second loop maps the scatterlist addresses to rdma_iu->ib_sge */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		sge = riu->wr.sg_list;
		k = 0;

		while (rsize > 0 && tsize > 0) {
			sge->addr = dma_addr;
			sge->lkey = ch->sport->sdev->pd->local_dma_lkey;

			if (rsize >= dma_len) {
				sge->length =
					(tsize < dma_len) ? tsize : dma_len;
				tsize -= dma_len;
				rsize -= dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = ib_sg_dma_len(
								dev, sg);
						dma_addr = ib_sg_dma_address(
								dev, sg);
					}
				}
			} else {
				sge->length = (tsize < rsize) ? tsize : rsize;
				tsize -= rsize;
				dma_len -= rsize;
				dma_addr += rsize;
				rsize = 0;
			}

			++k;
			if (k == riu->wr.num_sge && rsize > 0 && tsize > 0) {
				++riu;
				sge = riu->wr.sg_list;
				k = 0;
			} else if (rsize > 0 && tsize > 0)
				++sge;
		}
	}

	return 0;

free_mem:
	srpt_unmap_sg_to_ib_sge(ch, ioctx);

	return -ENOMEM;
}

/**
 * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	unsigned long flags;

	BUG_ON(!ch);

	ioctx = NULL;
	spin_lock_irqsave(&ch->spinlock, flags);
	if (!list_empty(&ch->free_list)) {
		ioctx = list_first_entry(&ch->free_list,
					 struct srpt_send_ioctx, free_list);
		list_del(&ioctx->free_list);
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	if (!ioctx)
		return ioctx;

	BUG_ON(ioctx->ch != ch);
	spin_lock_init(&ioctx->spinlock);
	ioctx->state = SRPT_STATE_NEW;
	ioctx->n_rbuf = 0;
	ioctx->rbufs = NULL;
	ioctx->n_rdma = 0;
	ioctx->n_rdma_wrs = 0;
	ioctx->rdma_wrs = NULL;
	ioctx->mapped_sg_count = 0;
	init_completion(&ioctx->tx_done);
	ioctx->queue_status_only = false;
	/*
	 * transport_init_se_cmd() does not initialize all fields, so do it
	 * here.
	 */
	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));

	return ioctx;
}

/**
 * srpt_abort_cmd() - Abort a SCSI command.
 * @ioctx: I/O context associated with the SCSI command.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	/*
	 * If the command is in a state where the target core is waiting for
	 * the ib_srpt driver, change the state to the next state. Changing
	 * the state of the command from SRPT_STATE_NEED_DATA to
	 * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
	 * function a second time.
	 */

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEED_DATA:
		ioctx->state = SRPT_STATE_DATA_IN;
		break;
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_CMD_RSP_SENT:
	case SRPT_STATE_MGMT_RSP_SENT:
		ioctx->state = SRPT_STATE_DONE;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	if (state == SRPT_STATE_DONE) {
		struct srpt_rdma_ch *ch = ioctx->ch;

		BUG_ON(ch->sess == NULL);

		target_put_sess_cmd(&ioctx->cmd);
		goto out;
	}

	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
		 ioctx->cmd.tag);

	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_MGMT:
		/*
		 * Do nothing - defer abort processing until
		 * srpt_queue_response() is invoked.
		 */
		WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
		break;
	case SRPT_STATE_NEED_DATA:
		/* DMA_TO_DEVICE (write) - RDMA read error. */

		/* XXX(hch): this is a horrible layering violation.. */
		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
		ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
		break;
	case SRPT_STATE_CMD_RSP_SENT:
		/*
		 * SRP_RSP sending failed or the SRP_RSP send completion has
		 * not been received in time.
		 */
		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
		target_put_sess_cmd(&ioctx->cmd);
		break;
	case SRPT_STATE_MGMT_RSP_SENT:
		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
		target_put_sess_cmd(&ioctx->cmd);
		break;
	default:
		WARN(1, "Unexpected command state (%d)", state);
		break;
	}

out:
	return state;
}

/**
 * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
 * the data that has been transferred via IB RDMA had to be postponed until the
 * check_stop_free() callback. None of this is necessary anymore and needs to
 * be cleaned up.
 */
static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

	WARN_ON(ioctx->n_rdma <= 0);
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);
		srpt_abort_cmd(ioctx);
		return;
	}

	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
					SRPT_STATE_DATA_IN))
		target_execute_cmd(&ioctx->cmd);
	else
		pr_err("%s[%d]: wrong state = %d\n", __func__,
		       __LINE__, srpt_get_cmd_state(ioctx));
}

static void srpt_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		pr_info("RDMA_WRITE for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);
		srpt_abort_cmd(ioctx);
	}
}

/**
 * srpt_build_cmd_rsp() - Build an SRP_RSP response.
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *   be built in the buffer ioctx->buf points at and hence this function will
 *   overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx, u64 tag,
			      int status)
{
	struct srp_rsp *srp_rsp;
	const u8 *sense_data;
	int sense_data_len, max_sense_len;

	/*
	 * The lowest bit of all SAM-3 status codes is zero (see also
	 * paragraph 5.3 in SAM-3).
	 */
	WARN_ON(status & 1);

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);

	sense_data = ioctx->sense_data;
	sense_data_len = ioctx->cmd.scsi_sense_length;
	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

	memset(srp_rsp, 0, sizeof *srp_rsp);
	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;
	srp_rsp->status = status;

	if (sense_data_len) {
		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
		if (sense_data_len > max_sense_len) {
			pr_warn("truncated sense data from %d to %d"
				" bytes\n", sense_data_len, max_sense_len);
			sense_data_len = max_sense_len;
		}

		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
		memcpy(srp_rsp + 1, sense_data, sense_data_len);
	}

	return sizeof(*srp_rsp) + sense_data_len;
}

/**
 * srpt_build_tskmgmt_rsp() - Build a task management response.
 * @ch:       RDMA channel through which the request has been received.
 * @ioctx:    I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag:      Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response.
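 *
 * Note: the response data of a task management response is four bytes long,
 * with the RSP_CODE stored in the last byte (see also the SRP r16a
 * document); hence the data[3] assignment below.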
 */
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  u8 rsp_code, u64 tag)
{
	struct srp_rsp *srp_rsp;
	int resp_data_len;
	int resp_len;

	resp_data_len = 4;
	resp_len = sizeof(*srp_rsp) + resp_data_len;

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);
	memset(srp_rsp, 0, sizeof *srp_rsp);

	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;

	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
	srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
	srp_rsp->data[3] = rsp_code;

	return resp_len;
}

#define NO_SUCH_LUN ((uint64_t)-1LL)

/*
 * SCSI LUN addressing method. See also SAM-2 and the section about
 * eight byte LUNs.
 */
enum scsi_lun_addr_method {
	SCSI_LUN_ADDR_METHOD_PERIPHERAL   = 0,
	SCSI_LUN_ADDR_METHOD_FLAT         = 1,
	SCSI_LUN_ADDR_METHOD_LUN          = 2,
	SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
};

/**
 * srpt_unpack_lun() - Convert from network LUN to linear LUN.
 *
 * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
 * order (big endian) to a linear LUN. Supports three LUN addressing methods:
 * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
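 *
 * For example, the 8-byte LUN 00 01 00 00 00 00 00 00 (peripheral
 * addressing) unpacks to LUN 1.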
 */
static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
{
	uint64_t res = NO_SUCH_LUN;
	int addressing_method;

	if (unlikely(len < 2)) {
		pr_err("Illegal LUN length %d, expected 2 bytes or more\n",
		       len);
		goto out;
	}

	switch (len) {
	case 8:
		if ((*((__be64 *)lun) &
		     cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
			goto out_err;
		break;
	case 4:
		if (*((__be16 *)&lun[2]) != 0)
			goto out_err;
		break;
	case 6:
		if (*((__be32 *)&lun[2]) != 0)
			goto out_err;
		break;
	case 2:
		break;
	default:
		goto out_err;
	}

	addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
	switch (addressing_method) {
	case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
	case SCSI_LUN_ADDR_METHOD_FLAT:
	case SCSI_LUN_ADDR_METHOD_LUN:
		res = *(lun + 1) | (((*lun) & 0x3f) << 8);
		break;

	case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
	default:
		pr_err("Unimplemented LUN addressing method %u\n",
		       addressing_method);
		break;
	}

out:
	return res;

out_err:
	pr_err("Support for multi-level LUNs has not yet been implemented\n");
	goto out;
}

static int srpt_check_stop_free(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(cmd,
				struct srpt_send_ioctx, cmd);

	return target_put_sess_cmd(&ioctx->cmd);
}

/**
 * srpt_handle_cmd() - Process SRP_CMD.
 */
static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
			   struct srpt_recv_ioctx *recv_ioctx,
			   struct srpt_send_ioctx *send_ioctx)
{
	struct se_cmd *cmd;
	struct srp_cmd *srp_cmd;
	uint64_t unpacked_lun;
	u64 data_len;
	enum dma_data_direction dir;
	sense_reason_t ret;
	int rc;

	BUG_ON(!send_ioctx);

	srp_cmd = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;
	cmd->tag = srp_cmd->tag;

	switch (srp_cmd->task_attr) {
	case SRP_CMD_SIMPLE_Q:
		cmd->sam_task_attr = TCM_SIMPLE_TAG;
		break;
	case SRP_CMD_ORDERED_Q:
	default:
		cmd->sam_task_attr = TCM_ORDERED_TAG;
		break;
	case SRP_CMD_HEAD_OF_Q:
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case SRP_CMD_ACA:
		cmd->sam_task_attr = TCM_ACA_TAG;
		break;
	}

	if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
		pr_err("0x%llx: parsing SRP descriptor table failed.\n",
		       srp_cmd->tag);
		ret = TCM_INVALID_CDB_FIELD;
		goto send_sense;
	}

	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
				       sizeof(srp_cmd->lun));
	rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
			&send_ioctx->sense_data[0], unpacked_lun, data_len,
			TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto send_sense;
	}
	return 0;

send_sense:
	transport_send_check_condition_and_sense(cmd, ret, 0);
	return -1;
}

/**
 * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
 * @ioctx: I/O context of the SRP task management request.
 * @tag:   Tag of the SRP command to look up.
 *
 * Returns zero if the target core will process the task management
 * request asynchronously.
 *
 * Note: It is assumed that the initiator serializes tag-based task management
 * requests.
 */
static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
{
	struct srpt_device *sdev;
	struct srpt_rdma_ch *ch;
	struct srpt_send_ioctx *target;
	int ret, i;

	ret = -EINVAL;
	ch = ioctx->ch;
	BUG_ON(!ch);
	BUG_ON(!ch->sport);
	sdev = ch->sport->sdev;
	BUG_ON(!sdev);
	spin_lock_irq(&sdev->spinlock);
	for (i = 0; i < ch->rq_size; ++i) {
		target = ch->ioctx_ring[i];
		if (target->cmd.se_lun == ioctx->cmd.se_lun &&
		    target->cmd.tag == tag &&
		    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
			ret = 0;
			/* now let the target core abort &target->cmd; */
			break;
		}
	}
	spin_unlock_irq(&sdev->spinlock);
	return ret;
}

static int srp_tmr_to_tcm(int fn)
{
	switch (fn) {
	case SRP_TSK_ABORT_TASK:
		return TMR_ABORT_TASK;
	case SRP_TSK_ABORT_TASK_SET:
		return TMR_ABORT_TASK_SET;
	case SRP_TSK_CLEAR_TASK_SET:
		return TMR_CLEAR_TASK_SET;
	case SRP_TSK_LUN_RESET:
		return TMR_LUN_RESET;
	case SRP_TSK_CLEAR_ACA:
		return TMR_CLEAR_ACA;
	default:
		return -1;
	}
}

/**
 * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
 *
 * Returns 0 if and only if the request will be processed by the target core.
 *
 * For more information about SRP_TSK_MGMT information units, see also section
 * 6.7 in the SRP r16a document.
 */
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
				 struct srpt_recv_ioctx *recv_ioctx,
				 struct srpt_send_ioctx *send_ioctx)
{
	struct srp_tsk_mgmt *srp_tsk;
	struct se_cmd *cmd;
	struct se_session *sess = ch->sess;
	uint64_t unpacked_lun;
	uint32_t tag = 0;
	int tcm_tmr;
	int rc;

	BUG_ON(!send_ioctx);

	srp_tsk = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;

	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
		 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
		 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);

	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
	send_ioctx->cmd.tag = srp_tsk->tag;
	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
	if (tcm_tmr < 0) {
		send_ioctx->cmd.se_tmr_req->response =
			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		goto fail;
	}
	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
				       sizeof(srp_tsk->lun));

	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
		rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
		if (rc < 0) {
			send_ioctx->cmd.se_tmr_req->response =
					TMR_TASK_DOES_NOT_EXIST;
			goto fail;
		}
		tag = srp_tsk->task_tag;
	}
	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
				srp_tsk, tcm_tmr, GFP_KERNEL, tag,
				TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
		goto fail;
	}
	return;
fail:
	transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
}

/**
 * srpt_handle_new_iu() - Process a newly received information unit.
 * @ch:    RDMA channel through which the information unit has been received.
 * @ioctx: SRPT I/O context associated with the information unit.
 */
static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
			       struct srpt_recv_ioctx *recv_ioctx,
			       struct srpt_send_ioctx *send_ioctx)
{
	struct srp_cmd *srp_cmd;
	enum rdma_ch_state ch_state;

	BUG_ON(!ch);
	BUG_ON(!recv_ioctx);

	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
				   recv_ioctx->ioctx.dma, srp_max_req_size,
				   DMA_FROM_DEVICE);

	ch_state = srpt_get_ch_state(ch);
	if (unlikely(ch_state == CH_CONNECTING)) {
		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
		goto out;
	}

	if (unlikely(ch_state != CH_LIVE))
		goto out;

	srp_cmd = recv_ioctx->ioctx.buf;
	if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
		if (!send_ioctx)
			send_ioctx = srpt_get_send_ioctx(ch);
		if (unlikely(!send_ioctx)) {
			list_add_tail(&recv_ioctx->wait_list,
				      &ch->cmd_wait_list);
			goto out;
		}
	}

	switch (srp_cmd->opcode) {
	case SRP_CMD:
		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_TSK_MGMT:
		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_I_LOGOUT:
		pr_err("Not yet implemented: SRP_I_LOGOUT\n");
		break;
	case SRP_CRED_RSP:
		pr_debug("received SRP_CRED_RSP\n");
		break;
	case SRP_AER_RSP:
		pr_debug("received SRP_AER_RSP\n");
		break;
	case SRP_RSP:
		pr_err("Received SRP_RSP\n");
		break;
	default:
		pr_err("received IU with unknown opcode 0x%x\n",
		       srp_cmd->opcode);
		break;
	}

	srpt_post_recv(ch->sport->sdev, recv_ioctx);
out:
	return;
}

static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_recv_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);

	if (wc->status == IB_WC_SUCCESS) {
		int req_lim;

		req_lim = atomic_dec_return(&ch->req_lim);
		if (unlikely(req_lim < 0))
			pr_err("req_lim = %d < 0\n", req_lim);
		srpt_handle_new_iu(ch, ioctx, NULL);
	} else {
		pr_info("receiving failed for ioctx %p with status %d\n",
			ioctx, wc->status);
	}
}

/**
 * Note: Although this has not yet been observed during tests, at least in
 * theory it is possible that the srpt_get_send_ioctx() call invoked by
 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
 * value in each response is set to one, and it is possible that this response
 * makes the initiator send a new request before the send completion for that
 * response has been processed. This could e.g. happen if the call to
 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
 * if IB retransmission causes generation of the send completion to be
 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
 * are queued on cmd_wait_list. The code below processes these delayed
 * requests one at a time.
 */
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
	enum srpt_command_state state;

	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
		state != SRPT_STATE_MGMT_RSP_SENT);

	atomic_inc(&ch->sq_wr_avail);

	if (wc->status != IB_WC_SUCCESS) {
		pr_info("sending response for ioctx 0x%p failed"
			" with status %d\n", ioctx, wc->status);

		atomic_dec(&ch->req_lim);
		srpt_abort_cmd(ioctx);
		goto out;
	}

	if (state != SRPT_STATE_DONE) {
		srpt_unmap_sg_to_ib_sge(ch, ioctx);
		transport_generic_free_cmd(&ioctx->cmd, 0);
	} else {
		pr_err("IB completion has been received too late for"
		       " wr_id = %u.\n", ioctx->ioctx.index);
	}

out:
	while (!list_empty(&ch->cmd_wait_list) &&
	       srpt_get_ch_state(ch) == CH_LIVE &&
	       (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
		struct srpt_recv_ioctx *recv_ioctx;

		recv_ioctx = list_first_entry(&ch->cmd_wait_list,
					      struct srpt_recv_ioctx,
					      wait_list);
		list_del(&recv_ioctx->wait_list);
		srpt_handle_new_iu(ch, recv_ioctx, ioctx);
	}
}

/**
 * srpt_create_ch_ib() - Create receive and send completion queues.
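 *
 * Also creates the RC queue pair. If QP allocation fails with -ENOMEM, the
 * send queue size is halved and allocation is retried as long as the result
 * stays at or above MIN_SRPT_SQ_SIZE.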
 */
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
	struct ib_qp_init_attr *qp_init;
	struct srpt_port *sport = ch->sport;
	struct srpt_device *sdev = sport->sdev;
	u32 srp_sq_size = sport->port_attrib.srp_sq_size;
	int ret;

	WARN_ON(ch->rq_size < 1);

	ret = -ENOMEM;
	qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
	if (!qp_init)
		goto out;

retry:
	ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + srp_sq_size,
			0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
	if (IS_ERR(ch->cq)) {
		ret = PTR_ERR(ch->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       ch->rq_size + srp_sq_size, ret);
		goto out;
	}

	qp_init->qp_context = (void *)ch;
	qp_init->event_handler
		= (void(*)(struct ib_event *, void*))srpt_qp_event;
	qp_init->send_cq = ch->cq;
	qp_init->recv_cq = ch->cq;
	qp_init->srq = sdev->srq;
	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_init->qp_type = IB_QPT_RC;
	qp_init->cap.max_send_wr = srp_sq_size;
	qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;

	ch->qp = ib_create_qp(sdev->pd, qp_init);
	if (IS_ERR(ch->qp)) {
		ret = PTR_ERR(ch->qp);
		if (ret == -ENOMEM) {
			srp_sq_size /= 2;
			if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
				ib_destroy_cq(ch->cq);
				goto retry;
			}
		}
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
		 qp_init->cap.max_send_wr, ch->cm_id);

	ret = srpt_init_ch_qp(ch, ch->qp);
	if (ret)
		goto err_destroy_qp;

out:
	kfree(qp_init);
	return ret;

err_destroy_qp:
	ib_destroy_qp(ch->qp);
err_destroy_cq:
	ib_free_cq(ch->cq);
	goto out;
}

static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
{
	ib_destroy_qp(ch->qp);
	ib_free_cq(ch->cq);
}

/**
 * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
 *
 * Reset the QP and make sure all resources associated with the channel will
 * be deallocated at an appropriate time.
 *
 * Note: The caller must hold ch->sport->sdev->spinlock.
 */
static void __srpt_close_ch(struct srpt_rdma_ch *ch)
{
	enum rdma_ch_state prev_state;
	unsigned long flags;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev_state = ch->state;
	switch (prev_state) {
	case CH_CONNECTING:
	case CH_LIVE:
		ch->state = CH_DISCONNECTING;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	switch (prev_state) {
	case CH_CONNECTING:
		ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
			       NULL, 0);
		break;
	case CH_LIVE:
		if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
			pr_err("sending CM DREQ failed.\n");
		break;
	case CH_DISCONNECTING:
		break;
	case CH_DRAINING:
		break;
	case CH_RELEASING:
		break;
	}
}

/**
 * srpt_close_ch() - Close an RDMA channel.
 */
static void srpt_close_ch(struct srpt_rdma_ch *ch)
{
	struct srpt_device *sdev;

	sdev = ch->sport->sdev;
	spin_lock_irq(&sdev->spinlock);
	__srpt_close_ch(ch);
	spin_unlock_irq(&sdev->spinlock);
}

/**
 * srpt_shutdown_session() - Whether or not a session may be shut down.
 */
static int srpt_shutdown_session(struct se_session *se_sess)
{
	struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
	unsigned long flags;

	spin_lock_irqsave(&ch->spinlock, flags);
	if (ch->in_shutdown) {
		spin_unlock_irqrestore(&ch->spinlock, flags);
		return true;
	}

	ch->in_shutdown = true;
	target_sess_cmd_list_set_waiting(se_sess);
	spin_unlock_irqrestore(&ch->spinlock, flags);

	return true;
}

/**
 * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
 * @cm_id: Pointer to the CM ID of the channel to be drained.
 *
 * Note: Must be called from inside srpt_cm_handler to avoid a race between
 * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
 * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
 * waits until all target sessions for the associated IB device have been
 * unregistered and target session registration involves a call to
 * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
 * this function has finished).
 */
static void srpt_drain_channel(struct ib_cm_id *cm_id)
{
	struct srpt_device *sdev;
	struct srpt_rdma_ch *ch;
	int ret;
	bool do_reset = false;

	WARN_ON_ONCE(irqs_disabled());

	sdev = cm_id->context;
	BUG_ON(!sdev);
	spin_lock_irq(&sdev->spinlock);
	list_for_each_entry(ch, &sdev->rch_list, list) {
		if (ch->cm_id == cm_id) {
			do_reset = srpt_test_and_set_ch_state(ch,
					CH_CONNECTING, CH_DRAINING) ||
				   srpt_test_and_set_ch_state(ch,
					CH_LIVE, CH_DRAINING) ||
				   srpt_test_and_set_ch_state(ch,
					CH_DISCONNECTING, CH_DRAINING);
			break;
		}
	}
	spin_unlock_irq(&sdev->spinlock);

	if (do_reset) {
		if (ch->sess)
			srpt_shutdown_session(ch->sess);

		ret = srpt_ch_qp_err(ch);
		if (ret < 0)
			pr_err("Setting queue pair in error state"
			       " failed: %d\n", ret);
	}
}

/**
 * srpt_find_channel() - Look up an RDMA channel.
 * @cm_id: Pointer to the CM ID of the channel to be looked up.
 *
 * Return NULL if no matching RDMA channel has been found.
 */
static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
				    struct ib_cm_id *cm_id)
{
	struct srpt_rdma_ch *ch;
	bool found;

	WARN_ON_ONCE(irqs_disabled());
	BUG_ON(!sdev);

	found = false;
	spin_lock_irq(&sdev->spinlock);
	list_for_each_entry(ch, &sdev->rch_list, list) {
		if (ch->cm_id == cm_id) {
			found = true;
			break;
		}
	}
	spin_unlock_irq(&sdev->spinlock);

	return found ? ch : NULL;
}

/**
 * srpt_release_channel() - Release channel resources.
 *
 * Schedules the actual release because:
 * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
 *   trigger a deadlock.
 * - It is not safe to call TCM transport_* functions from interrupt context.
 */
static void srpt_release_channel(struct srpt_rdma_ch *ch)
{
	schedule_work(&ch->release_work);
}

static void srpt_release_channel_work(struct work_struct *w)
{
	struct srpt_rdma_ch *ch;
	struct srpt_device *sdev;
	struct se_session *se_sess;

	ch = container_of(w, struct srpt_rdma_ch, release_work);
	pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
		 ch->release_done);

	sdev = ch->sport->sdev;
	BUG_ON(!sdev);

	se_sess = ch->sess;
	BUG_ON(!se_sess);

	target_wait_for_sess_cmds(se_sess);

	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
	ch->sess = NULL;

	ib_destroy_cm_id(ch->cm_id);

	srpt_destroy_ch_ib(ch);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->rsp_size, DMA_TO_DEVICE);

	spin_lock_irq(&sdev->spinlock);
	list_del(&ch->list);
	spin_unlock_irq(&sdev->spinlock);

	if (ch->release_done)
		complete(ch->release_done);

	wake_up(&sdev->ch_releaseQ);

	kfree(ch);
}

/**
 * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
 *
 * Ownership of the cm_id is transferred to the target session if this
 * function returns zero. Otherwise the caller remains the owner of cm_id.
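 *
 * The SRP_LOGIN_RSP or SRP_LOGIN_REJ information unit is sent back to the
 * initiator as the private data of the IB CM REP or REJ message,
 * respectively.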
2232 static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2233 struct ib_cm_req_event_param *param,
2236 struct srpt_device *sdev = cm_id->context;
2237 struct srpt_port *sport = &sdev->port[param->port - 1];
2238 struct srp_login_req *req;
2239 struct srp_login_rsp *rsp;
2240 struct srp_login_rej *rej;
2241 struct ib_cm_rep_param *rep_param;
2242 struct srpt_rdma_ch *ch, *tmp_ch;
2243 struct se_node_acl *se_acl;
2248 WARN_ON_ONCE(irqs_disabled());
2250 if (WARN_ON(!sdev || !private_data))
2253 req = (struct srp_login_req *)private_data;
2255 it_iu_len = be32_to_cpu(req->req_it_iu_len);
2257 pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
2258 " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
2259 " (guid=0x%llx:0x%llx)\n",
2260 be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
2261 be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
2262 be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
2263 be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
2266 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
2267 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
2269 rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
2270 rej = kzalloc(sizeof *rej, GFP_KERNEL);
2271 rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
2273 if (!rsp || !rej || !rep_param) {
2278 if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2279 rej->reason = cpu_to_be32(
2280 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2282 pr_err("rejected SRP_LOGIN_REQ because its"
2283 " length (%d bytes) is out of range (%d .. %d)\n",
2284 it_iu_len, 64, srp_max_req_size);
2288 if (!sport->enabled) {
2289 rej->reason = cpu_to_be32(
2290 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2292 pr_err("rejected SRP_LOGIN_REQ because the target port"
2293 " has not yet been enabled\n");
2297 if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2298 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
2300 spin_lock_irq(&sdev->spinlock);
2302 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
2303 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
2304 && !memcmp(ch->t_port_id, req->target_port_id, 16)
2305 && param->port == ch->sport->port
2306 && param->listen_id == ch->sport->sdev->cm_id
2308 enum rdma_ch_state ch_state;
2310 ch_state = srpt_get_ch_state(ch);
2311 if (ch_state != CH_CONNECTING
2312 && ch_state != CH_LIVE)
2315 /* found an existing channel */
2316 pr_debug("Found existing channel %s"
2317 " cm_id= %p state= %d\n",
2318 ch->sess_name, ch->cm_id, ch_state);
2320 __srpt_close_ch(ch);
2323 SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2327 spin_unlock_irq(&sdev->spinlock);
2330 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2332 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2333 || *(__be64 *)(req->target_port_id + 8) !=
2334 cpu_to_be64(srpt_service_guid)) {
2335 rej->reason = cpu_to_be32(
2336 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2338 pr_err("rejected SRP_LOGIN_REQ because it"
2339 " has an invalid target port identifier.\n");
2343 ch = kzalloc(sizeof *ch, GFP_KERNEL);
2345 rej->reason = cpu_to_be32(
2346 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2347 pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
2352 INIT_WORK(&ch->release_work, srpt_release_channel_work);
2353 memcpy(ch->i_port_id, req->initiator_port_id, 16);
2354 memcpy(ch->t_port_id, req->target_port_id, 16);
2355 ch->sport = &sdev->port[param->port - 1];
2358 * Avoid QUEUE_FULL conditions by limiting the number of buffers used
2359 * for the SRP protocol to the command queue size.
2361 ch->rq_size = SRPT_RQ_SIZE;
2362 spin_lock_init(&ch->spinlock);
2363 ch->state = CH_CONNECTING;
2364 INIT_LIST_HEAD(&ch->cmd_wait_list);
2365 ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2367 ch->ioctx_ring = (struct srpt_send_ioctx **)
2368 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2369 sizeof(*ch->ioctx_ring[0]),
2370 ch->rsp_size, DMA_TO_DEVICE);
2371 if (!ch->ioctx_ring)
2374 INIT_LIST_HEAD(&ch->free_list);
2375 for (i = 0; i < ch->rq_size; i++) {
2376 ch->ioctx_ring[i]->ch = ch;
2377 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2380 ret = srpt_create_ch_ib(ch);
2382 rej->reason = cpu_to_be32(
2383 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2384 pr_err("rejected SRP_LOGIN_REQ because creating"
2385 " a new RDMA channel failed.\n");
2389 ret = srpt_ch_qp_rtr(ch, ch->qp);
2391 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2392 pr_err("rejected SRP_LOGIN_REQ because enabling"
2393 " RTR failed (error code = %d)\n", ret);
2398 * Use the initiator port identifier as the session name. When it is
2399 * checked against se_node_acl->initiatorname[], it may occur with or
2400 * without a preceding '0x'.
2402 snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
2403 be64_to_cpu(*(__be64 *)ch->i_port_id),
2404 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
2406 pr_debug("registering session %s\n", ch->sess_name);
2407 p = &ch->sess_name[0];
2409 ch->sess = transport_init_session(TARGET_PROT_NORMAL);
2410 if (IS_ERR(ch->sess)) {
2411 rej->reason = cpu_to_be32(
2412 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2413 pr_debug("Failed to create session\n");
2418 se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p);
2420 pr_info("Rejected login because no ACL has been"
2421 " configured yet for initiator %s.\n", ch->sess_name);
2423 * XXX: Hack to retry ch->i_port_id without the leading '0x'.
2425 if (p == &ch->sess_name[0]) {
2429 rej->reason = cpu_to_be32(
2430 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2431 transport_free_session(ch->sess);
2434 ch->sess->se_node_acl = se_acl;
2436 transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);
2438 pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
2439 ch->sess_name, ch->cm_id);
2441 /* create srp_login_response */
2442 rsp->opcode = SRP_LOGIN_RSP;
2443 rsp->tag = req->tag;
2444 rsp->max_it_iu_len = req->req_it_iu_len;
2445 rsp->max_ti_iu_len = req->req_it_iu_len;
2446 ch->max_ti_iu_len = it_iu_len;
2447 rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2448 | SRP_BUF_FORMAT_INDIRECT);
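/*
 * SRP flow control is credit based: grant the initiator one request
 * credit per receive buffer so that it never sends more information
 * units than this channel can queue.
 */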
2449 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2450 atomic_set(&ch->req_lim, ch->rq_size);
2451 atomic_set(&ch->req_lim_delta, 0);
2453 /* create cm reply */
2454 rep_param->qp_num = ch->qp->qp_num;
2455 rep_param->private_data = (void *)rsp;
2456 rep_param->private_data_len = sizeof *rsp;
2457 rep_param->rnr_retry_count = 7;
2458 rep_param->flow_control = 1;
2459 rep_param->failover_accepted = 0;
2461 rep_param->responder_resources = 4;
2462 rep_param->initiator_depth = 4;
2464 ret = ib_send_cm_rep(cm_id, rep_param);
2466 pr_err("sending SRP_LOGIN_REQ response failed"
2467 " (error code = %d)\n", ret);
2468 goto release_channel;
2471 spin_lock_irq(&sdev->spinlock);
2472 list_add_tail(&ch->list, &sdev->rch_list);
2473 spin_unlock_irq(&sdev->spinlock);
2478 srpt_set_ch_state(ch, CH_RELEASING);
2479 transport_deregister_session_configfs(ch->sess);
2480 transport_deregister_session(ch->sess);
2484 srpt_destroy_ch_ib(ch);
2487 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2488 ch->sport->sdev, ch->rq_size,
2489 ch->rsp_size, DMA_TO_DEVICE);
2494 rej->opcode = SRP_LOGIN_REJ;
2495 rej->tag = req->tag;
2496 rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2497 | SRP_BUF_FORMAT_INDIRECT);
2499 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2500 (void *)rej, sizeof *rej);
2510 static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
2512 pr_info("Received IB REJ for cm_id %p.\n", cm_id);
2513 srpt_drain_channel(cm_id);
2517 * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or IB_CM_USER_ESTABLISHED event.
2519 * An IB_CM_RTU_RECEIVED message indicates that the connection is established
2520 * and that the recipient may begin transmitting (RTU = ready to use).
2522 static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
2524 struct srpt_rdma_ch *ch;
2527 ch = srpt_find_channel(cm_id->context, cm_id);
2530 if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
2531 struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
2533 ret = srpt_ch_qp_rts(ch, ch->qp);
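/*
 * Process the SRP information units that were received while the
 * channel was still in the connecting state.
 */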
2535 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
2537 list_del(&ioctx->wait_list);
2538 srpt_handle_new_iu(ch, ioctx, NULL);
2545 static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
2547 pr_info("Received IB TimeWait exit for cm_id %p.\n", cm_id);
2548 srpt_drain_channel(cm_id);
2551 static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
2553 pr_info("Received IB REP error for cm_id %p.\n", cm_id);
2554 srpt_drain_channel(cm_id);
2558 * srpt_cm_dreq_recv() - Process reception of a DREQ message.
2560 static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
2562 struct srpt_rdma_ch *ch;
2563 unsigned long flags;
2564 bool send_drep = false;
2566 ch = srpt_find_channel(cm_id->context, cm_id);
2569 pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
2571 spin_lock_irqsave(&ch->spinlock, flags);
2572 switch (ch->state) {
2576 ch->state = CH_DISCONNECTING;
2578 case CH_DISCONNECTING:
2581 WARN(true, "unexpected channel state %d\n", ch->state);
2584 spin_unlock_irqrestore(&ch->spinlock, flags);
2587 if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
2588 pr_err("Sending IB DREP failed.\n");
2589 pr_info("Received DREQ and sent DREP for session %s.\n",
2595 * srpt_cm_drep_recv() - Process reception of a DREP message.
2597 static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
2599 pr_info("Received InfiniBand DREP message for cm_id %p.\n", cm_id);
2600 srpt_drain_channel(cm_id);
2604 * srpt_cm_handler() - IB connection manager callback function.
2606 * A non-zero return value will cause the caller to destroy the CM ID.
2608 * Note: srpt_cm_handler() must only return a non-zero value when
2609 * srpt_cm_req_recv() failed to transfer ownership of the cm_id to a
2610 * channel. Returning a non-zero value in any other case would trigger a
2611 * race with the ib_destroy_cm_id() call in srpt_release_channel().
2613 static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2618 switch (event->event) {
2619 case IB_CM_REQ_RECEIVED:
2620 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
2621 event->private_data);
2623 case IB_CM_REJ_RECEIVED:
2624 srpt_cm_rej_recv(cm_id);
2626 case IB_CM_RTU_RECEIVED:
2627 case IB_CM_USER_ESTABLISHED:
2628 srpt_cm_rtu_recv(cm_id);
2630 case IB_CM_DREQ_RECEIVED:
2631 srpt_cm_dreq_recv(cm_id);
2633 case IB_CM_DREP_RECEIVED:
2634 srpt_cm_drep_recv(cm_id);
2636 case IB_CM_TIMEWAIT_EXIT:
2637 srpt_cm_timewait_exit(cm_id);
2639 case IB_CM_REP_ERROR:
2640 srpt_cm_rep_error(cm_id);
2642 case IB_CM_DREQ_ERROR:
2643 pr_info("Received IB DREQ ERROR event.\n");
2645 case IB_CM_MRA_RECEIVED:
2646 pr_info("Received IB MRA event\n");
2649 pr_err("received unrecognized IB CM event %d\n", event->event);
2657 * srpt_perform_rdmas() - Perform IB RDMA.
2659 * Returns zero upon success or a negative number upon failure.
2661 static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2662 struct srpt_send_ioctx *ioctx)
2664 struct ib_send_wr *bad_wr;
2665 int sq_wr_avail, ret, i;
2666 enum dma_data_direction dir;
2667 const int n_rdma = ioctx->n_rdma;
2669 dir = ioctx->cmd.data_direction;
2670 if (dir == DMA_TO_DEVICE) {
2673 sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
2674 if (sq_wr_avail < 0) {
2675 pr_warn("IB send queue full (needed %d)\n",
2681 for (i = 0; i < n_rdma; i++) {
2682 struct ib_send_wr *wr = &ioctx->rdma_wrs[i].wr;
2684 wr->opcode = (dir == DMA_FROM_DEVICE) ?
2685 IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
2687 if (i == n_rdma - 1) {
2688 /* Only request a completion event for the last RDMA work request. */
2689 if (dir == DMA_TO_DEVICE) {
2690 wr->send_flags = IB_SEND_SIGNALED;
2691 ioctx->rdma_cqe.done = srpt_rdma_read_done;
2693 ioctx->rdma_cqe.done = srpt_rdma_write_done;
2695 wr->wr_cqe = &ioctx->rdma_cqe;
2699 wr->next = &ioctx->rdma_wrs[i + 1].wr;
2703 ret = ib_post_send(ch->qp, &ioctx->rdma_wrs->wr, &bad_wr);
2705 pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n",
2706 __func__, __LINE__, ret, i, n_rdma);
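/* On failure, hand back the send queue slots reserved above. */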
2708 if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
2709 atomic_add(n_rdma, &ch->sq_wr_avail);
2714 * srpt_xfer_data() - Map the data buffer and start the RDMA transfer.
2716 static int srpt_xfer_data(struct srpt_rdma_ch *ch,
2717 struct srpt_send_ioctx *ioctx)
2721 ret = srpt_map_sg_to_ib_sge(ch, ioctx);
2723 pr_err("%s[%d] ret=%d\n", __func__, __LINE__, ret);
2727 ret = srpt_perform_rdmas(ch, ioctx);
2729 if (ret == -EAGAIN || ret == -ENOMEM)
2730 pr_info("%s[%d] queue full -- ret=%d\n",
2731 __func__, __LINE__, ret);
2733 pr_err("%s[%d] fatal error -- ret=%d\n",
2734 __func__, __LINE__, ret);
2741 srpt_unmap_sg_to_ib_sge(ch, ioctx);
2745 static int srpt_write_pending_status(struct se_cmd *se_cmd)
2747 struct srpt_send_ioctx *ioctx;
2749 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2750 return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
2754 * srpt_write_pending() - Start data transfer from initiator to target (write).
2756 static int srpt_write_pending(struct se_cmd *se_cmd)
2758 struct srpt_rdma_ch *ch;
2759 struct srpt_send_ioctx *ioctx;
2760 enum srpt_command_state new_state;
2761 enum rdma_ch_state ch_state;
2764 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2766 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2767 WARN_ON(new_state == SRPT_STATE_DONE);
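/*
 * SRPT_STATE_NEED_DATA: the command now waits for the RDMA reads that
 * fetch the data-out buffer from the initiator.
 */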
2772 ch_state = srpt_get_ch_state(ch);
2775 WARN(true, "unexpected channel state %d\n", ch_state);
2780 case CH_DISCONNECTING:
2783 pr_debug("cmd with tag %lld: channel disconnecting\n",
2785 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2789 ret = srpt_xfer_data(ch, ioctx);
2795 static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
2797 switch (tcm_mgmt_status) {
2798 case TMR_FUNCTION_COMPLETE:
2799 return SRP_TSK_MGMT_SUCCESS;
2800 case TMR_FUNCTION_REJECTED:
2801 return SRP_TSK_MGMT_FUNC_NOT_SUPP;
2803 return SRP_TSK_MGMT_FAILED;
2807 * srpt_queue_response() - Transmits the response to a SCSI command.
2809 * Callback function called by the TCM core. Must not block since it can be
2810 * invoked in the context of the IB completion handler.
2812 static void srpt_queue_response(struct se_cmd *cmd)
2814 struct srpt_rdma_ch *ch;
2815 struct srpt_send_ioctx *ioctx;
2816 enum srpt_command_state state;
2817 unsigned long flags;
2819 enum dma_data_direction dir;
2823 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2827 spin_lock_irqsave(&ioctx->spinlock, flags);
2828 state = ioctx->state;
2830 case SRPT_STATE_NEW:
2831 case SRPT_STATE_DATA_IN:
2832 ioctx->state = SRPT_STATE_CMD_RSP_SENT;
2834 case SRPT_STATE_MGMT:
2835 ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
2838 WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
2839 ch, ioctx->ioctx.index, ioctx->state);
2842 spin_unlock_irqrestore(&ioctx->spinlock, flags);
2844 if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
2845 || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
2846 atomic_inc(&ch->req_lim_delta);
2847 srpt_abort_cmd(ioctx);
2851 dir = ioctx->cmd.data_direction;
2853 /* For read commands, transfer the data to the initiator. */
2854 if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
2855 !ioctx->queue_status_only) {
2856 ret = srpt_xfer_data(ch, ioctx);
2858 pr_err("xfer_data failed for tag %llu\n",
2864 if (state != SRPT_STATE_MGMT)
2865 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
2869 = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
2870 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
2873 ret = srpt_post_send(ch, ioctx, resp_len);
2875 pr_err("sending cmd response failed for tag %llu\n",
2877 srpt_unmap_sg_to_ib_sge(ch, ioctx);
2878 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
2879 target_put_sess_cmd(&ioctx->cmd);
2883 static int srpt_queue_data_in(struct se_cmd *cmd)
2885 srpt_queue_response(cmd);
2889 static void srpt_queue_tm_rsp(struct se_cmd *cmd)
2891 srpt_queue_response(cmd);
2894 static void srpt_aborted_task(struct se_cmd *cmd)
2896 struct srpt_send_ioctx *ioctx = container_of(cmd,
2897 struct srpt_send_ioctx, cmd);
2899 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
2902 static int srpt_queue_status(struct se_cmd *cmd)
2904 struct srpt_send_ioctx *ioctx;
2906 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2907 BUG_ON(ioctx->sense_data != cmd->sense_buffer);
2908 if (cmd->se_cmd_flags &
2909 (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
2910 WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
2911 ioctx->queue_status_only = true;
2912 srpt_queue_response(cmd);
2916 static void srpt_refresh_port_work(struct work_struct *work)
2918 struct srpt_port *sport = container_of(work, struct srpt_port, work);
2920 srpt_refresh_port(sport);
2923 static int srpt_ch_list_empty(struct srpt_device *sdev)
2927 spin_lock_irq(&sdev->spinlock);
2928 res = list_empty(&sdev->rch_list);
2929 spin_unlock_irq(&sdev->spinlock);
2935 * srpt_release_sdev() - Free the channel resources associated with a target.
2937 static int srpt_release_sdev(struct srpt_device *sdev)
2939 struct srpt_rdma_ch *ch, *tmp_ch;
2942 WARN_ON_ONCE(irqs_disabled());
2946 spin_lock_irq(&sdev->spinlock);
2947 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
2948 __srpt_close_ch(ch);
2949 spin_unlock_irq(&sdev->spinlock);
2951 res = wait_event_interruptible(sdev->ch_releaseQ,
2952 srpt_ch_list_empty(sdev));
2954 pr_err("%s: interrupted.\n", __func__);
2959 static struct srpt_port *__srpt_lookup_port(const char *name)
2961 struct ib_device *dev;
2962 struct srpt_device *sdev;
2963 struct srpt_port *sport;
2966 list_for_each_entry(sdev, &srpt_dev_list, list) {
2971 for (i = 0; i < dev->phys_port_cnt; i++) {
2972 sport = &sdev->port[i];
2974 if (!strcmp(sport->port_guid, name))
2982 static struct srpt_port *srpt_lookup_port(const char *name)
2984 struct srpt_port *sport;
2986 spin_lock(&srpt_dev_lock);
2987 sport = __srpt_lookup_port(name);
2988 spin_unlock(&srpt_dev_lock);
2994 * srpt_add_one() - InfiniBand device addition callback function.
2996 static void srpt_add_one(struct ib_device *device)
2998 struct srpt_device *sdev;
2999 struct srpt_port *sport;
3000 struct ib_srq_init_attr srq_attr;
3003 pr_debug("device = %p, device->dma_ops = %p\n", device,
3006 sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
3010 sdev->device = device;
3011 INIT_LIST_HEAD(&sdev->rch_list);
3012 init_waitqueue_head(&sdev->ch_releaseQ);
3013 spin_lock_init(&sdev->spinlock);
3015 sdev->pd = ib_alloc_pd(device);
3016 if (IS_ERR(sdev->pd))
3019 sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
3021 srq_attr.event_handler = srpt_srq_event;
3022 srq_attr.srq_context = (void *)sdev;
3023 srq_attr.attr.max_wr = sdev->srq_size;
3024 srq_attr.attr.max_sge = 1;
3025 srq_attr.attr.srq_limit = 0;
3026 srq_attr.srq_type = IB_SRQT_BASIC;
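/*
 * A single shared receive queue (SRQ) is created per HCA, so receive
 * buffers are pooled across all channels of this device instead of
 * being allocated per connection.
 */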
3028 sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
3029 if (IS_ERR(sdev->srq))
3032 pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
3033 __func__, sdev->srq_size, sdev->device->attrs.max_srq_wr,
3036 if (!srpt_service_guid)
3037 srpt_service_guid = be64_to_cpu(device->node_guid);
3039 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
3040 if (IS_ERR(sdev->cm_id))
3043 /* print out target login information */
3044 pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
3045 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
3046 srpt_service_guid, srpt_service_guid);
3049 * We do not have a consistent service_id (i.e. also id_ext of target_id)
3050 * to identify this target. We currently use the GUID of the first HCA
3051 * in the system as service_id; therefore, the target_id will change
3052 * if this HCA goes bad and is replaced by a different HCA.
3054 if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0))
3057 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
3058 srpt_event_handler);
3059 if (ib_register_event_handler(&sdev->event_handler))
3062 sdev->ioctx_ring = (struct srpt_recv_ioctx **)
3063 srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
3064 sizeof(*sdev->ioctx_ring[0]),
3065 srp_max_req_size, DMA_FROM_DEVICE);
3066 if (!sdev->ioctx_ring)
3069 for (i = 0; i < sdev->srq_size; ++i)
3070 srpt_post_recv(sdev, sdev->ioctx_ring[i]);
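/*
 * All receive buffers are now posted on the SRQ, so SRP information
 * units can arrive as soon as a connection is accepted.
 */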
3072 WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
3074 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3075 sport = &sdev->port[i - 1];
3078 sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
3079 sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
3080 sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
3081 INIT_WORK(&sport->work, srpt_refresh_port_work);
3083 if (srpt_refresh_port(sport)) {
3084 pr_err("MAD registration failed for %s-%d.\n",
3085 srpt_sdev_name(sdev), i);
3088 snprintf(sport->port_guid, sizeof(sport->port_guid),
3090 be64_to_cpu(sport->gid.global.subnet_prefix),
3091 be64_to_cpu(sport->gid.global.interface_id));
3094 spin_lock(&srpt_dev_lock);
3095 list_add_tail(&sdev->list, &srpt_dev_list);
3096 spin_unlock(&srpt_dev_lock);
3099 ib_set_client_data(device, &srpt_client, sdev);
3100 pr_debug("added %s.\n", device->name);
3104 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3105 sdev->srq_size, srp_max_req_size,
3108 ib_unregister_event_handler(&sdev->event_handler);
3110 ib_destroy_cm_id(sdev->cm_id);
3112 ib_destroy_srq(sdev->srq);
3114 ib_dealloc_pd(sdev->pd);
3119 pr_info("%s(%s) failed.\n", __func__, device->name);
3124 * srpt_remove_one() - InfiniBand device removal callback function.
3126 static void srpt_remove_one(struct ib_device *device, void *client_data)
3128 struct srpt_device *sdev = client_data;
3132 pr_info("%s(%s): nothing to do.\n", __func__, device->name);
3136 srpt_unregister_mad_agent(sdev);
3138 ib_unregister_event_handler(&sdev->event_handler);
3140 /* Cancel any work queued by the just unregistered IB event handler. */
3141 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3142 cancel_work_sync(&sdev->port[i].work);
3144 ib_destroy_cm_id(sdev->cm_id);
3147 * Unregistering a target must happen after destroying sdev->cm_id
3148 * such that no new SRP_LOGIN_REQ information units can arrive while
3149 * destroying the target.
3151 spin_lock(&srpt_dev_lock);
3152 list_del(&sdev->list);
3153 spin_unlock(&srpt_dev_lock);
3154 srpt_release_sdev(sdev);
3156 ib_destroy_srq(sdev->srq);
3157 ib_dealloc_pd(sdev->pd);
3159 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3160 sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
3161 sdev->ioctx_ring = NULL;
3165 static struct ib_client srpt_client = {
3167 .add = srpt_add_one,
3168 .remove = srpt_remove_one
3171 static int srpt_check_true(struct se_portal_group *se_tpg)
3176 static int srpt_check_false(struct se_portal_group *se_tpg)
3181 static char *srpt_get_fabric_name(void)
3186 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
3188 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
3190 return sport->port_guid;
3193 static u16 srpt_get_tag(struct se_portal_group *tpg)
3198 static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
3203 static void srpt_release_cmd(struct se_cmd *se_cmd)
3205 struct srpt_send_ioctx *ioctx = container_of(se_cmd,
3206 struct srpt_send_ioctx, cmd);
3207 struct srpt_rdma_ch *ch = ioctx->ch;
3208 unsigned long flags;
3210 WARN_ON(ioctx->state != SRPT_STATE_DONE);
3211 WARN_ON(ioctx->mapped_sg_count != 0);
3213 if (ioctx->n_rbuf > 1) {
3214 kfree(ioctx->rbufs);
3215 ioctx->rbufs = NULL;
3219 spin_lock_irqsave(&ch->spinlock, flags);
3220 list_add(&ioctx->free_list, &ch->free_list);
3221 spin_unlock_irqrestore(&ch->spinlock, flags);
3225 * srpt_close_session() - Forcibly close a session.
3227 * Callback function invoked by the TCM core to clean up sessions associated
3228 * with a node ACL when the user invokes
3229 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3231 static void srpt_close_session(struct se_session *se_sess)
3233 DECLARE_COMPLETION_ONSTACK(release_done);
3234 struct srpt_rdma_ch *ch;
3235 struct srpt_device *sdev;
3238 ch = se_sess->fabric_sess_ptr;
3239 WARN_ON(ch->sess != se_sess);
3241 pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
3243 sdev = ch->sport->sdev;
3244 spin_lock_irq(&sdev->spinlock);
3245 BUG_ON(ch->release_done);
3246 ch->release_done = &release_done;
3247 __srpt_close_ch(ch);
3248 spin_unlock_irq(&sdev->spinlock);
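/* Wait up to one minute for the channel release to complete. */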
3250 res = wait_for_completion_timeout(&release_done, 60 * HZ);
3255 * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
3257 * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
3258 * This object represents an arbitrary integer used to uniquely identify a
3259 * particular attached remote initiator port to a particular SCSI target port
3260 * within a particular SCSI target device within a particular SCSI instance.
3262 static u32 srpt_sess_get_index(struct se_session *se_sess)
3267 static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
3271 /* Note: only used from inside debug printk's by the TCM core. */
3272 static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3274 struct srpt_send_ioctx *ioctx;
3276 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3277 return srpt_get_cmd_state(ioctx);
3281 * srpt_parse_i_port_id() - Parse an initiator port ID.
3282 * @name: ASCII representation of a 128-bit initiator port ID.
3283 * @i_port_id: Binary 128-bit port ID.
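 *
 * An optional "0x" prefix is accepted and short strings are right-justified,
 * e.g. the (hypothetical) name "0xfe80000000000000" leaves the first eight
 * bytes of @i_port_id zero and fills only the last eight bytes.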
3285 static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3288 unsigned len, count, leading_zero_bytes;
3292 if (strncasecmp(p, "0x", 2) == 0)
3298 count = min(len / 2, 16U);
3299 leading_zero_bytes = 16 - count;
3300 memset(i_port_id, 0, leading_zero_bytes);
3301 rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
3303 pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
3310 * configfs callback function invoked for
3311 * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3313 static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
3317 if (srpt_parse_i_port_id(i_port_id, name) < 0) {
3318 pr_err("invalid initiator port ID %s\n", name);
3324 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
3327 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3328 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3330 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3333 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
3334 const char *page, size_t count)
3336 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3337 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3341 ret = kstrtoul(page, 0, &val);
3343 pr_err("kstrtoul() failed with ret: %d\n", ret);
3346 if (val > MAX_SRPT_RDMA_SIZE) {
3347 pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3348 MAX_SRPT_RDMA_SIZE);
3351 if (val < DEFAULT_MAX_RDMA_SIZE) {
3352 pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3353 val, DEFAULT_MAX_RDMA_SIZE);
3356 sport->port_attrib.srp_max_rdma_size = val;
3361 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
3364 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3365 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3367 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3370 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
3371 const char *page, size_t count)
3373 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3374 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3378 ret = kstrtoul(page, 0, &val);
3380 pr_err("kstrtoul() failed with ret: %d\n", ret);
3383 if (val > MAX_SRPT_RSP_SIZE) {
3384 pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3388 if (val < MIN_MAX_RSP_SIZE) {
3389 pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3393 sport->port_attrib.srp_max_rsp_size = val;
3398 static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
3401 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3402 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3404 return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
3407 static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
3408 const char *page, size_t count)
3410 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3411 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3415 ret = kstrtoul(page, 0, &val);
3417 pr_err("kstrtoul() failed with ret: %d\n", ret);
3420 if (val > MAX_SRPT_SRQ_SIZE) {
3421 pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3425 if (val < MIN_SRPT_SRQ_SIZE) {
3426 pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3430 sport->port_attrib.srp_sq_size = val;
3435 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
3436 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
3437 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
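/*
 * The attributes above appear as files in the tpg "attrib" directory,
 * e.g. (hypothetical path) a larger send queue can be configured with
 * "echo 4096 > /sys/kernel/config/target/$driver/$port/$tpg/attrib/srp_sq_size".
 */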
3439 static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
3440 &srpt_tpg_attrib_attr_srp_max_rdma_size,
3441 &srpt_tpg_attrib_attr_srp_max_rsp_size,
3442 &srpt_tpg_attrib_attr_srp_sq_size,
3446 static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
3448 struct se_portal_group *se_tpg = to_tpg(item);
3449 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3451 return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled ? 1 : 0);
3454 static ssize_t srpt_tpg_enable_store(struct config_item *item,
3455 const char *page, size_t count)
3457 struct se_portal_group *se_tpg = to_tpg(item);
3458 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3462 ret = kstrtoul(page, 0, &tmp);
3464 pr_err("Unable to extract srpt_tpg_store_enable\n");
3468 if ((tmp != 0) && (tmp != 1)) {
3469 pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
3473 sport->enabled = true;
3475 sport->enabled = false;
3480 CONFIGFS_ATTR(srpt_tpg_, enable);
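/*
 * Writing 1 to this attribute enables the target port and writing 0
 * disables it, e.g. (hypothetical path)
 * "echo 1 > /sys/kernel/config/target/$driver/$port/$tpg/enable".
 */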
3482 static struct configfs_attribute *srpt_tpg_attrs[] = {
3483 &srpt_tpg_attr_enable,
3488 * configfs callback invoked for
3489 * mkdir /sys/kernel/config/target/$driver/$port/$tpg
3491 static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3492 struct config_group *group,
3495 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3498 /* Initialize sport->port_wwn and sport->port_tpg_1 */
3499 res = core_tpg_register(&sport->port_wwn, &sport->port_tpg_1, SCSI_PROTOCOL_SRP);
3501 return ERR_PTR(res);
3503 return &sport->port_tpg_1;
3507 * configfs callback invoked for
3508 * rmdir /sys/kernel/config/target/$driver/$port/$tpg
3510 static void srpt_drop_tpg(struct se_portal_group *tpg)
3512 struct srpt_port *sport = container_of(tpg,
3513 struct srpt_port, port_tpg_1);
3515 sport->enabled = false;
3516 core_tpg_deregister(&sport->port_tpg_1);
3520 * configfs callback invoked for
3521 * mkdir /sys/kernel/config/target/$driver/$port
3523 static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3524 struct config_group *group,
3527 struct srpt_port *sport;
3530 sport = srpt_lookup_port(name);
3531 pr_debug("make_tport(%s)\n", name);
3536 return &sport->port_wwn;
3539 return ERR_PTR(ret);
3543 * configfs callback invoked for
3544 * rmdir /sys/kernel/config/target/$driver/$port
3546 static void srpt_drop_tport(struct se_wwn *wwn)
3548 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3550 pr_debug("drop_tport(%s\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
3553 static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
3555 return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
3558 CONFIGFS_ATTR_RO(srpt_wwn_, version);
3560 static struct configfs_attribute *srpt_wwn_attrs[] = {
3561 &srpt_wwn_attr_version,
3565 static const struct target_core_fabric_ops srpt_template = {
3566 .module = THIS_MODULE,
3568 .node_acl_size = sizeof(struct srpt_node_acl),
3569 .get_fabric_name = srpt_get_fabric_name,
3570 .tpg_get_wwn = srpt_get_fabric_wwn,
3571 .tpg_get_tag = srpt_get_tag,
3572 .tpg_check_demo_mode = srpt_check_false,
3573 .tpg_check_demo_mode_cache = srpt_check_true,
3574 .tpg_check_demo_mode_write_protect = srpt_check_true,
3575 .tpg_check_prod_mode_write_protect = srpt_check_false,
3576 .tpg_get_inst_index = srpt_tpg_get_inst_index,
3577 .release_cmd = srpt_release_cmd,
3578 .check_stop_free = srpt_check_stop_free,
3579 .shutdown_session = srpt_shutdown_session,
3580 .close_session = srpt_close_session,
3581 .sess_get_index = srpt_sess_get_index,
3582 .sess_get_initiator_sid = NULL,
3583 .write_pending = srpt_write_pending,
3584 .write_pending_status = srpt_write_pending_status,
3585 .set_default_node_attributes = srpt_set_default_node_attrs,
3586 .get_cmd_state = srpt_get_tcm_cmd_state,
3587 .queue_data_in = srpt_queue_data_in,
3588 .queue_status = srpt_queue_status,
3589 .queue_tm_rsp = srpt_queue_tm_rsp,
3590 .aborted_task = srpt_aborted_task,
3592 * Set up function pointers for generic logic in
3593 * target_core_fabric_configfs.c.
3595 .fabric_make_wwn = srpt_make_tport,
3596 .fabric_drop_wwn = srpt_drop_tport,
3597 .fabric_make_tpg = srpt_make_tpg,
3598 .fabric_drop_tpg = srpt_drop_tpg,
3599 .fabric_init_nodeacl = srpt_init_nodeacl,
3601 .tfc_wwn_attrs = srpt_wwn_attrs,
3602 .tfc_tpg_base_attrs = srpt_tpg_attrs,
3603 .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
3607 * srpt_init_module() - Kernel module initialization.
3609 * Note: Since ib_register_client() registers callback functions, and since at
3610 * least one of these callback functions (srpt_add_one()) calls target core
3611 * functions, this driver must be registered with the target core before
3612 * ib_register_client() is called.
3614 static int __init srpt_init_module(void)
3619 if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
3620 pr_err("invalid value %d for kernel module parameter"
3621 " srp_max_req_size -- must be at least %d.\n",
3622 srp_max_req_size, MIN_MAX_REQ_SIZE);
3626 if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
3627 || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
3628 pr_err("invalid value %d for kernel module parameter"
3629 " srpt_srq_size -- must be in the range [%d..%d].\n",
3630 srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
3634 ret = target_register_template(&srpt_template);
3638 ret = ib_register_client(&srpt_client);
3640 pr_err("couldn't register IB client\n");
3641 goto out_unregister_target;
3646 out_unregister_target:
3647 target_unregister_template(&srpt_template);
3652 static void __exit srpt_cleanup_module(void)
3654 ib_unregister_client(&srpt_client);
3655 target_unregister_template(&srpt_template);
3658 module_init(srpt_init_module);
3659 module_exit(srpt_cleanup_module);