drivers/infiniband/ulp/iser/iser_initiator.c
/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Total data size is stored in
 *  iser_task->data[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task,
				 unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_device  *device = iser_task->ib_conn->device;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_in,
					     ISER_DIR_IN,
					     DMA_FROM_DEVICE);
		if (err)
			return err;
	}

	if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
		iser_err("Total data length: %ld, less than EDTL: "
			 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
			 iser_task->data[ISER_DIR_IN].data_len, edtl,
			 task->itt, iser_task->ib_conn);
		return -EINVAL;
	}

	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
	hdr->read_va   = cpu_to_be64(regd_buf->reg.va);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, regd_buf->reg.rkey,
		 (unsigned long long)regd_buf->reg.va);

	return 0;
}

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Total data size is stored in
 *  task->data[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_device  *device = iser_task->ib_conn->device;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_out,
					     ISER_DIR_OUT,
					     DMA_TO_DEVICE);
		if (err)
			return err;
	}

	if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Total data length: %ld, less than EDTL: %d, "
			 "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
			 iser_task->data[ISER_DIR_OUT].data_len,
			 edtl, task->itt, task->conn);
		return -EINVAL;
	}

	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
	if (err != 0) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags     |= ISER_WSV;
		hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
		hdr->write_va   = cpu_to_be64(regd_buf->reg.va + unsol_sz);

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 task->itt, regd_buf->reg.rkey,
			 (unsigned long long)regd_buf->reg.va, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr   = regd_buf->reg.va;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey   = regd_buf->reg.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn	*ib_conn,
				  struct iser_tx_desc	*tx_desc)
{
	struct iser_device *device = ib_conn->device;

	ib_dma_sync_single_for_cpu(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;

	if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
		tx_desc->tx_sg[0].lkey = device->mr->lkey;
		iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
	}
}

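/* unmaps and frees the login request/response buffers of a connection */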
static void iser_free_login_buf(struct iser_conn *ib_conn)
{
	if (!ib_conn->login_buf)
		return;

	if (ib_conn->login_req_dma)
		ib_dma_unmap_single(ib_conn->device->ib_device,
				    ib_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	if (ib_conn->login_resp_dma)
		ib_dma_unmap_single(ib_conn->device->ib_device,
				    ib_conn->login_resp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(ib_conn->login_buf);

	/* make sure we never redo any unmapping */
	ib_conn->login_req_dma = 0;
	ib_conn->login_resp_dma = 0;
	ib_conn->login_buf = NULL;
}

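/* allocates a single buffer holding the login request and response
 * and DMA-maps each half for its respective direction
 */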
static int iser_alloc_login_buf(struct iser_conn *ib_conn)
{
	struct iser_device	*device;
	int			req_err, resp_err;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
				     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!ib_conn->login_buf)
		goto out_err;

	ib_conn->login_req_buf  = ib_conn->login_buf;
	ib_conn->login_resp_buf = ib_conn->login_buf +
						ISCSI_DEF_MAX_RECV_SEG_LEN;

	ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
				(void *)ib_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
				(void *)ib_conn->login_resp_buf,
				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	req_err  = ib_dma_mapping_error(device->ib_device,
					ib_conn->login_req_dma);
	resp_err = ib_dma_mapping_error(device->ib_device,
					ib_conn->login_resp_dma);

	if (req_err || resp_err) {
		if (req_err)
			ib_conn->login_req_dma = 0;
		if (resp_err)
			ib_conn->login_resp_dma = 0;
		goto free_login_buf;
	}
	return 0;

free_login_buf:
	iser_free_login_buf(ib_conn);

out_err:
	iser_err("unable to alloc or map login buf\n");
	return -ENOMEM;
}

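/* allocates the connection's RDMA registration resources, login buffers
 * and RX descriptors, and DMA-maps each RX descriptor for receive
 */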
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge       *rx_sg;
	struct iser_device  *device = ib_conn->device;

	ib_conn->qp_max_recv_dtos = session->cmds_max;
	ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2;

	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(ib_conn))
		goto alloc_login_buf_fail;

	ib_conn->rx_descs = kmalloc(session->cmds_max *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!ib_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = ib_conn->rx_descs;

	for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr   = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey   = device->mr->lkey;
	}

	ib_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = ib_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
	ib_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(ib_conn);
alloc_login_buf_fail:
	device->iser_free_rdma_reg_res(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}

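/* releases the RDMA registration resources, unmaps and frees the RX
 * descriptors and the login buffers of a connection
 */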
void iser_free_rx_descriptors(struct iser_conn *ib_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct iser_device *device = ib_conn->device;

	if (!ib_conn->rx_descs)
		goto free_login_buf;

	if (device->iser_free_rdma_reg_res)
		device->iser_free_rdma_reg_res(ib_conn);

	rx_desc = ib_conn->rx_descs;
	for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
	/* make sure we never redo any unmapping */
	ib_conn->rx_descs = NULL;

free_login_buf:
	iser_free_login_buf(ib_conn);
}

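/* when the last login PDU indicates the transition to full feature phase,
 * post the initial batch of RX buffers; discovery sessions keep re-using
 * the login RX buffer instead
 */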
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iser_conn *ib_conn = conn->dd_data;
	struct iscsi_session *session = conn->session;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;

	/*
	 * Check that there is one posted recv buffer (for the last login
	 * response) and no posted send buffers left - they must have been
	 * consumed during previous login phases.
	 */
	WARN_ON(ib_conn->post_recv_buf_count != 1);
	WARN_ON(atomic_read(&ib_conn->post_send_buf_count) != 0);

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	} else
		iser_info("Normal session, posting batch of RX %d buffers\n",
			  ib_conn->min_posted_rx);

	/* Initial post receive buffers */
	if (iser_post_recvm(ib_conn, ib_conn->min_posted_rx))
		return -ENOMEM;

	return 0;
}

/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *ib_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf, *prot_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc  =  task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	iser_create_send_desc(ib_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		data_buf = &iser_task->data[ISER_DIR_IN];
		prot_buf = &iser_task->prot[ISER_DIR_IN];
	} else {
		data_buf = &iser_task->data[ISER_DIR_OUT];
		prot_buf = &iser_task->prot[ISER_DIR_OUT];
	}

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->buf  = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}
	data_buf->data_len = scsi_bufflen(sc);

	if (scsi_prot_sg_count(sc)) {
		prot_buf->buf  = scsi_prot_sglist(sc);
		prot_buf->size = scsi_prot_sg_count(sc);
		prot_buf->data_len = sc->prot_sdb->length;
	}

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task, edtl);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(ib_conn, tx_desc);
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
}

/**
 * iser_send_data_out - send data out PDU
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iser_conn *ib_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc = NULL;
	struct iser_regd_buf *regd_buf;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err = 0;
	struct ib_sge *tx_dsg;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset   = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
	if (tx_desc == NULL) {
		iser_err("Failed to alloc desc for post dataout\n");
		return -ENOMEM;
	}

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc */
	iser_initialize_task_headers(task, tx_desc);

	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr    = regd_buf->reg.va + buf_offset;
	tx_dsg->length  = data_seg_len;
	tx_dsg->lkey    = regd_buf->reg.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out "
			 "inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(ib_conn, tx_desc);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

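/**
 * iser_send_control - send control PDU
 */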
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *ib_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	iser_create_send_desc(ib_conn, mdesc);

	device = ib_conn->device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device,
			ib_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		memcpy(ib_conn->login_req_buf, task->data, task->data_count);

		ib_dma_sync_single_for_device(device->ib_device,
			ib_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		tx_dsg->addr    = ib_conn->login_req_dma;
		tx_dsg->length  = task->data_count;
		tx_dsg->lkey    = device->mr->lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(ib_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(ib_conn, mdesc);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

/**
 * iser_rcv_completion - recv DTO completion
 */
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
			 unsigned long rx_xfer_len,
			 struct iser_conn *ib_conn)
{
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding, count, err;

	/* differentiate between login and all other PDUs */
	if ((char *)rx_desc == ib_conn->login_resp_buf) {
		rx_dma = ib_conn->login_resp_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
	} else {
		rx_dma = rx_desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
			rx_buflen, DMA_FROM_DEVICE);

	hdr = &rx_desc->iscsi_header;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
			hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));

	iscsi_iser_recv(ib_conn->iscsi_conn, hdr, rx_desc->data,
			rx_xfer_len - ISER_HEADERS_LEN);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
				      rx_buflen, DMA_FROM_DEVICE);

	/*
	 * Decrementing conn->post_recv_buf_count only --after-- freeing the
	 * task eliminates the need to worry about tasks completing in
	 * parallel to the execution of iser_conn_term. So the code that
	 * waits for the posted rx bufs refcount to become zero handles
	 * everything.
	 */
	ib_conn->post_recv_buf_count--;

	if (rx_dma == ib_conn->login_resp_dma)
		return;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + ib_conn->min_posted_rx <= ib_conn->qp_max_recv_dtos) {
		count = min(ib_conn->qp_max_recv_dtos - outstanding,
						ib_conn->min_posted_rx);
		err = iser_post_recvm(ib_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}

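/* handles a send completion: frees Data-Out descriptors and drops the
 * task reference for control PDUs sent with the reserved ITT
 */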
void iser_snd_completion(struct iser_tx_desc *tx_desc,
			struct iser_conn *ib_conn)
{
	struct iscsi_task *task;
	struct iser_device *device = ib_conn->device;

	if (tx_desc->type == ISCSI_TX_DATAOUT) {
		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
					ISER_HEADERS_LEN, DMA_TO_DEVICE);
		kmem_cache_free(ig.desc_cache, tx_desc);
		tx_desc = NULL;
	}

	atomic_dec(&ib_conn->post_send_buf_count);

	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
		/* this arithmetic is legal by libiscsi dd_data allocation */
		task = (void *) ((long)(void *)tx_desc -
				  sizeof(struct iscsi_task));
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
	}
}

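/* resets the per-task RDMA state before a new command is issued */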
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len  = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].data_len  = 0;
	iser_task->prot[ISER_DIR_OUT].data_len = 0;

	memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
	       sizeof(struct iser_regd_buf));
	memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
	       sizeof(struct iser_regd_buf));
}

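/* tears down the per-task RDMA state when a command completes: finalizes
 * any bounce buffers used for unaligned SGs, unregisters the RDMA memory
 * and unmaps the data and protection buffers
 */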
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	struct iser_device *device = iser_task->ib_conn->device;
	int is_rdma_data_aligned = 1;
	int is_rdma_prot_aligned = 1;
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	/* if we were reading, copy back to unaligned sglist,
	 * anyway dma_unmap and free the copy
	 */
	if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_IN],
						&iser_task->data_copy[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_OUT],
						&iser_task->data_copy[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_IN],
						&iser_task->prot_copy[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_OUT],
						&iser_task->prot_copy[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->dir[ISER_DIR_IN]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_IN]);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_IN]);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_OUT]);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT]);
	}
}