/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

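/* Recover the rd_dev that embeds the se_device handed to us by the target core. */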
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
        return container_of(dev, struct rd_dev, dev);
}

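/* Allocate the per-HBA rd_host bookkeeping structure; released in rd_detach_hba(). */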
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct rd_host *rd_host;

        rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
        if (!rd_host) {
                pr_err("Unable to allocate memory for struct rd_host\n");
                return -ENOMEM;
        }

        rd_host->rd_host_id = host_id;

        hba->hba_ptr = rd_host;

        pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                RD_HBA_VERSION, TARGET_CORE_VERSION);

        return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
        struct rd_host *rd_host = hba->hba_ptr;

        pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
                " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

        kfree(rd_host);
        hba->hba_ptr = NULL;
}

static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
                                 u32 sg_table_count)
{
        struct page *pg;
        struct scatterlist *sg;
        u32 i, j, page_count = 0, sg_per_table;

        for (i = 0; i < sg_table_count; i++) {
                sg = sg_table[i].sg_table;
                sg_per_table = sg_table[i].rd_sg_count;

                for (j = 0; j < sg_per_table; j++) {
                        pg = sg_page(&sg[j]);
                        if (pg) {
                                __free_page(pg);
                                page_count++;
                        }
                }
                kfree(sg);
        }

        kfree(sg_table);
        return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
        u32 page_count;

        if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
                return;

        page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
                                          rd_dev->sg_table_count);

        pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
                " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

        rd_dev->sg_table_array = NULL;
        rd_dev->sg_table_count = 0;
}


/*
 * rd_allocate_sgl_table(): allocate the scatterlist tables and the zero-order
 * pages backing them, with every page memset to init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
                                 u32 total_sg_needed, unsigned char init_payload)
{
        u32 i = 0, j, page_offset = 0, sg_per_table;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        struct page *pg;
        struct scatterlist *sg;
        unsigned char *p;

        while (total_sg_needed) {
                unsigned int chain_entry = 0;

                sg_per_table = (total_sg_needed > max_sg_per_table) ?
                        max_sg_per_table : total_sg_needed;

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

                /*
                 * Reserve extra element for chain entry
                 */
                if (sg_per_table < total_sg_needed)
                        chain_entry = 1;

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

                sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
                                GFP_KERNEL);
                if (!sg) {
                        pr_err("Unable to allocate scatterlist array"
                                " for struct rd_dev\n");
                        return -ENOMEM;
                }

                sg_init_table(sg, sg_per_table + chain_entry);

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

                if (i > 0) {
                        sg_chain(sg_table[i - 1].sg_table,
                                 max_sg_per_table + 1, sg);
                }

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

                sg_table[i].sg_table = sg;
                sg_table[i].rd_sg_count = sg_per_table;
                sg_table[i].page_start_offset = page_offset;
                sg_table[i++].page_end_offset = (page_offset + sg_per_table)
                                                - 1;

                for (j = 0; j < sg_per_table; j++) {
                        pg = alloc_pages(GFP_KERNEL, 0);
                        if (!pg) {
                                pr_err("Unable to allocate scatterlist"
                                        " pages for struct rd_dev_sg_table\n");
                                return -ENOMEM;
                        }
                        sg_assign_page(&sg[j], pg);
                        sg[j].length = PAGE_SIZE;

                        p = kmap(pg);
                        memset(p, init_payload, PAGE_SIZE);
                        kunmap(pg);
                }

                page_offset += sg_per_table;
                total_sg_needed -= sg_per_table;
        }

        return 0;
}

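/*
 * Build the data backing store: rd_page_count zero-filled pages, described by
 * one or more rd_dev_sg_table entries of at most max_sg_per_table pages each.
 */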
static int rd_build_device_space(struct rd_dev *rd_dev)
{
        struct rd_dev_sg_table *sg_table;
        u32 sg_tables, total_sg_needed;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        int rc;

        if (rd_dev->rd_page_count <= 0) {
                pr_err("Illegal page count: %u for Ramdisk device\n",
                       rd_dev->rd_page_count);
                return -EINVAL;
        }

        /* Don't need backing pages for NULLIO */
        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;

        total_sg_needed = rd_dev->rd_page_count;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;

        sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
        if (!sg_table) {
                pr_err("Unable to allocate memory for Ramdisk"
                       " scatterlist tables\n");
                return -ENOMEM;
        }

        rd_dev->sg_table_array = sg_table;
        rd_dev->sg_table_count = sg_tables;

        rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
        if (rc)
                return rc;

        pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
                 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                 rd_dev->rd_dev_id, rd_dev->rd_page_count,
                 rd_dev->sg_table_count);

        return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
        u32 page_count;

        if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
                return;

        page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
                                          rd_dev->sg_prot_count);

        pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
                 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

        rd_dev->sg_prot_array = NULL;
        rd_dev->sg_prot_count = 0;
}

static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
        struct rd_dev_sg_table *sg_table;
        u32 total_sg_needed, sg_tables;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        int rc;

        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;
        /*
         * prot_length is the number of DIF bytes per block (8 bytes).
         * Pages needed = rd_page_count * (PAGE_SIZE / block_size) *
         *                prot_length / PAGE_SIZE, plus one page of padding;
         * the PAGE_SIZE factors cancel each other out.
         */
        total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;

        sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
        if (!sg_table) {
                pr_err("Unable to allocate memory for Ramdisk protection"
                       " scatterlist tables\n");
                return -ENOMEM;
        }

        rd_dev->sg_prot_array = sg_table;
        rd_dev->sg_prot_count = sg_tables;

        rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
        if (rc)
                return rc;

        pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
                 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

        return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
        struct rd_dev *rd_dev;
        struct rd_host *rd_host = hba->hba_ptr;

        rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
        if (!rd_dev) {
                pr_err("Unable to allocate memory for struct rd_dev\n");
                return NULL;
        }

        rd_dev->rd_host = rd_host;

        return &rd_dev->dev;
}

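/*
 * Called once configfs parameters have been written; rd_pages= is mandatory
 * and determines how much backing store rd_build_device_space() allocates.
 */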
static int rd_configure_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        struct rd_host *rd_host = dev->se_hba->hba_ptr;
        int ret;

        if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
                pr_debug("Missing rd_pages= parameter\n");
                return -EINVAL;
        }

        ret = rd_build_device_space(rd_dev);
        if (ret < 0)
                goto fail;

        dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
        dev->dev_attrib.hw_max_sectors = UINT_MAX;
        dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

        rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

        pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
                " %u pages in %u tables, %lu total bytes\n",
                rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
                rd_dev->sg_table_count,
                (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

        return 0;

fail:
        rd_release_device_space(rd_dev);
        return ret;
}

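/*
 * The backing pages are torn down immediately, but the rd_dev itself is only
 * freed after an RCU grace period so readers of the se_device under
 * rcu_read_lock() never dereference freed memory.
 */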
static void rd_dev_call_rcu(struct rcu_head *p)
{
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct rd_dev *rd_dev = RD_DEV(dev);

        kfree(rd_dev);
}

static void rd_free_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        rd_release_device_space(rd_dev);
        call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}

static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
        struct rd_dev_sg_table *sg_table;
        u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));

        i = page / sg_per_table;
        if (i < rd_dev->sg_table_count) {
                sg_table = &rd_dev->sg_table_array[i];
                if ((sg_table->page_start_offset <= page) &&
                    (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
        struct rd_dev_sg_table *sg_table;
        u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));

        i = page / sg_per_table;
        if (i < rd_dev->sg_prot_count) {
                sg_table = &rd_dev->sg_prot_array[i];
                if ((sg_table->page_start_offset <= page) &&
                     (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}

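/*
 * Locate the protection (DIF) scatterlist backing this command's LBA range,
 * then verify and copy the protection data: against cmd->t_prot_sg on writes,
 * against the ramdisk's own prot pages on reads.
 */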
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *prot_table;
        bool need_to_release = false;
        struct scatterlist *prot_sg;
        u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
        u32 prot_offset, prot_page;
        u32 prot_npages __maybe_unused;
        u64 tmp;
        sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        tmp = cmd->t_task_lba * se_dev->prot_length;
        prot_offset = do_div(tmp, PAGE_SIZE);
        prot_page = tmp;

        prot_table = rd_get_prot_table(dev, prot_page);
        if (!prot_table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        prot_sg = &prot_table->sg_table[prot_page -
                                        prot_table->page_start_offset];

#ifndef CONFIG_ARCH_HAS_SG_CHAIN

        prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
                                   PAGE_SIZE);

        /*
         * Allocate a temporary contiguous scatterlist if the prot pages
         * straddle multiple scatterlist tables.
         */
        if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
                int i;

                prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
                if (!prot_sg)
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

                need_to_release = true;
                sg_init_table(prot_sg, prot_npages);

                for (i = 0; i < prot_npages; i++) {
                        if (prot_page + i > prot_table->page_end_offset) {
                                prot_table = rd_get_prot_table(dev,
                                                                prot_page + i);
                                if (!prot_table) {
                                        kfree(prot_sg);
                                        return rc;
                                }
                                sg_unmark_end(&prot_sg[i - 1]);
                        }
                        prot_sg[i] = prot_table->sg_table[prot_page + i -
                                                prot_table->page_start_offset];
                }
        }

#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */

        if (is_read)
                rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
                                    prot_sg, prot_offset);
        else
                rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
                                    cmd->t_prot_sg, 0);

        if (!rc)
                sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

        if (need_to_release)
                kfree(prot_sg);

        return rc;
}

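/*
 * Main I/O path: walk the command's scatterlist with an sg_mapping_iter and
 * memcpy each segment to or from the ramdisk backing pages, crossing page
 * and table boundaries as needed.
 */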
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
              enum dma_data_direction data_direction)
{
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *table;
        struct scatterlist *rd_sg;
        struct sg_mapping_iter m;
        u32 rd_offset;
        u32 rd_size;
        u32 rd_page;
        u32 src_len;
        u64 tmp;
        sense_reason_t rc;

        if (dev->rd_flags & RDF_NULLIO) {
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }

        tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        rd_offset = do_div(tmp, PAGE_SIZE);
        rd_page = tmp;
        rd_size = cmd->data_length;

        table = rd_get_sg_table(dev, rd_page);
        if (!table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        rd_sg = &table->sg_table[rd_page - table->page_start_offset];

        pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
                        dev->rd_dev_id,
                        data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
                        cmd->t_task_lba, rd_size, rd_page, rd_offset);

        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_TO_DEVICE) {
                rc = rd_do_prot_rw(cmd, false);
                if (rc)
                        return rc;
        }

        src_len = PAGE_SIZE - rd_offset;
        sg_miter_start(&m, sgl, sgl_nents,
                        data_direction == DMA_FROM_DEVICE ?
                                SG_MITER_TO_SG : SG_MITER_FROM_SG);
        while (rd_size) {
                u32 len;
                void *rd_addr;

                sg_miter_next(&m);
                if (!(u32)m.length) {
                        pr_debug("RD[%u]: invalid sgl %p len %zu\n",
                                 dev->rd_dev_id, m.addr, m.length);
                        sg_miter_stop(&m);
                        return TCM_INCORRECT_AMOUNT_OF_DATA;
                }
                len = min((u32)m.length, src_len);
                if (len > rd_size) {
                        pr_debug("RD[%u]: size underrun page %d offset %d "
                                 "size %d\n", dev->rd_dev_id,
                                 rd_page, rd_offset, rd_size);
                        len = rd_size;
                }
                m.consumed = len;

                rd_addr = sg_virt(rd_sg) + rd_offset;

                if (data_direction == DMA_FROM_DEVICE)
                        memcpy(m.addr, rd_addr, len);
                else
                        memcpy(rd_addr, m.addr, len);

                rd_size -= len;
                if (!rd_size)
                        continue;

                src_len -= len;
                if (src_len) {
                        rd_offset += len;
                        continue;
                }

                /* rd page completed, next one please */
                rd_page++;
                rd_offset = 0;
                src_len = PAGE_SIZE;
                if (rd_page <= table->page_end_offset) {
                        rd_sg++;
                        continue;
                }

                table = rd_get_sg_table(dev, rd_page);
                if (!table) {
                        sg_miter_stop(&m);
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }

                /* since we increment, the first sg entry is correct */
                rd_sg = table->sg_table;
        }
        sg_miter_stop(&m);

        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_FROM_DEVICE) {
                rc = rd_do_prot_rw(cmd, true);
                if (rc)
                        return rc;
        }

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

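/* configfs device parameters: rd_pages=<count> sizes the ramdisk, rd_nullio=1 discards all I/O. */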
enum {
        Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
        {Opt_rd_pages, "rd_pages=%d"},
        {Opt_rd_nullio, "rd_nullio=%d"},
        {Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_rd_pages:
                        match_int(args, &arg);
                        rd_dev->rd_page_count = arg;
                        pr_debug("RAMDISK: Referencing Page"
                                " Count: %u\n", rd_dev->rd_page_count);
                        rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
                        break;
                case Opt_rd_nullio:
                        match_int(args, &arg);
                        if (arg != 1)
                                break;

                        pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
                        rd_dev->rd_flags |= RDF_NULLIO;
                        break;
                default:
                        break;
                }
        }

        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
                        rd_dev->rd_dev_id);
        bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
                        "  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
                        PAGE_SIZE, rd_dev->sg_table_count,
                        !!(rd_dev->rd_flags & RDF_NULLIO));
        return bl;
}

static sector_t rd_get_blocks(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
                        dev->dev_attrib.block_size) - 1;

        return blocks_long;
}

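/* Protection space is only allocated when a T10-PI protection type is enabled on the device. */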
static int rd_init_prot(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        if (!dev->dev_attrib.pi_prot_type)
                return 0;

        return rd_build_prot_space(rd_dev, dev->prot_length,
                                   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
        .execute_rw             = rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static const struct target_backend_ops rd_mcp_ops = {
        .name                   = "rd_mcp",
        .inquiry_prod           = "RAMDISK-MCP",
        .inquiry_rev            = RD_MCP_VERSION,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
        .alloc_device           = rd_alloc_device,
        .configure_device       = rd_configure_device,
        .free_device            = rd_free_device,
        .parse_cdb              = rd_parse_cdb,
        .set_configfs_dev_params = rd_set_configfs_dev_params,
        .show_configfs_dev_params = rd_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = rd_get_blocks,
        .init_prot              = rd_init_prot,
        .free_prot              = rd_free_prot,
        .tb_dev_attrib_attrs    = sbc_attrib_attrs,
};

int __init rd_module_init(void)
{
        return transport_backend_register(&rd_mcp_ops);
}

void rd_module_exit(void)
{
        target_backend_unregister(&rd_mcp_ops);
}