2 *******************************************************************************
4 ** FILE NAME : arcmsr_hba.c
5 ** BY : Nick Cheng, C.L. Huang
6 ** Description: SCSI RAID Device Driver for Areca RAID Controller
7 *******************************************************************************
8 ** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
10 ** Web site: www.areca.com.tw
11 ** E-mail: support@areca.com.tw
13 ** This program is free software; you can redistribute it and/or modify
14 ** it under the terms of the GNU General Public License version 2 as
15 ** published by the Free Software Foundation.
16 ** This program is distributed in the hope that it will be useful,
17 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
18 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 ** GNU General Public License for more details.
20 *******************************************************************************
21 ** Redistribution and use in source and binary forms, with or without
22 ** modification, are permitted provided that the following conditions
24 ** 1. Redistributions of source code must retain the above copyright
25 ** notice, this list of conditions and the following disclaimer.
26 ** 2. Redistributions in binary form must reproduce the above copyright
27 ** notice, this list of conditions and the following disclaimer in the
28 ** documentation and/or other materials provided with the distribution.
29 ** 3. The name of the author may not be used to endorse or promote products
30 ** derived from this software without specific prior written permission.
32 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
33 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
34 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
35 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
36 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
37 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
39 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
41 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *******************************************************************************
43 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
44 ** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
45 *******************************************************************************
47 #include <linux/module.h>
48 #include <linux/reboot.h>
49 #include <linux/spinlock.h>
50 #include <linux/pci_ids.h>
51 #include <linux/interrupt.h>
52 #include <linux/moduleparam.h>
53 #include <linux/errno.h>
54 #include <linux/types.h>
55 #include <linux/delay.h>
56 #include <linux/dma-mapping.h>
57 #include <linux/timer.h>
58 #include <linux/slab.h>
59 #include <linux/pci.h>
60 #include <linux/aer.h>
63 #include <asm/uaccess.h>
64 #include <scsi/scsi_host.h>
65 #include <scsi/scsi.h>
66 #include <scsi/scsi_cmnd.h>
67 #include <scsi/scsi_tcq.h>
68 #include <scsi/scsi_device.h>
69 #include <scsi/scsi_transport.h>
70 #include <scsi/scsicam.h>
/* Module identification metadata. */
72 MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
73 MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
74 MODULE_LICENSE("Dual BSD/GPL");
75 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
/* Sleep interval and retry count used while waiting on the IOP firmware. */
77 #define ARCMSR_SLEEPTIME 10
78 #define ARCMSR_RETRYCOUNT 12
/* Wait queue initialised in arcmsr_probe(). */
80 static wait_queue_head_t wait_q;
/* Forward declarations for driver entry points and HBA-type-specific helpers. */
81 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
82 struct scsi_cmnd *cmd);
83 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
84 static int arcmsr_abort(struct scsi_cmnd *);
85 static int arcmsr_bus_reset(struct scsi_cmnd *);
86 static int arcmsr_bios_param(struct scsi_device *sdev,
87 struct block_device *bdev, sector_t capacity, int *info);
88 static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
89 static int arcmsr_probe(struct pci_dev *pdev,
90 const struct pci_device_id *id);
91 static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
92 static int arcmsr_resume(struct pci_dev *pdev);
93 static void arcmsr_remove(struct pci_dev *pdev);
94 static void arcmsr_shutdown(struct pci_dev *pdev);
95 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
96 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
97 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
98 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
100 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
101 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
102 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
103 static void arcmsr_request_device_map(unsigned long pacb);
104 static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
105 static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
106 static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
107 static void arcmsr_message_isr_bh_fn(struct work_struct *work);
108 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
109 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
110 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
111 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
112 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
113 static const char *arcmsr_info(struct Scsi_Host *);
114 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
115 static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
/*
 * change_queue_depth callback: clamp the requested per-device queue
 * depth to ARCMSR_MAX_CMD_PERLUN and apply it with ordered tagging.
 */
116 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
117 int queue_depth, int reason)
119 if (reason != SCSI_QDEPTH_DEFAULT)
122 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
123 queue_depth = ARCMSR_MAX_CMD_PERLUN;
124 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
/* SCSI mid-layer host template wiring the arcmsr entry points. */
128 static struct scsi_host_template arcmsr_scsi_host_template = {
129 .module = THIS_MODULE,
130 .name = "Areca SAS/SATA RAID driver",
132 .queuecommand = arcmsr_queue_command,
133 .eh_abort_handler = arcmsr_abort,
134 .eh_bus_reset_handler = arcmsr_bus_reset,
135 .bios_param = arcmsr_bios_param,
136 .change_queue_depth = arcmsr_adjust_disk_queue_depth,
137 .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
138 .this_id = ARCMSR_SCSI_INITIATOR_ID,
139 .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
140 .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
141 .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
142 .use_clustering = ENABLE_CLUSTERING,
143 .shost_attrs = arcmsr_host_attrs,
/*
 * Supported Areca controllers.  driver_data selects the message-unit
 * programming model (adapter type A/B/C/D) used throughout the driver.
 */
147 static struct pci_device_id arcmsr_device_id_table[] = {
148 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
149 .driver_data = ACB_ADAPTER_TYPE_A},
150 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
151 .driver_data = ACB_ADAPTER_TYPE_A},
152 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
153 .driver_data = ACB_ADAPTER_TYPE_A},
154 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
155 .driver_data = ACB_ADAPTER_TYPE_A},
156 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
157 .driver_data = ACB_ADAPTER_TYPE_A},
158 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
159 .driver_data = ACB_ADAPTER_TYPE_B},
160 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
161 .driver_data = ACB_ADAPTER_TYPE_B},
162 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
163 .driver_data = ACB_ADAPTER_TYPE_B},
164 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
165 .driver_data = ACB_ADAPTER_TYPE_A},
166 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
167 .driver_data = ACB_ADAPTER_TYPE_D},
168 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
169 .driver_data = ACB_ADAPTER_TYPE_A},
170 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
171 .driver_data = ACB_ADAPTER_TYPE_A},
172 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
173 .driver_data = ACB_ADAPTER_TYPE_A},
174 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
175 .driver_data = ACB_ADAPTER_TYPE_A},
176 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
177 .driver_data = ACB_ADAPTER_TYPE_A},
178 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
179 .driver_data = ACB_ADAPTER_TYPE_A},
180 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
181 .driver_data = ACB_ADAPTER_TYPE_A},
182 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
183 .driver_data = ACB_ADAPTER_TYPE_A},
184 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
185 .driver_data = ACB_ADAPTER_TYPE_A},
186 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
187 .driver_data = ACB_ADAPTER_TYPE_C},
188 {0, 0}, /* Terminating entry */
190 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
/* PCI driver registration: probe/remove, power management and shutdown. */
192 static struct pci_driver arcmsr_pci_driver = {
194 .id_table = arcmsr_device_id_table,
195 .probe = arcmsr_probe,
196 .remove = arcmsr_remove,
197 .suspend = arcmsr_suspend,
198 .resume = arcmsr_resume,
199 .shutdown = arcmsr_shutdown,
202 ****************************************************************************
203 ****************************************************************************
/*
 * Free the secondary coherent DMA area (message unit shadow) that is
 * only allocated for adapter types B and D.
 */
206 static void arcmsr_free_mu(struct AdapterControlBlock *acb)
208 switch (acb->adapter_type) {
209 case ACB_ADAPTER_TYPE_B:
210 case ACB_ADAPTER_TYPE_D: {
211 dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
212 acb->dma_coherent2, acb->dma_coherent_handle2);
/*
 * Map the controller's PCI BAR(s) into kernel virtual address space and
 * record the per-adapter-type register base pointers on the acb.
 * (Return statements fall outside this excerpt.)
 */
218 static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
220 struct pci_dev *pdev = acb->pdev;
221 switch (acb->adapter_type){
222 case ACB_ADAPTER_TYPE_A:{
223 acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
225 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
/* Type B exposes its message unit across two BARs (0 and 2). */
230 case ACB_ADAPTER_TYPE_B:{
231 void __iomem *mem_base0, *mem_base1;
232 mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
234 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
237 mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
240 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
243 acb->mem_base0 = mem_base0;
244 acb->mem_base1 = mem_base1;
/* Type C registers live in BAR 1; always mapped uncached. */
247 case ACB_ADAPTER_TYPE_C:{
248 acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
250 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
/* Acknowledge any message interrupt left pending by the firmware. */
253 if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
254 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
259 case ACB_ADAPTER_TYPE_D: {
260 void __iomem *mem_base0;
261 unsigned long addr, range, flags;
263 addr = (unsigned long)pci_resource_start(pdev, 0);
264 range = pci_resource_len(pdev, 0);
265 flags = pci_resource_flags(pdev, 0);
/* Honour the resource's cacheability flag when mapping BAR 0. */
266 if (flags & IORESOURCE_CACHEABLE)
267 mem_base0 = ioremap(addr, range);
269 mem_base0 = ioremap_nocache(addr, range);
271 pr_notice("arcmsr%d: memory mapping region fail\n",
275 acb->mem_base0 = mem_base0;
/* Undo arcmsr_remap_pciregion(): unmap the per-type register windows. */
282 static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
284 switch (acb->adapter_type) {
285 case ACB_ADAPTER_TYPE_A:{
289 case ACB_ADAPTER_TYPE_B:{
290 iounmap(acb->mem_base0);
291 iounmap(acb->mem_base1);
295 case ACB_ADAPTER_TYPE_C:{
299 case ACB_ADAPTER_TYPE_D:
300 iounmap(acb->mem_base0);
/* Top-half IRQ handler: delegate to arcmsr_interrupt() for this acb. */
305 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
307 irqreturn_t handle_state;
308 struct AdapterControlBlock *acb = dev_id;
310 handle_state = arcmsr_interrupt(acb);
/*
 * bios_param callback: derive a heads/sectors/cylinders geometry for
 * the BIOS.  Tries the on-disk partition table first via
 * scsi_partsize(); otherwise computes a geometry, shrinking it when
 * the cylinder count would exceed 1024.
 */
314 static int arcmsr_bios_param(struct scsi_device *sdev,
315 struct block_device *bdev, sector_t capacity, int *geom)
317 int ret, heads, sectors, cylinders, total_capacity;
318 unsigned char *buffer;/* return copy of block device's partition table */
320 buffer = scsi_bios_ptable(bdev);
322 ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
327 total_capacity = capacity;
330 cylinders = total_capacity / (heads * sectors);
/* Geometry too tall for the BIOS: retry with larger heads/sectors. */
331 if (cylinders > 1024) {
334 cylinders = total_capacity / (heads * sectors);
/*
 * Poll the type A message unit (up to 2000 iterations, max 20 seconds)
 * for the outbound MESSAGE0 interrupt and acknowledge it when seen.
 * Fix: restore "&reg->" expressions that had been corrupted into a
 * registered-trademark glyph by an HTML-entity mangling.
 */
342 static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
344 struct MessageUnit_A __iomem *reg = acb->pmuA;
347 for (i = 0; i < 2000; i++) {
348 if (readl(&reg->outbound_intstatus) &
349 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
350 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
351 &reg->outbound_intstatus);
355 } /* max 20 seconds */
/*
 * Poll the type B message unit (up to 2000 iterations, max 20 seconds)
 * for IOP2DRV message-done, then ack it and signal end-of-interrupt.
 */
360 static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
362 struct MessageUnit_B *reg = acb->pmuB;
365 for (i = 0; i < 2000; i++) {
366 if (readl(reg->iop2drv_doorbell)
367 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
368 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
369 reg->iop2drv_doorbell);
370 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
371 reg->drv2iop_doorbell);
375 } /* max 20 seconds */
/* Type C variant of the message-done poll loop (max 20 seconds). */
380 static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
382 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
385 for (i = 0; i < 2000; i++) {
386 if (readl(&phbcmu->outbound_doorbell)
387 & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
388 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
389 &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
393 } /* max 20 seconds */
/* Type D variant of the message-done poll loop (max 20 seconds). */
398 static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
400 struct MessageUnit_D *reg = pACB->pmuD;
403 for (i = 0; i < 2000; i++) {
404 if (readl(reg->outbound_doorbell)
405 & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
406 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
407 reg->outbound_doorbell)
411 } /* max 20 seconds */
/*
 * Ask the type A IOP to flush its cache; retry up to 30 times while the
 * message acknowledgement is not seen.  Fix: restore "&reg->" from the
 * mangled registered-trademark glyph.
 */
415 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
417 struct MessageUnit_A __iomem *reg = acb->pmuA;
418 int retry_count = 30;
419 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
421 if (arcmsr_hbaA_wait_msgint_ready(acb))
425 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
426 timeout, retry count down = %d \n", acb->host->host_no, retry_count);
428 } while (retry_count != 0);
/* Type B flush-cache request with the same 30-attempt retry policy. */
431 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
433 struct MessageUnit_B *reg = acb->pmuB;
434 int retry_count = 30;
435 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
437 if (arcmsr_hbaB_wait_msgint_ready(acb))
441 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
442 timeout,retry count down = %d \n", acb->host->host_no, retry_count);
444 } while (retry_count != 0);
/*
 * Type C flush-cache request: write the message opcode, then ring the
 * inbound doorbell; retried up to 30 times on acknowledgement timeout.
 * Fix: restore "&reg->" from the mangled registered-trademark glyph.
 */
447 static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
449 struct MessageUnit_C __iomem *reg = pACB->pmuC;
450 int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
451 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
452 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
454 if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
458 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
459 timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
461 } while (retry_count != 0);
465 static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
467 int retry_count = 15;
468 struct MessageUnit_D *reg = pACB->pmuD;
470 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
472 if (arcmsr_hbaD_wait_msgint_ready(pACB))
476 pr_notice("arcmsr%d: wait 'flush adapter "
477 "cache' timeout, retry count down = %d\n",
478 pACB->host->host_no, retry_count);
479 } while (retry_count != 0);
/* Dispatch the flush-cache request to the adapter-type-specific helper. */
482 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
484 switch (acb->adapter_type) {
486 case ACB_ADAPTER_TYPE_A: {
487 arcmsr_hbaA_flush_cache(acb);
491 case ACB_ADAPTER_TYPE_B: {
492 arcmsr_hbaB_flush_cache(acb);
495 case ACB_ADAPTER_TYPE_C: {
496 arcmsr_hbaC_flush_cache(acb);
499 case ACB_ADAPTER_TYPE_D:
500 arcmsr_hbaD_flush_cache(acb);
/*
 * Allocate the coherent DMA pool of command control blocks (CCBs),
 * sized from the firmware-reported configuration, and thread every CCB
 * onto acb->ccb_free_list.  Also derives the host's max_sectors and
 * sg_tablesize from the firmware limits.
 */
505 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
507 struct pci_dev *pdev = acb->pdev;
509 dma_addr_t dma_coherent_handle;
510 struct CommandControlBlock *ccb_tmp;
512 dma_addr_t cdb_phyaddr;
513 unsigned long roundup_ccbsize;
514 unsigned long max_xfer_len;
515 unsigned long max_sg_entrys;
516 uint32_t firm_config_version;
/* Mark every target/lun absent until a device map arrives. */
518 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
519 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
520 acb->devstate[i][j] = ARECA_RAID_GONE;
522 max_xfer_len = ARCMSR_MAX_XFER_LEN;
523 max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
524 firm_config_version = acb->firm_cfg_version;
/* Firmware config rev >= 3 encodes the SG page shift in bits 8-15. */
525 if((firm_config_version & 0xFF) >= 3){
526 max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
527 max_sg_entrys = (max_xfer_len/4096);
529 acb->host->max_sectors = max_xfer_len/512;
530 acb->host->sg_tablesize = max_sg_entrys;
/* One 32-byte-aligned slot per CCB, including its variable SG tail. */
531 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
532 acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
533 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
535 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
538 acb->dma_coherent = dma_coherent;
539 acb->dma_coherent_handle = dma_coherent_handle;
540 memset(dma_coherent, 0, acb->uncache_size);
541 ccb_tmp = dma_coherent;
/* Offset converting a CCB bus address back to its virtual address. */
542 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
543 for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
544 cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
545 switch (acb->adapter_type) {
/* Types A/B post the CDB address right-shifted by 5 (32-byte units). */
546 case ACB_ADAPTER_TYPE_A:
547 case ACB_ADAPTER_TYPE_B:
548 ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
550 case ACB_ADAPTER_TYPE_C:
551 case ACB_ADAPTER_TYPE_D:
552 ccb_tmp->cdb_phyaddr = cdb_phyaddr;
555 acb->pccb_pool[i] = ccb_tmp;
557 INIT_LIST_HEAD(&ccb_tmp->list);
558 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
559 ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
560 dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
/*
 * Bottom-half worker: compare the firmware's device map with the cached
 * copy and hot-add/remove SCSI devices for every lun whose presence bit
 * changed.  The rwbuffer layout is per adapter type; word 0 holds the
 * GET_CONFIG signature and word 21 starts the device map.  Fix: restore
 * the eight "&reg->" expressions corrupted into a registered-trademark
 * glyph.
 */
565 static void arcmsr_message_isr_bh_fn(struct work_struct *work)
567 struct AdapterControlBlock *acb = container_of(work,
568 struct AdapterControlBlock, arcmsr_do_message_isr_bh);
569 char *acb_dev_map = (char *)acb->device_map;
570 uint32_t __iomem *signature = NULL;
571 char __iomem *devicemap = NULL;
573 struct scsi_device *psdev;
576 switch (acb->adapter_type) {
577 case ACB_ADAPTER_TYPE_A: {
578 struct MessageUnit_A __iomem *reg = acb->pmuA;
580 signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
581 devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
584 case ACB_ADAPTER_TYPE_B: {
585 struct MessageUnit_B *reg = acb->pmuB;
587 signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
588 devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
591 case ACB_ADAPTER_TYPE_C: {
592 struct MessageUnit_C __iomem *reg = acb->pmuC;
594 signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
595 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
598 case ACB_ADAPTER_TYPE_D: {
599 struct MessageUnit_D *reg = acb->pmuD;
601 signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
602 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
606 atomic_inc(&acb->rq_map_token);
/* Only act on buffers written by a GET_CONFIG reply. */
607 if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
609 for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
611 temp = readb(devicemap);
612 diff = (*acb_dev_map) ^ temp;
615 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
/* diff bit set: presence changed; temp bit decides add vs remove. */
617 if ((diff & 0x01) == 1 &&
618 (temp & 0x01) == 1) {
619 scsi_add_device(acb->host,
621 } else if ((diff & 0x01) == 1
622 && (temp & 0x01) == 0) {
623 psdev = scsi_device_lookup(acb->host,
626 scsi_remove_device(psdev);
627 scsi_device_put(psdev);
/*
 * Set up interrupt delivery, preferring MSI-X, then MSI, then the plain
 * INTx line; registers arcmsr_do_interrupt for each vector and records
 * the chosen mode in acb->acb_flags.
 */
640 arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
643 struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
645 for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
646 entries[i].entry = i;
647 r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
650 acb->msix_vector_count = r;
651 for (i = 0; i < r; i++) {
652 if (request_irq(entries[i].vector,
653 arcmsr_do_interrupt, 0, "arcmsr", acb)) {
654 pr_warn("arcmsr%d: request_irq =%d failed!\n",
655 acb->host->host_no, entries[i].vector);
/* Roll back vectors already requested before disabling MSI-X. */
656 for (j = 0 ; j < i ; j++)
657 free_irq(entries[j].vector, acb);
658 pci_disable_msix(pdev);
661 acb->entries[i] = entries[i];
663 acb->acb_flags |= ACB_F_MSIX_ENABLED;
664 pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
/* MSI-X unavailable: try a single MSI vector. */
667 if (pci_enable_msi_exact(pdev, 1) < 0)
669 if (request_irq(pdev->irq, arcmsr_do_interrupt,
670 IRQF_SHARED, "arcmsr", acb)) {
671 pr_warn("arcmsr%d: request_irq =%d failed!\n",
672 acb->host->host_no, pdev->irq);
673 pci_disable_msi(pdev);
676 acb->acb_flags |= ACB_F_MSI_ENABLED;
677 pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
/* Last resort: shared legacy INTx interrupt. */
680 if (request_irq(pdev->irq, arcmsr_do_interrupt,
681 IRQF_SHARED, "arcmsr", acb)) {
682 pr_warn("arcmsr%d: request_irq = %d failed!\n",
683 acb->host->host_no, pdev->irq);
/*
 * PCI probe: enable the device, allocate and initialise the Scsi_Host
 * and AdapterControlBlock, map registers, fetch the firmware spec,
 * allocate the CCB pool, register with the SCSI mid-layer, hook up
 * interrupts, start the periodic device-map timer and scan the host.
 * Error paths unwind in reverse order (labels fall outside excerpt).
 */
689 static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
691 struct Scsi_Host *host;
692 struct AdapterControlBlock *acb;
695 error = pci_enable_device(pdev);
699 host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
701 goto pci_disable_dev;
/* Prefer 64-bit DMA, falling back to a 32-bit mask. */
703 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
705 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
708 "scsi%d: No suitable DMA mask available\n",
710 goto scsi_host_release;
713 init_waitqueue_head(&wait_q);
714 bus = pdev->bus->number;
715 dev_fun = pdev->devfn;
716 acb = (struct AdapterControlBlock *) host->hostdata;
717 memset(acb,0,sizeof(struct AdapterControlBlock));
720 host->max_lun = ARCMSR_MAX_TARGETLUN;
721 host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
722 host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
723 host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
724 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
725 host->this_id = ARCMSR_SCSI_INITIATOR_ID;
726 host->unique_id = (bus << 8) | dev_fun;
727 pci_set_drvdata(pdev, host);
728 pci_set_master(pdev);
729 error = pci_request_regions(pdev, "arcmsr");
731 goto scsi_host_release;
733 spin_lock_init(&acb->eh_lock);
734 spin_lock_init(&acb->ccblist_lock);
735 spin_lock_init(&acb->postq_lock);
736 spin_lock_init(&acb->doneq_lock);
737 spin_lock_init(&acb->rqbuffer_lock);
738 spin_lock_init(&acb->wqbuffer_lock);
739 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
740 ACB_F_MESSAGE_RQBUFFER_CLEARED |
741 ACB_F_MESSAGE_WQBUFFER_READED);
742 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
743 INIT_LIST_HEAD(&acb->ccb_free_list);
744 acb->adapter_type = id->driver_data;
745 error = arcmsr_remap_pciregion(acb);
747 goto pci_release_regs;
749 error = arcmsr_get_firmware_spec(acb);
751 goto unmap_pci_region;
753 error = arcmsr_alloc_ccb_pool(acb);
757 error = scsi_add_host(host, &pdev->dev);
761 if (arcmsr_request_irq(pdev, acb) == FAILED)
762 goto scsi_host_remove;
763 arcmsr_iop_init(acb);
764 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
765 atomic_set(&acb->rq_map_token, 16);
766 atomic_set(&acb->ante_token_value, 16);
767 acb->fw_flag = FW_NORMAL;
/* Periodic timer that polls the IOP for device-map changes. */
768 init_timer(&acb->eternal_timer);
769 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
770 acb->eternal_timer.data = (unsigned long) acb;
771 acb->eternal_timer.function = &arcmsr_request_device_map;
772 add_timer(&acb->eternal_timer);
773 if(arcmsr_alloc_sysfs_attr(acb))
775 scsi_scan_host(host);
/* ---- error unwind ---- */
778 del_timer_sync(&acb->eternal_timer);
779 flush_work(&acb->arcmsr_do_message_isr_bh);
780 arcmsr_stop_adapter_bgrb(acb);
781 arcmsr_flush_adapter_cache(acb);
782 arcmsr_free_irq(pdev, acb);
784 scsi_remove_host(host);
786 arcmsr_free_ccb_pool(acb);
790 arcmsr_unmap_pciregion(acb);
792 pci_release_regions(pdev);
796 pci_disable_device(pdev);
/*
 * Release the interrupt(s) and disable MSI/MSI-X according to the mode
 * recorded in acb->acb_flags; the final free_irq handles plain INTx.
 */
800 static void arcmsr_free_irq(struct pci_dev *pdev,
801 struct AdapterControlBlock *acb)
805 if (acb->acb_flags & ACB_F_MSI_ENABLED) {
806 free_irq(pdev->irq, acb);
807 pci_disable_msi(pdev);
808 } else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
809 for (i = 0; i < acb->msix_vector_count; i++)
810 free_irq(acb->entries[i].vector, acb);
811 pci_disable_msix(pdev);
813 free_irq(pdev->irq, acb);
/*
 * PM suspend: mask controller interrupts, release IRQs, stop the
 * device-map timer and background rebuild, flush the adapter cache,
 * then power the PCI device down to the requested state.
 */
816 static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
818 uint32_t intmask_org;
819 struct Scsi_Host *host = pci_get_drvdata(pdev);
820 struct AdapterControlBlock *acb =
821 (struct AdapterControlBlock *)host->hostdata;
823 intmask_org = arcmsr_disable_outbound_ints(acb);
824 arcmsr_free_irq(pdev, acb);
825 del_timer_sync(&acb->eternal_timer);
826 flush_work(&acb->arcmsr_do_message_isr_bh);
827 arcmsr_stop_adapter_bgrb(acb);
828 arcmsr_flush_adapter_cache(acb);
829 pci_set_drvdata(pdev, host);
830 pci_save_state(pdev);
831 pci_disable_device(pdev);
832 pci_set_power_state(pdev, pci_choose_state(pdev, state));
/*
 * PM resume: restore PCI power/state, re-select the DMA mask,
 * re-request IRQs, re-initialise the IOP and restart the periodic
 * device-map timer.  Failure paths stop the adapter and unregister the
 * host (labels partly outside this excerpt).
 */
836 static int arcmsr_resume(struct pci_dev *pdev)
839 struct Scsi_Host *host = pci_get_drvdata(pdev);
840 struct AdapterControlBlock *acb =
841 (struct AdapterControlBlock *)host->hostdata;
843 pci_set_power_state(pdev, PCI_D0);
844 pci_enable_wake(pdev, PCI_D0, 0);
845 pci_restore_state(pdev);
846 if (pci_enable_device(pdev)) {
847 pr_warn("%s: pci_enable_device error\n", __func__);
/* Prefer 64-bit DMA, falling back to a 32-bit mask. */
850 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
852 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
854 pr_warn("scsi%d: No suitable DMA mask available\n",
856 goto controller_unregister;
859 pci_set_master(pdev);
860 if (arcmsr_request_irq(pdev, acb) == FAILED)
861 goto controller_stop;
862 arcmsr_iop_init(acb);
863 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
864 atomic_set(&acb->rq_map_token, 16);
865 atomic_set(&acb->ante_token_value, 16);
866 acb->fw_flag = FW_NORMAL;
867 init_timer(&acb->eternal_timer);
868 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
869 acb->eternal_timer.data = (unsigned long) acb;
870 acb->eternal_timer.function = &arcmsr_request_device_map;
871 add_timer(&acb->eternal_timer);
/* ---- error unwind ---- */
874 arcmsr_stop_adapter_bgrb(acb);
875 arcmsr_flush_adapter_cache(acb);
876 controller_unregister:
877 scsi_remove_host(host);
878 arcmsr_free_ccb_pool(acb);
879 arcmsr_unmap_pciregion(acb);
880 pci_release_regions(pdev);
882 pci_disable_device(pdev);
/*
 * Tell the type A IOP to abort all outstanding commands and wait for
 * the acknowledgement; logs on timeout.  Fix: restore "&reg->" from
 * the mangled registered-trademark glyph.
 */
886 static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
888 struct MessageUnit_A __iomem *reg = acb->pmuA;
889 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
890 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
892 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
893 , acb->host->host_no);
/* Type B abort-all request via the drv2iop doorbell. */
899 static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
901 struct MessageUnit_B *reg = acb->pmuB;
903 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
904 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
906 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
907 , acb->host->host_no);
/*
 * Type C abort-all request: message opcode plus doorbell kick.  Fix:
 * restore "&reg->" from the mangled registered-trademark glyph.
 */
912 static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
914 struct MessageUnit_C __iomem *reg = pACB->pmuC;
915 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
916 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
917 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
919 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
920 , pACB->host->host_no);
/* Type D abort-all request; logs on acknowledgement timeout. */
926 static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
928 struct MessageUnit_D *reg = pACB->pmuD;
930 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
931 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
932 pr_notice("arcmsr%d: wait 'abort all outstanding "
933 "command' timeout\n", pACB->host->host_no);
/* Dispatch abort-all to the adapter-type-specific helper. */
939 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
942 switch (acb->adapter_type) {
943 case ACB_ADAPTER_TYPE_A: {
944 rtnval = arcmsr_hbaA_abort_allcmd(acb);
948 case ACB_ADAPTER_TYPE_B: {
949 rtnval = arcmsr_hbaB_abort_allcmd(acb);
953 case ACB_ADAPTER_TYPE_C: {
954 rtnval = arcmsr_hbaC_abort_allcmd(acb);
958 case ACB_ADAPTER_TYPE_D:
959 rtnval = arcmsr_hbaD_abort_allcmd(acb);
/* Unmap the scatter/gather DMA mapping of a completed command. */
965 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
967 struct scsi_cmnd *pcmd = ccb->pcmd;
969 scsi_dma_unmap(pcmd);
/*
 * Complete a CCB: unmap its DMA, mark it done, return it to the free
 * list under ccblist_lock and invoke the mid-layer completion callback.
 */
972 static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
974 struct AdapterControlBlock *acb = ccb->acb;
975 struct scsi_cmnd *pcmd = ccb->pcmd;
977 atomic_dec(&acb->ccboutstandingcount);
978 arcmsr_pci_unmap_dma(ccb);
979 ccb->startdone = ARCMSR_CCB_DONE;
980 spin_lock_irqsave(&acb->ccblist_lock, flags);
981 list_add_tail(&ccb->list, &acb->ccb_free_list);
982 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
983 pcmd->scsi_done(pcmd);
/*
 * Copy the firmware-supplied sense data into the command's sense
 * buffer (truncated to SCSI_SENSE_BUFFERSIZE) and mark it as current,
 * valid sense; the SCSI result itself is set to DID_OK.
 */
986 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
989 struct scsi_cmnd *pcmd = ccb->pcmd;
990 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
991 pcmd->result = DID_OK << 16;
993 int sense_data_length =
994 sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
995 ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
996 memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
997 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
998 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
999 sensebuffer->Valid = 1;
/*
 * Mask all outbound interrupts for the adapter and return the previous
 * mask so it can be restored later (types A and C read the old mask;
 * type B reads its doorbell mask; type D just disables everything).
 * Fix: restore "&reg->" from the mangled registered-trademark glyph.
 */
1003 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
1006 switch (acb->adapter_type) {
1007 case ACB_ADAPTER_TYPE_A : {
1008 struct MessageUnit_A __iomem *reg = acb->pmuA;
1009 orig_mask = readl(&reg->outbound_intmask);
1010 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
1011 &reg->outbound_intmask);
1014 case ACB_ADAPTER_TYPE_B : {
1015 struct MessageUnit_B *reg = acb->pmuB;
1016 orig_mask = readl(reg->iop2drv_doorbell_mask);
1017 writel(0, reg->iop2drv_doorbell_mask);
1020 case ACB_ADAPTER_TYPE_C:{
1021 struct MessageUnit_C __iomem *reg = acb->pmuC;
1022 /* disable all outbound interrupt */
1023 orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
1024 writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
1027 case ACB_ADAPTER_TYPE_D: {
1028 struct MessageUnit_D *reg = acb->pmuD;
1029 /* disable all outbound interrupt */
1030 writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
/*
 * Translate the firmware's per-CCB completion status into a SCSI
 * result, update the cached RAID device state, and complete the
 * command.  On error the CDB's DeviceStatus selects the mapping.
 */
1037 static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
1038 struct CommandControlBlock *ccb, bool error)
1041 id = ccb->pcmd->device->id;
1042 lun = ccb->pcmd->device->lun;
/* Success path: revive the device state and finish with DID_OK. */
1044 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1045 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1046 ccb->pcmd->result = DID_OK << 16;
1047 arcmsr_ccb_complete(ccb);
1049 switch (ccb->arcmsr_cdb.DeviceStatus) {
1050 case ARCMSR_DEV_SELECT_TIMEOUT: {
1051 acb->devstate[id][lun] = ARECA_RAID_GONE;
1052 ccb->pcmd->result = DID_NO_CONNECT << 16;
1053 arcmsr_ccb_complete(ccb);
1057 case ARCMSR_DEV_ABORTED:
1059 case ARCMSR_DEV_INIT_FAIL: {
1060 acb->devstate[id][lun] = ARECA_RAID_GONE;
1061 ccb->pcmd->result = DID_BAD_TARGET << 16;
1062 arcmsr_ccb_complete(ccb);
1066 case ARCMSR_DEV_CHECK_CONDITION: {
1067 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1068 arcmsr_report_sense_info(ccb);
1069 arcmsr_ccb_complete(ccb);
/* Unknown status: log it and fail the command as disconnected. */
1075 "arcmsr%d: scsi id = %d lun = %d isr get command error done, \
1076 but got unknown DeviceStatus = 0x%x \n"
1077 , acb->host->host_no
1080 , ccb->arcmsr_cdb.DeviceStatus);
1081 acb->devstate[id][lun] = ARECA_RAID_GONE;
1082 ccb->pcmd->result = DID_NO_CONNECT << 16;
1083 arcmsr_ccb_complete(ccb);
/*
 * Validate a CCB taken from the done queue before completing it.  A
 * CCB that no longer belongs to this acb or was not started is either
 * an aborted command (completed with DID_ABORT) or an illegal
 * completion that is only logged; otherwise report its state normally.
 */
1089 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
1092 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
1093 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
1094 struct scsi_cmnd *abortcmd = pCCB->pcmd;
1096 id = abortcmd->device->id;
1097 lun = abortcmd->device->lun;
1098 abortcmd->result |= DID_ABORT << 16;
1099 arcmsr_ccb_complete(pCCB);
1100 printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
1101 acb->host->host_no, pCCB);
1105 printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
1107 "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
1108 " ccboutstandingcount = %d \n"
1109 , acb->host->host_no
1114 , atomic_read(&acb->ccboutstandingcount));
1117 arcmsr_report_ccb_state(acb, pCCB, error);
/*
 * Drain and complete everything still sitting on the controller's
 * outbound (done) queue while the adapter is being aborted/torn down,
 * with one branch per controller family (HBA/B/C/D).  Each reclaimed
 * frame is converted back to its CommandControlBlock via the
 * vir2phy_offset mapping and finished through arcmsr_drain_donequeue().
 *
 * NOTE(review): this extract is garbled — `®->` below is a mis-encoded
 * `&reg->`, and several lines (declarations of i/error/flag_ccb, break
 * statements, closing braces and parts of the type-D index arithmetic)
 * were dropped.  Code is reproduced byte-for-byte and only annotated.
 */
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;
		outbound_intstatus = readl(®->outbound_intstatus) &
			acb->outbound_int_enable;
		/*clear and abort all outbound posted Q*/
		writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/
		/* 0xFFFFFFFF marks an empty outbound queue */
		while(((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF)
			&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear all outbound posted Q*/
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
		/* type B keeps its done queue in host memory, walk every slot */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			flag_ccb = reg->done_qbuffer[i];
			if (flag_ccb != 0) {
				reg->done_qbuffer[i] = 0;
				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
				pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
			reg->post_qbuffer[i] = 0;
		/* reset both ring indices for a clean restart */
		reg->doneq_index = 0;
		reg->postq_index = 0;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		struct ARCMSR_CDB *pARCMSR_CDB;
		uint32_t flag_ccb, ccb_cdb_phy;
		struct CommandControlBlock *pCCB;
		while ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			flag_ccb = readl(®->outbound_queueport_low);
			/* low nibble carries flags; mask to get the CDB address */
			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *pmu = acb->pmuD;
		uint32_t ccb_cdb_phy, outbound_write_pointer;
		uint32_t doneq_index, index_stripped, addressLow, residual;
		struct CommandControlBlock *pCCB;
		/* slot 0 holds the firmware's write pointer for the ring */
		outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
		doneq_index = pmu->doneq_index;
		residual = atomic_read(&acb->ccboutstandingcount);
		for (i = 0; i < residual; i++) {
			/* consume entries until read index catches the writer */
			while ((doneq_index & 0xFFF) !=
				(outbound_write_pointer & 0xFFF)) {
				/* bit 0x4000 is a wrap marker folded into the index */
				if (doneq_index & 0x4000) {
					index_stripped = doneq_index & 0xFFF;
					index_stripped += 1;
					ARCMSR_MAX_ARC1214_DONEQUEUE;
					pmu->doneq_index = index_stripped ?
						(index_stripped | 0x4000) :
						(index_stripped + 1);
					index_stripped = doneq_index;
					index_stripped += 1;
					ARCMSR_MAX_ARC1214_DONEQUEUE;
					pmu->doneq_index = index_stripped ?
						((index_stripped | 0x4000) + 1);
				doneq_index = pmu->doneq_index;
				addressLow = pmu->done_qbuffer[doneq_index &
				ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
				pARCMSR_CDB = (struct ARCMSR_CDB *)
					(acb->vir2phy_offset + ccb_cdb_phy);
				pCCB = container_of(pARCMSR_CDB,
					struct CommandControlBlock, arcmsr_cdb);
				error = (addressLow &
					ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
				arcmsr_drain_donequeue(acb, pCCB, error);
				pmu->outboundlist_read_pointer);
			outbound_write_pointer =
				pmu->done_qbuffer[0].addressLow + 1;
			doneq_index = pmu->doneq_index;
		/* reset ring bookkeeping to its post-init values */
		pmu->postq_index = 0;
		pmu->doneq_index = 0x40FF;
/*
 * PCI .remove callback: unwind everything arcmsr_probe set up.
 * Order matters — sysfs and the SCSI host go first so no new commands
 * arrive, then interrupts/timers are stopped, the firmware is told to
 * stop background rebuild and flush its cache, outstanding commands are
 * polled out (and force-aborted if they do not drain), and finally all
 * memory/IRQ/PCI resources are released.
 *
 * NOTE(review): extraction dropped some lines (declarations of
 * poll_count/i, a break and an msleep in the polling loop, closing
 * braces); the visible statements are annotated as-is.
 */
static void arcmsr_remove(struct pci_dev *pdev)
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;
	/* give in-flight commands a bounded chance to complete */
	for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
		if (!atomic_read(&acb->ccboutstandingcount))
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
	/* anything still outstanding is force-aborted */
	if (atomic_read(&acb->ccboutstandingcount)) {
		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	arcmsr_free_mu(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
1285 static void arcmsr_shutdown(struct pci_dev *pdev)
1287 struct Scsi_Host *host = pci_get_drvdata(pdev);
1288 struct AdapterControlBlock *acb =
1289 (struct AdapterControlBlock *)host->hostdata;
1290 del_timer_sync(&acb->eternal_timer);
1291 arcmsr_disable_outbound_ints(acb);
1292 arcmsr_free_irq(pdev, acb);
1293 flush_work(&acb->arcmsr_do_message_isr_bh);
1294 arcmsr_stop_adapter_bgrb(acb);
1295 arcmsr_flush_adapter_cache(acb);
1298 static int arcmsr_module_init(void)
1301 error = pci_register_driver(&arcmsr_pci_driver);
1305 static void arcmsr_module_exit(void)
1307 pci_unregister_driver(&arcmsr_pci_driver);
1309 module_init(arcmsr_module_init);
1310 module_exit(arcmsr_module_exit);
1312 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
1316 switch (acb->adapter_type) {
1318 case ACB_ADAPTER_TYPE_A: {
1319 struct MessageUnit_A __iomem *reg = acb->pmuA;
1320 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
1321 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
1322 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
1323 writel(mask, ®->outbound_intmask);
1324 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
1328 case ACB_ADAPTER_TYPE_B: {
1329 struct MessageUnit_B *reg = acb->pmuB;
1330 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
1331 ARCMSR_IOP2DRV_DATA_READ_OK |
1332 ARCMSR_IOP2DRV_CDB_DONE |
1333 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
1334 writel(mask, reg->iop2drv_doorbell_mask);
1335 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
1338 case ACB_ADAPTER_TYPE_C: {
1339 struct MessageUnit_C __iomem *reg = acb->pmuC;
1340 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
1341 writel(intmask_org & mask, ®->host_int_mask);
1342 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
1345 case ACB_ADAPTER_TYPE_D: {
1346 struct MessageUnit_D *reg = acb->pmuD;
1348 mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
1349 writel(intmask_org | mask, reg->pcief0_int_enable);
/*
 * Translate a SCSI command into the adapter's ARCMSR_CDB frame inside
 * the given CCB: copy the CDB bytes, DMA-map the data buffer, and emit
 * one SG32ENTRY (32-bit) or SG64ENTRY (64-bit) per scatterlist segment
 * directly after the fixed 0x30-byte header.
 *
 * NOTE(review): extraction dropped lines here (declarations of length,
 * i and nseg, the FAILED return on oversized/failed mapping, and the
 * final SUCCESS return); visible statements annotated as-is.
 */
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;	/* fixed header before the SG list */
	struct scatterlist *sg;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->msgContext = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
	nseg = scsi_dma_map(pcmd);
	/* too many segments or mapping failure: caller must see failure */
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			/* segment fits in 32 bits: compact SG32 entry */
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof (struct SG32ENTRY);
			arccdbsize += sizeof (struct SG32ENTRY);
			/* 64-bit segment: IS_SG64_ADDR flags the wide entry */
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof (struct SG64ENTRY);
			arccdbsize += sizeof (struct SG64ENTRY);
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	/* msgPages = frame size in 256-byte pages, rounded up */
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if ( arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
/*
 * Hand a built CCB to the controller, using the posting mechanism of
 * the detected adapter family: a single queueport write (type A), a
 * host-memory ring plus doorbell (type B), a stamped queueport pair
 * (type C), or an InBound_SRB ring guarded by postq_lock (type D).
 * Increments ccboutstandingcount and marks the CCB started first.
 *
 * NOTE(review): `®->` below is a mis-encoded `&reg->`; extraction also
 * dropped break statements, an else keyword in the type B branch, the
 * index++ line, and type D local declarations.  Annotated as-is.
 */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
	uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/* low bit pattern tells firmware whether SG list is large */
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
			®->inbound_queueport);
			writel(cdb_phyaddr, ®->inbound_queueport);
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;
		/* pre-zero the next slot so firmware sees a clean terminator */
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		reg->post_qbuffer[ending_index] = 0;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			reg->post_qbuffer[index] =
			cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
			reg->post_qbuffer[index] = cdb_phyaddr;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		/* ring the drv2iop doorbell to announce the new entry */
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;
		/* encode (size-1)>>6 plus a valid bit into the low address bits */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
		if (acb->cdb_phyaddr_hi32) {
			/* 64-bit post: high dword must be written first */
			writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *pmu = acb->pmuD;
		unsigned long flags;
		struct InBound_SRB *pinbound_srb;
		/* the inbound ring is shared: serialize with postq_lock */
		spin_lock_irqsave(&acb->postq_lock, flags);
		postq_index = pmu->postq_index;
		pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
		pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr);
		pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr);
		pinbound_srb->length = ccb->arc_cdb_size >> 2;
		arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
		/* bit 0x4000 is the ring wrap marker folded into the index */
		if (postq_index & 0x4000) {
			index_stripped = postq_index & 0xFF;
			index_stripped += 1;
			index_stripped %= ARCMSR_MAX_ARC1214_POSTQUEUE;
			pmu->postq_index = index_stripped ?
				(index_stripped | 0x4000) : index_stripped;
			index_stripped = postq_index;
			index_stripped += 1;
			index_stripped %= ARCMSR_MAX_ARC1214_POSTQUEUE;
			pmu->postq_index = index_stripped ? index_stripped :
				(index_stripped | 0x4000);
		writel(postq_index, pmu->inboundlist_write_pointer);
		spin_unlock_irqrestore(&acb->postq_lock, flags);
1494 static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
1496 struct MessageUnit_A __iomem *reg = acb->pmuA;
1497 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1498 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0);
1499 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1501 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
1502 , acb->host->host_no);
1506 static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
1508 struct MessageUnit_B *reg = acb->pmuB;
1509 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1510 writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
1512 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1514 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
1515 , acb->host->host_no);
1519 static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
1521 struct MessageUnit_C __iomem *reg = pACB->pmuC;
1522 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1523 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0);
1524 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
1525 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1527 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
1528 , pACB->host->host_no);
1533 static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
1535 struct MessageUnit_D *reg = pACB->pmuD;
1537 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1538 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
1539 if (!arcmsr_hbaD_wait_msgint_ready(pACB))
1540 pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
1541 "timeout\n", pACB->host->host_no);
1544 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1546 switch (acb->adapter_type) {
1547 case ACB_ADAPTER_TYPE_A: {
1548 arcmsr_hbaA_stop_bgrb(acb);
1552 case ACB_ADAPTER_TYPE_B: {
1553 arcmsr_hbaB_stop_bgrb(acb);
1556 case ACB_ADAPTER_TYPE_C: {
1557 arcmsr_hbaC_stop_bgrb(acb);
1560 case ACB_ADAPTER_TYPE_D:
1561 arcmsr_hbaD_stop_bgrb(acb);
1566 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1568 dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
1571 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1573 switch (acb->adapter_type) {
1574 case ACB_ADAPTER_TYPE_A: {
1575 struct MessageUnit_A __iomem *reg = acb->pmuA;
1576 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
1580 case ACB_ADAPTER_TYPE_B: {
1581 struct MessageUnit_B *reg = acb->pmuB;
1582 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
1585 case ACB_ADAPTER_TYPE_C: {
1586 struct MessageUnit_C __iomem *reg = acb->pmuC;
1588 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
1591 case ACB_ADAPTER_TYPE_D: {
1592 struct MessageUnit_D *reg = acb->pmuD;
1593 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
1594 reg->inbound_doorbell);
1600 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1602 switch (acb->adapter_type) {
1603 case ACB_ADAPTER_TYPE_A: {
1604 struct MessageUnit_A __iomem *reg = acb->pmuA;
1606 ** push inbound doorbell tell iop, driver data write ok
1607 ** and wait reply on next hwinterrupt for next Qbuffer post
1609 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, ®->inbound_doorbell);
1613 case ACB_ADAPTER_TYPE_B: {
1614 struct MessageUnit_B *reg = acb->pmuB;
1616 ** push inbound doorbell tell iop, driver data write ok
1617 ** and wait reply on next hwinterrupt for next Qbuffer post
1619 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
1622 case ACB_ADAPTER_TYPE_C: {
1623 struct MessageUnit_C __iomem *reg = acb->pmuC;
1625 ** push inbound doorbell tell iop, driver data write ok
1626 ** and wait reply on next hwinterrupt for next Qbuffer post
1628 writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, ®->inbound_doorbell);
1631 case ACB_ADAPTER_TYPE_D: {
1632 struct MessageUnit_D *reg = acb->pmuD;
1633 writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
1634 reg->inbound_doorbell);
1640 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
1642 struct QBUFFER __iomem *qbuffer = NULL;
1643 switch (acb->adapter_type) {
1645 case ACB_ADAPTER_TYPE_A: {
1646 struct MessageUnit_A __iomem *reg = acb->pmuA;
1647 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer;
1651 case ACB_ADAPTER_TYPE_B: {
1652 struct MessageUnit_B *reg = acb->pmuB;
1653 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
1656 case ACB_ADAPTER_TYPE_C: {
1657 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
1658 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
1661 case ACB_ADAPTER_TYPE_D: {
1662 struct MessageUnit_D *reg = acb->pmuD;
1663 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
1670 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
1672 struct QBUFFER __iomem *pqbuffer = NULL;
1673 switch (acb->adapter_type) {
1675 case ACB_ADAPTER_TYPE_A: {
1676 struct MessageUnit_A __iomem *reg = acb->pmuA;
1677 pqbuffer = (struct QBUFFER __iomem *) ®->message_wbuffer;
1681 case ACB_ADAPTER_TYPE_B: {
1682 struct MessageUnit_B *reg = acb->pmuB;
1683 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
1686 case ACB_ADAPTER_TYPE_C: {
1687 struct MessageUnit_C __iomem *reg = acb->pmuC;
1688 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer;
1691 case ACB_ADAPTER_TYPE_D: {
1692 struct MessageUnit_D *reg = acb->pmuD;
1693 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
/*
 * Copy the IOP's read Qbuffer into the driver's rqbuffer ring using
 * 32-bit MMIO reads (required by type C/D hardware), staged through a
 * small bounce buffer, then acknowledge the data to the IOP.
 *
 * NOTE(review): extraction dropped lines here (the `static uint32_t`
 * return-type line, the kmalloc failure check, the tail-byte handling,
 * the ring copy body, kfree and the return of the transferred length);
 * visible statements annotated as-is.
 */
arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
	struct QBUFFER __iomem *prbuffer)
	uint8_t *buf1 = NULL;
	uint32_t __iomem *iop_data;
	uint32_t iop_len, data_len, *buf2 = NULL;
	iop_data = (uint32_t __iomem *)prbuffer->data;
	iop_len = readl(&prbuffer->data_len);
	/* bounce buffer: MMIO must be read in whole DWORDs */
	buf1 = kmalloc(128, GFP_ATOMIC);
	buf2 = (uint32_t *)buf1;
	while (data_len >= 4) {
		*buf2++ = readl(iop_data);
		/* final partial DWORD */
		*buf2 = readl(iop_data);
	buf2 = (uint32_t *)buf1;
	/* byte-copy from the bounce buffer into the circular rqbuffer */
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
		acb->rqbuf_lastindex++;
		/* if last, index number set it to 0 */
		acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
	/* let IOP know data has been read */
	arcmsr_iop_message_read(acb);
/*
 * Copy the IOP's read Qbuffer into the driver's circular rqbuffer.
 * Type C/D controllers require DWORD-wide MMIO access and are routed
 * to arcmsr_Read_iop_rqbuffer_in_DWORD(); type A/B can use byte reads.
 * Ends by acknowledging the data to the IOP.
 *
 * NOTE(review): extraction dropped the `static uint32_t` return-type
 * line, loop-counter updates and the final return of the length read;
 * visible statements annotated as-is.
 */
arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
	struct QBUFFER __iomem *prbuffer) {
	uint8_t __iomem *iop_data;
	/* C/D hardware cannot be read bytewise */
	if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
		return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
	iop_data = (uint8_t __iomem *)prbuffer->data;
	iop_len = readl(&prbuffer->data_len);
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
		*pQbuffer = readb(iop_data);
		acb->rqbuf_lastindex++;
		acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
	/* let IOP know data has been read */
	arcmsr_iop_message_read(acb);
1766 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1768 unsigned long flags;
1769 struct QBUFFER __iomem *prbuffer;
1770 int32_t buf_empty_len;
1772 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
1773 prbuffer = arcmsr_get_iop_rqbuffer(acb);
1774 buf_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) &
1775 (ARCMSR_MAX_QBUFFER - 1);
1776 if (buf_empty_len >= readl(&prbuffer->data_len)) {
1777 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
1778 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1780 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1781 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
/*
 * Push pending ioctl data from the driver's wqbuffer ring to the IOP's
 * write Qbuffer using 32-bit MMIO writes (type C/D requirement),
 * staged through a bounce buffer, then signal "data written" to the
 * IOP.  Only runs when the IOP has acknowledged the previous buffer
 * (ACB_F_MESSAGE_WQBUFFER_READED).
 *
 * NOTE(review): extraction dropped lines (kmalloc failure check, the
 * byte-gather loop body, tail-byte handling, kfree, closing braces);
 * visible statements annotated as-is.
 */
static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
	struct QBUFFER __iomem *pwbuffer;
	uint8_t *buf1 = NULL;
	uint32_t __iomem *iop_data;
	uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;
	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		/* bounce buffer: MMIO must be written in whole DWORDs */
		buf1 = kmalloc(128, GFP_ATOMIC);
		buf2 = (uint32_t *)buf1;
		/* consume the READED flag until the IOP acks again */
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint32_t __iomem *)pwbuffer->data;
		/* gather at most 124 bytes from the circular wqbuffer */
		while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
		data_len = allxfer_len;
		buf1 = (uint8_t *)buf2;
		while (data_len >= 4) {
			writel(data, iop_data);
			/* final partial DWORD */
			writel(data, iop_data);
		writel(allxfer_len, &pwbuffer->data_len);
		/* doorbell: tell the IOP the buffer is ready */
		arcmsr_iop_message_wrote(acb);
/*
 * Push pending ioctl data from the driver's wqbuffer ring to the IOP's
 * write Qbuffer.  Type C/D controllers need DWORD-wide MMIO and are
 * routed to arcmsr_write_ioctldata2iop_in_DWORD(); type A/B use byte
 * writes.  Caller holds wqbuffer_lock.
 *
 * NOTE(review): extraction dropped the `static void` return-type line,
 * a return after the DWORD variant call, counter increments and
 * closing braces; visible statements annotated as-is.
 */
arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
	struct QBUFFER __iomem *pwbuffer;
	uint8_t __iomem *iop_data;
	int32_t allxfer_len = 0;
	/* C/D hardware cannot be written bytewise */
	if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
		arcmsr_write_ioctldata2iop_in_DWORD(acb);
	/* only proceed once the IOP has read the previous buffer */
	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint8_t __iomem *)pwbuffer->data;
		/* copy at most 124 bytes from the circular wqbuffer */
		while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
			writeb(*pQbuffer, iop_data);
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
		writel(allxfer_len, &pwbuffer->data_len);
		/* doorbell: tell the IOP the buffer is ready */
		arcmsr_iop_message_wrote(acb);
/*
 * Doorbell handler: the IOP has consumed the last write Qbuffer.
 * Mark it READED and, if more ioctl data is queued, push the next
 * chunk immediately.  The second (deliberately separate) comparison
 * re-checks the indices AFTER the push, since the push advances
 * wqbuf_firstindex — do not fold it into an else.
 */
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
	unsigned long flags;

	spin_lock_irqsave(&acb->wqbuffer_lock, flags);
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
	if (acb->wqbuf_firstindex != acb->wqbuf_lastindex)
		arcmsr_write_ioctldata2iop(acb);
	/* queue fully drained (possibly by the call above) */
	if (acb->wqbuf_firstindex == acb->wqbuf_lastindex)
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
1871 static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
1873 uint32_t outbound_doorbell;
1874 struct MessageUnit_A __iomem *reg = acb->pmuA;
1875 outbound_doorbell = readl(®->outbound_doorbell);
1877 writel(outbound_doorbell, ®->outbound_doorbell);
1878 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
1879 arcmsr_iop2drv_data_wrote_handle(acb);
1880 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
1881 arcmsr_iop2drv_data_read_handle(acb);
1882 outbound_doorbell = readl(®->outbound_doorbell);
1883 } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
1884 | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
1886 static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
1888 uint32_t outbound_doorbell;
1889 struct MessageUnit_C __iomem *reg = pACB->pmuC;
1891 *******************************************************************
1892 ** Maybe here we need to check wrqbuffer_lock is lock or not
1893 ** DOORBELL: din! don!
1894 ** check if there are any mail need to pack from firmware
1895 *******************************************************************
1897 outbound_doorbell = readl(®->outbound_doorbell);
1899 writel(outbound_doorbell, ®->outbound_doorbell_clear);
1900 readl(®->outbound_doorbell_clear);
1901 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
1902 arcmsr_iop2drv_data_wrote_handle(pACB);
1903 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
1904 arcmsr_iop2drv_data_read_handle(pACB);
1905 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
1906 arcmsr_hbaC_message_isr(pACB);
1907 outbound_doorbell = readl(®->outbound_doorbell);
1908 } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
1909 | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
1910 | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
/*
 * Type D (ARC1214) doorbell ISR: service message completions and ioctl
 * data events, re-reading the doorbell until no serviced event remains.
 *
 * NOTE(review): the `do {` opening the loop that the trailing while
 * closes appears to have been dropped by extraction; code is otherwise
 * reproduced as-is.
 */
static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
	uint32_t outbound_doorbell;
	struct MessageUnit_D *pmu = pACB->pmuD;
	outbound_doorbell = readl(pmu->outbound_doorbell);
		/* ack the events we are about to service */
		writel(outbound_doorbell, pmu->outbound_doorbell);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaD_message_isr(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(pACB);
		outbound_doorbell = readl(pmu->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
		| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
1933 static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
1936 struct MessageUnit_A __iomem *reg = acb->pmuA;
1937 struct ARCMSR_CDB *pARCMSR_CDB;
1938 struct CommandControlBlock *pCCB;
1940 while ((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) {
1941 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
1942 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1943 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1944 arcmsr_drain_donequeue(acb, pCCB, error);
1947 static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
1951 struct MessageUnit_B *reg = acb->pmuB;
1952 struct ARCMSR_CDB *pARCMSR_CDB;
1953 struct CommandControlBlock *pCCB;
1955 index = reg->doneq_index;
1956 while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
1957 reg->done_qbuffer[index] = 0;
1958 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
1959 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1960 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1961 arcmsr_drain_donequeue(acb, pCCB, error);
1963 index %= ARCMSR_MAX_HBB_POSTQUEUE;
1964 reg->doneq_index = index;
/*
 * Type C (HBC) post-queue ISR: pop completed frames from the low
 * outbound queueport while the post-queue ISR bit stays set, complete
 * each CCB, and periodically ring the throttling doorbell so the
 * firmware keeps posting.
 *
 * NOTE(review): extraction dropped lines here (the phbcmu/flag
 * initialisation, the 0xFFFFFFFF empty-marker comparison in the while
 * condition, throttling reset/increment, closing braces); visible
 * statements annotated as-is.
 */
static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
	struct MessageUnit_C __iomem *phbcmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
	/* areca cdb command done */
	/* Use correct offset and size for syncing */
	while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
		/* low nibble carries flags; mask to get the CDB address */
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
		/* check if command done with no error */
		arcmsr_drain_donequeue(acb, ccb, error);
		/* every THROTTLING_LEVEL completions, nudge the firmware */
		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
				&phbcmu->inbound_doorbell);
/*
 * Type D (ARC1214) post-queue ISR: under doneq_lock, walk the
 * host-memory done ring from doneq_index to the firmware's write
 * pointer (kept in done_qbuffer[0].addressLow), completing each CCB
 * and advancing the wrap-marked (bit 0x4000) ring index, then clear
 * the outbound-list interrupt cause.
 *
 * NOTE(review): extraction dropped lines here (the pmu assignment, the
 * `do {` opener, the ternary alternates in the index arithmetic, the
 * 0xFFF].addressLow subscript continuation, true/false assignment and
 * closing braces); visible statements annotated as-is.
 */
static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
	u32 outbound_write_pointer, doneq_index, index_stripped;
	uint32_t addressLow, ccb_cdb_phy;
	struct MessageUnit_D *pmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	unsigned long flags;
	/* the done ring is shared with abort/drain paths */
	spin_lock_irqsave(&acb->doneq_lock, flags);
	/* slot 0 holds the firmware's write pointer for the ring */
	outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
	doneq_index = pmu->doneq_index;
	if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
		/* bit 0x4000 is the ring wrap marker folded into the index */
		if (doneq_index & 0x4000) {
			index_stripped = doneq_index & 0xFFF;
			index_stripped += 1;
			index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
			pmu->doneq_index = index_stripped
				? (index_stripped | 0x4000) :
				(index_stripped + 1);
			index_stripped = doneq_index;
			index_stripped += 1;
			index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
			pmu->doneq_index = index_stripped
				((index_stripped | 0x4000) + 1);
		doneq_index = pmu->doneq_index;
		addressLow = pmu->done_qbuffer[doneq_index &
		/* low nibble carries flags; mask to get the CDB address */
		ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
		ccb = container_of(arcmsr_cdb,
			struct CommandControlBlock, arcmsr_cdb);
		error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
		arcmsr_drain_donequeue(acb, ccb, error);
		/* publish how far we have consumed the ring */
		writel(doneq_index, pmu->outboundlist_read_pointer);
	} while ((doneq_index & 0xFFF) !=
		(outbound_write_pointer & 0xFFF));
	/* ack the outbound-list interrupt; read back flushes the write */
	writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
		pmu->outboundlist_interrupt_cause);
	readl(pmu->outboundlist_interrupt_cause);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
2053 **********************************************************************************
2054 ** Handle a message interrupt
2056 ** The only message interrupt we expect is in response to a query for the current adapter config.
2057 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2058 **********************************************************************************
2060 static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
2062 struct MessageUnit_A __iomem *reg = acb->pmuA;
2063 /*clear interrupt and message state*/
2064 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus);
2065 schedule_work(&acb->arcmsr_do_message_isr_bh);
2067 static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
2069 struct MessageUnit_B *reg = acb->pmuB;
2071 /*clear interrupt and message state*/
2072 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2073 schedule_work(&acb->arcmsr_do_message_isr_bh);
2076 **********************************************************************************
2077 ** Handle a message interrupt
2079 ** The only message interrupt we expect is in response to a query for the
2080 ** current adapter config.
2081 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2082 **********************************************************************************
2084 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
2086 struct MessageUnit_C __iomem *reg = acb->pmuC;
2087 /*clear interrupt and message state*/
2088 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear);
2089 schedule_work(&acb->arcmsr_do_message_isr_bh);
2092 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
2094 struct MessageUnit_D *reg = acb->pmuD;
2096 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
2097 readl(reg->outbound_doorbell);
2098 schedule_work(&acb->arcmsr_do_message_isr_bh);
2101 static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
2103 uint32_t outbound_intstatus;
2104 struct MessageUnit_A __iomem *reg = acb->pmuA;
2105 outbound_intstatus = readl(®->outbound_intstatus) &
2106 acb->outbound_int_enable;
2107 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
2110 writel(outbound_intstatus, ®->outbound_intstatus);
2111 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
2112 arcmsr_hbaA_doorbell_isr(acb);
2113 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
2114 arcmsr_hbaA_postqueue_isr(acb);
2115 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
2116 arcmsr_hbaA_message_isr(acb);
2117 outbound_intstatus = readl(®->outbound_intstatus) &
2118 acb->outbound_int_enable;
2119 } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
2120 | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
2121 | ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
2125 static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
2127 uint32_t outbound_doorbell;
2128 struct MessageUnit_B *reg = acb->pmuB;
2129 outbound_doorbell = readl(reg->iop2drv_doorbell) &
2130 acb->outbound_int_enable;
2131 if (!outbound_doorbell)
2134 writel(~outbound_doorbell, reg->iop2drv_doorbell);
2135 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
2136 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
2137 arcmsr_iop2drv_data_wrote_handle(acb);
2138 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
2139 arcmsr_iop2drv_data_read_handle(acb);
2140 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
2141 arcmsr_hbaB_postqueue_isr(acb);
2142 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
2143 arcmsr_hbaB_message_isr(acb);
2144 outbound_doorbell = readl(reg->iop2drv_doorbell) &
2145 acb->outbound_int_enable;
2146 } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
2147 | ARCMSR_IOP2DRV_DATA_READ_OK
2148 | ARCMSR_IOP2DRV_CDB_DONE
2149 | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
2153 static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
2155 uint32_t host_interrupt_status;
2156 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
2158 *********************************************
2159 ** check outbound intstatus
2160 *********************************************
2162 host_interrupt_status = readl(&phbcmu->host_int_status) &
2163 (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2164 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
2165 if (!host_interrupt_status)
2168 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
2169 arcmsr_hbaC_doorbell_isr(pACB);
2170 /* MU post queue interrupts*/
2171 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
2172 arcmsr_hbaC_postqueue_isr(pACB);
2173 host_interrupt_status = readl(&phbcmu->host_int_status);
2174 } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2175 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
2179 static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
2181 u32 host_interrupt_status;
2182 struct MessageUnit_D *pmu = pACB->pmuD;
2184 host_interrupt_status = readl(pmu->host_int_status) &
2185 (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
2186 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
2187 if (!host_interrupt_status)
2190 /* MU post queue interrupts*/
2191 if (host_interrupt_status &
2192 ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
2193 arcmsr_hbaD_postqueue_isr(pACB);
2194 if (host_interrupt_status &
2195 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
2196 arcmsr_hbaD_doorbell_isr(pACB);
2197 host_interrupt_status = readl(pmu->host_int_status);
2198 } while (host_interrupt_status &
2199 (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
2200 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
2204 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
2206 switch (acb->adapter_type) {
2207 case ACB_ADAPTER_TYPE_A:
2208 return arcmsr_hbaA_handle_isr(acb);
2210 case ACB_ADAPTER_TYPE_B:
2211 return arcmsr_hbaB_handle_isr(acb);
2213 case ACB_ADAPTER_TYPE_C:
2214 return arcmsr_hbaC_handle_isr(acb);
2215 case ACB_ADAPTER_TYPE_D:
2216 return arcmsr_hbaD_handle_isr(acb);
2222 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2225 /* stop adapter background rebuild */
2226 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
2227 uint32_t intmask_org;
2228 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2229 intmask_org = arcmsr_disable_outbound_ints(acb);
2230 arcmsr_stop_adapter_bgrb(acb);
2231 arcmsr_flush_adapter_cache(acb);
2232 arcmsr_enable_outbound_ints(acb, intmask_org);
/*
** Reset the driver-side ring buffer that mirrors the IOP->driver message
** queue.  While the overflow flag keeps reappearing, clear it, reset the
** ring indices and re-arm the IOP (arcmsr_iop_message_read) so it can
** resume writing; retry a bounded number of times (15).
** NOTE(review): this extract has dropped lines (declaration of `i`,
** loop-exit/brace lines) — code below left byte-identical, comments only.
*/
2238 void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
2242 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2243 for (i = 0; i < 15; i++) {
2244 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2245 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2246 acb->rqbuf_firstindex = 0;
2247 acb->rqbuf_lastindex = 0;
/* tell the IOP the ring was drained so it may post more data */
2248 arcmsr_iop_message_read(acb);
2250 } else if (acb->rqbuf_firstindex !=
2251 acb->rqbuf_lastindex) {
2252 acb->rqbuf_firstindex = 0;
2253 acb->rqbuf_lastindex = 0;
/*
** Service an ioctl-style message delivered through the virtual SCSI device:
** the control code is packed into CDB bytes 5..8 and selects read/write/
** clear operations on the driver<->IOP message ring buffers (rqbuffer =
** IOP->driver, wqbuffer = driver->IOP), plus a few housekeeping commands.
** The caller's data buffer is the command's first scatterlist segment,
** mapped with kmap_atomic for the duration of the call.
** Returns 0 on success or ARCMSR_MESSAGE_FAIL.
** NOTE(review): this extract has dropped many interior lines (braces,
** else/goto/label lines, some memcpy arguments) — code left byte-identical,
** comments only.
*/
2261 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
2262 struct scsi_cmnd *cmd)
2265 unsigned short use_sg;
2266 int retvalue = 0, transfer_len = 0;
2267 unsigned long flags;
2268 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
/* control code packed big-endian style from CDB bytes 5..8 */
2269 uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
2270 (uint32_t)cmd->cmnd[6] << 16 |
2271 (uint32_t)cmd->cmnd[7] << 8 |
2272 (uint32_t)cmd->cmnd[8];
2273 struct scatterlist *sg;
2275 use_sg = scsi_sg_count(cmd);
2276 sg = scsi_sglist(cmd);
/* map the user buffer (first SG segment) into kernel address space */
2277 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
2279 retvalue = ARCMSR_MESSAGE_FAIL;
2282 transfer_len += sg->length;
/* refuse transfers larger than the message structure */
2283 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2284 retvalue = ARCMSR_MESSAGE_FAIL;
2285 pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
2288 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
2289 switch (controlcode) {
/* copy up to 1032 bytes out of the IOP->driver ring into the caller */
2290 case ARCMSR_MESSAGE_READ_RQBUFFER: {
2291 unsigned char *ver_addr;
2292 uint8_t *pQbuffer, *ptmpQbuffer;
2293 uint32_t allxfer_len = 0;
2294 ver_addr = kmalloc(1032, GFP_ATOMIC);
2296 retvalue = ARCMSR_MESSAGE_FAIL;
2297 pr_info("%s: memory not enough!\n", __func__);
2300 ptmpQbuffer = ver_addr;
2301 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2302 if (acb->rqbuf_firstindex != acb->rqbuf_lastindex) {
2303 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
/* ring wrapped: data runs from firstindex to end, then from 0 */
2304 if (acb->rqbuf_firstindex > acb->rqbuf_lastindex) {
2305 if ((ARCMSR_MAX_QBUFFER -
2306 acb->rqbuf_firstindex) >= 1032) {
2307 memcpy(ptmpQbuffer, pQbuffer, 1032);
2308 acb->rqbuf_firstindex += 1032;
2309 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2312 if (((ARCMSR_MAX_QBUFFER -
2313 acb->rqbuf_firstindex) +
2314 acb->rqbuf_lastindex) > 1032) {
2316 pQbuffer, ARCMSR_MAX_QBUFFER
2317 - acb->rqbuf_firstindex);
2319 ARCMSR_MAX_QBUFFER -
2320 acb->rqbuf_firstindex;
2322 acb->rqbuffer, 1032 -
2324 - acb->rqbuf_firstindex));
2325 acb->rqbuf_firstindex =
2326 1032 - (ARCMSR_MAX_QBUFFER
2327 - acb->rqbuf_firstindex);
2331 pQbuffer, ARCMSR_MAX_QBUFFER
2332 - acb->rqbuf_firstindex);
2334 ARCMSR_MAX_QBUFFER -
2335 acb->rqbuf_firstindex;
2338 acb->rqbuf_lastindex);
2339 allxfer_len = ARCMSR_MAX_QBUFFER
2340 - acb->rqbuf_firstindex +
2341 acb->rqbuf_lastindex;
2342 acb->rqbuf_firstindex =
2343 acb->rqbuf_lastindex;
/* ring not wrapped: contiguous run, capped at 1032 bytes */
2347 if ((acb->rqbuf_lastindex -
2348 acb->rqbuf_firstindex) > 1032) {
2349 memcpy(ptmpQbuffer, pQbuffer, 1032);
2350 acb->rqbuf_firstindex += 1032;
2353 memcpy(ptmpQbuffer, pQbuffer,
2354 acb->rqbuf_lastindex -
2355 acb->rqbuf_firstindex);
2356 allxfer_len = acb->rqbuf_lastindex
2357 - acb->rqbuf_firstindex;
2358 acb->rqbuf_firstindex =
2359 acb->rqbuf_lastindex;
2363 memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
/* ring drained: if the IOP had stalled on overflow, pull its
 * pending data now and re-set the flag if the read fails */
2365 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2366 struct QBUFFER __iomem *prbuffer;
2367 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2368 prbuffer = arcmsr_get_iop_rqbuffer(acb);
2369 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2370 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2372 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2374 pcmdmessagefld->cmdmessage.Length = allxfer_len;
2375 if (acb->fw_flag == FW_DEADLOCK)
2376 pcmdmessagefld->cmdmessage.ReturnCode =
2377 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2379 pcmdmessagefld->cmdmessage.ReturnCode =
2380 ARCMSR_MESSAGE_RETURNCODE_OK;
/* queue caller-supplied bytes into the driver->IOP ring */
2383 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2384 unsigned char *ver_addr;
2385 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2386 uint8_t *pQbuffer, *ptmpuserbuffer;
2387 ver_addr = kmalloc(1032, GFP_ATOMIC);
2389 retvalue = ARCMSR_MESSAGE_FAIL;
2392 ptmpuserbuffer = ver_addr;
2393 user_len = pcmdmessagefld->cmdmessage.Length;
2394 memcpy(ptmpuserbuffer,
2395 pcmdmessagefld->messagedatabuffer, user_len);
2396 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2397 wqbuf_lastindex = acb->wqbuf_lastindex;
2398 wqbuf_firstindex = acb->wqbuf_firstindex;
/* ring not empty: previous write still pending -> report busy
 * via ILLEGAL REQUEST sense data */
2399 if (wqbuf_lastindex != wqbuf_firstindex) {
2400 struct SENSE_DATA *sensebuffer =
2401 (struct SENSE_DATA *)cmd->sense_buffer;
2402 arcmsr_write_ioctldata2iop(acb);
2403 /* has error report sensedata */
2404 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
2405 sensebuffer->SenseKey = ILLEGAL_REQUEST;
2406 sensebuffer->AdditionalSenseLength = 0x0A;
2407 sensebuffer->AdditionalSenseCode = 0x20;
2408 sensebuffer->Valid = 1;
2409 retvalue = ARCMSR_MESSAGE_FAIL;
/* free space computed modulo ring size (power of two) */
2411 my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1)
2412 & (ARCMSR_MAX_QBUFFER - 1);
2413 if (my_empty_len >= user_len) {
2414 while (user_len > 0) {
2415 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2416 if ((acb->wqbuf_lastindex + user_len)
2417 > ARCMSR_MAX_QBUFFER) {
2418 memcpy(pQbuffer, ptmpuserbuffer,
2419 ARCMSR_MAX_QBUFFER -
2420 acb->wqbuf_lastindex);
2423 - acb->wqbuf_lastindex);
2424 user_len -= (ARCMSR_MAX_QBUFFER
2425 - acb->wqbuf_lastindex);
2426 acb->wqbuf_lastindex = 0;
2428 memcpy(pQbuffer, ptmpuserbuffer,
2430 acb->wqbuf_lastindex += user_len;
2431 acb->wqbuf_lastindex %=
/* first write since the ring was cleared: push to IOP now */
2436 if (acb->acb_flags &
2437 ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2439 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2440 arcmsr_write_ioctldata2iop(acb);
2443 struct SENSE_DATA *sensebuffer =
2444 (struct SENSE_DATA *)cmd->sense_buffer;
2445 /* has error report sensedata */
2446 sensebuffer->ErrorCode =
2447 SCSI_SENSE_CURRENT_ERRORS;
2448 sensebuffer->SenseKey = ILLEGAL_REQUEST;
2449 sensebuffer->AdditionalSenseLength = 0x0A;
2450 sensebuffer->AdditionalSenseCode = 0x20;
2451 sensebuffer->Valid = 1;
2452 retvalue = ARCMSR_MESSAGE_FAIL;
2455 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2457 if (acb->fw_flag == FW_DEADLOCK)
2458 pcmdmessagefld->cmdmessage.ReturnCode =
2459 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2461 pcmdmessagefld->cmdmessage.ReturnCode =
2462 ARCMSR_MESSAGE_RETURNCODE_OK;
/* reset the IOP->driver ring */
2465 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2466 uint8_t *pQbuffer = acb->rqbuffer;
2468 arcmsr_clear_iop2drv_rqueue_buffer(acb);
2469 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2470 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2471 acb->rqbuf_firstindex = 0;
2472 acb->rqbuf_lastindex = 0;
2473 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2474 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2475 if (acb->fw_flag == FW_DEADLOCK)
2476 pcmdmessagefld->cmdmessage.ReturnCode =
2477 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2479 pcmdmessagefld->cmdmessage.ReturnCode =
2480 ARCMSR_MESSAGE_RETURNCODE_OK;
/* reset the driver->IOP ring */
2483 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2484 uint8_t *pQbuffer = acb->wqbuffer;
2485 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2486 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2487 ACB_F_MESSAGE_WQBUFFER_READED);
2488 acb->wqbuf_firstindex = 0;
2489 acb->wqbuf_lastindex = 0;
2490 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2491 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2492 if (acb->fw_flag == FW_DEADLOCK)
2493 pcmdmessagefld->cmdmessage.ReturnCode =
2494 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2496 pcmdmessagefld->cmdmessage.ReturnCode =
2497 ARCMSR_MESSAGE_RETURNCODE_OK;
/* reset both rings */
2500 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2502 arcmsr_clear_iop2drv_rqueue_buffer(acb);
2503 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2504 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2505 acb->rqbuf_firstindex = 0;
2506 acb->rqbuf_lastindex = 0;
2507 pQbuffer = acb->rqbuffer;
2508 memset(pQbuffer, 0, sizeof(struct QBUFFER));
2509 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2510 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2511 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2512 ACB_F_MESSAGE_WQBUFFER_READED);
2513 acb->wqbuf_firstindex = 0;
2514 acb->wqbuf_lastindex = 0;
2515 pQbuffer = acb->wqbuffer;
2516 memset(pQbuffer, 0, sizeof(struct QBUFFER));
2517 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2518 if (acb->fw_flag == FW_DEADLOCK)
2519 pcmdmessagefld->cmdmessage.ReturnCode =
2520 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2522 pcmdmessagefld->cmdmessage.ReturnCode =
2523 ARCMSR_MESSAGE_RETURNCODE_OK;
2526 case ARCMSR_MESSAGE_RETURN_CODE_3F: {
2527 if (acb->fw_flag == FW_DEADLOCK)
2528 pcmdmessagefld->cmdmessage.ReturnCode =
2529 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2531 pcmdmessagefld->cmdmessage.ReturnCode =
2532 ARCMSR_MESSAGE_RETURNCODE_3F;
/* identification handshake used by the management tool */
2535 case ARCMSR_MESSAGE_SAY_HELLO: {
2536 int8_t *hello_string = "Hello! I am ARCMSR";
2537 if (acb->fw_flag == FW_DEADLOCK)
2538 pcmdmessagefld->cmdmessage.ReturnCode =
2539 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2541 pcmdmessagefld->cmdmessage.ReturnCode =
2542 ARCMSR_MESSAGE_RETURNCODE_OK;
2543 memcpy(pcmdmessagefld->messagedatabuffer,
2544 hello_string, (int16_t)strlen(hello_string));
2547 case ARCMSR_MESSAGE_SAY_GOODBYE: {
2548 if (acb->fw_flag == FW_DEADLOCK)
2549 pcmdmessagefld->cmdmessage.ReturnCode =
2550 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2552 pcmdmessagefld->cmdmessage.ReturnCode =
2553 ARCMSR_MESSAGE_RETURNCODE_OK;
2554 arcmsr_iop_parking(acb);
2557 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
2558 if (acb->fw_flag == FW_DEADLOCK)
2559 pcmdmessagefld->cmdmessage.ReturnCode =
2560 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2562 pcmdmessagefld->cmdmessage.ReturnCode =
2563 ARCMSR_MESSAGE_RETURNCODE_OK;
2564 arcmsr_flush_adapter_cache(acb);
2568 retvalue = ARCMSR_MESSAGE_FAIL;
2569 pr_info("%s: unknown controlcode!\n", __func__);
/* unmap the user buffer before returning */
2573 struct scatterlist *sg = scsi_sglist(cmd);
2574 kunmap_atomic(buffer - sg->offset);
2579 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
2581 struct list_head *head = &acb->ccb_free_list;
2582 struct CommandControlBlock *ccb = NULL;
2583 unsigned long flags;
2584 spin_lock_irqsave(&acb->ccblist_lock, flags);
2585 if (!list_empty(head)) {
2586 ccb = list_entry(head->next, struct CommandControlBlock, list);
2587 list_del_init(&ccb->list);
2589 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
2592 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
/*
** Handle commands addressed to the driver's virtual device (used as the
** ioctl conduit): answer INQUIRY with a synthetic "Areca RAID controller"
** processor device, and pass buffer read/write commands to
** arcmsr_iop_message_xfer().  Completion is signalled via cmd->scsi_done.
** NOTE(review): case labels (INQUIRY / WRITE_BUFFER / READ_BUFFER /
** default) and several inqdata[] assignments were dropped by extraction —
** code left byte-identical, comments only.
*/
2596 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2597 struct scsi_cmnd *cmd)
2599 switch (cmd->cmnd[0]) {
2601 unsigned char inqdata[36];
2603 struct scatterlist *sg;
/* only LUN 0 exists on the virtual target */
2605 if (cmd->device->lun) {
2606 cmd->result = (DID_TIME_OUT << 16);
2607 cmd->scsi_done(cmd);
2610 inqdata[0] = TYPE_PROCESSOR;
2611 /* Periph Qualifier & Periph Dev Type */
2613 /* rem media bit & Dev Type Modifier */
2615 /* ISO, ECMA, & ANSI versions */
2617 /* length of additional data */
2618 strncpy(&inqdata[8], "Areca   ", 8);
2619 /* Vendor Identification */
2620 strncpy(&inqdata[16], "RAID controller ", 16);
2621 /* Product Identification */
2622 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2624 sg = scsi_sglist(cmd);
2625 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
2627 memcpy(buffer, inqdata, sizeof(inqdata));
2628 sg = scsi_sglist(cmd);
2629 kunmap_atomic(buffer - sg->offset);
2631 cmd->scsi_done(cmd);
/* buffer read/write: forward to the message-transfer ioctl path */
2636 if (arcmsr_iop_message_xfer(acb, cmd))
2637 cmd->result = (DID_ERROR << 16);
2638 cmd->scsi_done(cmd);
2642 cmd->scsi_done(cmd);
2646 static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
2647 void (* done)(struct scsi_cmnd *))
2649 struct Scsi_Host *host = cmd->device->host;
2650 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
2651 struct CommandControlBlock *ccb;
2652 int target = cmd->device->id;
2653 int lun = cmd->device->lun;
2654 uint8_t scsicmd = cmd->cmnd[0];
2655 cmd->scsi_done = done;
2656 cmd->host_scribble = NULL;
2658 if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
2659 if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
2660 cmd->result = (DID_NO_CONNECT << 16);
2662 cmd->scsi_done(cmd);
2666 /* virtual device for iop message transfer */
2667 arcmsr_handle_virtual_command(acb, cmd);
2670 ccb = arcmsr_get_freeccb(acb);
2672 return SCSI_MLQUEUE_HOST_BUSY;
2673 if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
2674 cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
2675 cmd->scsi_done(cmd);
2678 arcmsr_post_ccb(acb, ccb);
/* Generate arcmsr_queue_command(): the mid-layer wrapper around
 * arcmsr_queue_command_lck that handles host-lock acquisition. */
2682 static DEF_SCSI_QCMD(arcmsr_queue_command)
/*
** Type-A "get config": post INBOUND_MESG0_GET_CONFIG, wait for the IOP to
** respond, then copy firmware model/version/device-map strings and the
** numeric firmware parameters out of the message_rwbuffer window.
** Returns true on success, false on message timeout.
** NOTE(review): the byte-copy loops and return statements were dropped by
** extraction, and '®->' below is mojibake for '&reg->' — code left
** byte-identical, comments only.
*/
2684 static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
2686 struct MessageUnit_A __iomem *reg = acb->pmuA;
2687 char *acb_firm_model = acb->firm_model;
2688 char *acb_firm_version = acb->firm_version;
2689 char *acb_device_map = acb->device_map;
2690 char __iomem *iop_firm_model = (char __iomem *)(®->message_rwbuffer[15]);
2691 char __iomem *iop_firm_version = (char __iomem *)(®->message_rwbuffer[17]);
2692 char __iomem *iop_device_map = (char __iomem *)(®->message_rwbuffer[21]);
2694 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
2695 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
2696 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
2697 miscellaneous data' timeout \n", acb->host->host_no);
/* byte-wise copies out of the MMIO scratch window */
2702 *acb_firm_model = readb(iop_firm_model);
2710 *acb_firm_version = readb(iop_firm_version);
2718 *acb_device_map = readb(iop_device_map);
2723 pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
2727 acb->signature = readl(®->message_rwbuffer[0]);
2728 acb->firm_request_len = readl(®->message_rwbuffer[1]);
2729 acb->firm_numbers_queue = readl(®->message_rwbuffer[2]);
2730 acb->firm_sdram_size = readl(®->message_rwbuffer[3]);
2731 acb->firm_hd_channels = readl(®->message_rwbuffer[4]);
2732 acb->firm_cfg_version = readl(®->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
/*
** Type-B "get config": allocate the coherent MessageUnit_B block, wire its
** doorbell/message pointers into the two mapped BAR windows (mem_base0/1),
** then issue MESSAGE_GET_CONFIG and copy firmware strings and parameters
** out of the message_rwbuffer window.  Returns true/false on success/
** timeout or allocation failure.
** NOTE(review): extraction dropped lines (decls, copy loops, returns) and
** '®->' is mojibake for '&reg->' — code left byte-identical.
*/
2735 static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
2737 struct MessageUnit_B *reg = acb->pmuB;
2738 struct pci_dev *pdev = acb->pdev;
2740 dma_addr_t dma_coherent_handle;
2741 char *acb_firm_model = acb->firm_model;
2742 char *acb_firm_version = acb->firm_version;
2743 char *acb_device_map = acb->device_map;
2744 char __iomem *iop_firm_model;
2745 /*firm_model,15,60-67*/
2746 char __iomem *iop_firm_version;
2747 /*firm_version,17,68-83*/
2748 char __iomem *iop_device_map;
2749 /*firm_version,21,84-99*/
/* coherent buffer holds the MessageUnit_B bookkeeping structure */
2752 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
2753 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
2754 &dma_coherent_handle, GFP_KERNEL);
2757 "arcmsr%d: dma_alloc_coherent got error for hbb mu\n",
2758 acb->host->host_no);
2761 acb->dma_coherent_handle2 = dma_coherent_handle;
2762 acb->dma_coherent2 = dma_coherent;
2763 reg = (struct MessageUnit_B *)dma_coherent;
/* doorbells live in BAR window 0, message buffers in window 1 */
2765 reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
2766 reg->drv2iop_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL_MASK);
2767 reg->iop2drv_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL);
2768 reg->iop2drv_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL_MASK);
2769 reg->message_wbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_WBUFFER);
2770 reg->message_rbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RBUFFER);
2771 reg->message_rwbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RWBUFFER);
2772 iop_firm_model = (char __iomem *)(®->message_rwbuffer[15]); /*firm_model,15,60-67*/
2773 iop_firm_version = (char __iomem *)(®->message_rwbuffer[17]); /*firm_version,17,68-83*/
2774 iop_device_map = (char __iomem *)(®->message_rwbuffer[21]); /*firm_version,21,84-99*/
2776 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
2777 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
2778 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
2779 miscellaneous data' timeout \n", acb->host->host_no);
/* byte-wise copies out of the MMIO scratch window */
2784 *acb_firm_model = readb(iop_firm_model);
2791 *acb_firm_version = readb(iop_firm_version);
2799 *acb_device_map = readb(iop_device_map);
2805 pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
2810 acb->signature = readl(®->message_rwbuffer[1]);
2811 /*firm_signature,1,00-03*/
2812 acb->firm_request_len = readl(®->message_rwbuffer[2]);
2813 /*firm_request_len,1,04-07*/
2814 acb->firm_numbers_queue = readl(®->message_rwbuffer[3]);
2815 /*firm_numbers_queue,2,08-11*/
2816 acb->firm_sdram_size = readl(®->message_rwbuffer[4]);
2817 /*firm_sdram_size,3,12-15*/
2818 acb->firm_hd_channels = readl(®->message_rwbuffer[5]);
2819 /*firm_ide_channels,4,16-19*/
2820 acb->firm_cfg_version = readl(®->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
2821 /*firm_ide_channels,4,16-19*/
/*
** Type-C "get config": mask all outbound interrupts, busy-wait for the
** firmware-ready bit, post GET_CONFIG through the inbound message/doorbell
** registers, poll (up to 2000 iterations) for the message-done doorbell,
** then copy firmware model/version strings and numeric parameters out of
** msgcode_rwbuffer.  Returns true/false on success/timeout.
** NOTE(review): extraction dropped lines (copy loops, returns, delays) and
** '®->' is mojibake for '&reg->' — code left byte-identical.
*/
2825 static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
2827 uint32_t intmask_org, Index, firmware_state = 0;
2828 struct MessageUnit_C __iomem *reg = pACB->pmuC;
2829 char *acb_firm_model = pACB->firm_model;
2830 char *acb_firm_version = pACB->firm_version;
2831 char __iomem *iop_firm_model = (char __iomem *)(®->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
2832 char __iomem *iop_firm_version = (char __iomem *)(®->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
2834 /* disable all outbound interrupt */
2835 intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */
2836 writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
2837 /* wait firmware ready */
2839 firmware_state = readl(®->outbound_msgaddr1);
2840 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
2841 /* post "get config" instruction */
2842 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
2843 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
2844 /* wait message ready */
2845 for (Index = 0; Index < 2000; Index++) {
2846 if (readl(®->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
2847 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear);/*clear interrupt*/
2852 if (Index >= 2000) {
2853 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
2854 miscellaneous data' timeout \n", pACB->host->host_no);
/* byte-wise copies out of the MMIO scratch window */
2859 *acb_firm_model = readb(iop_firm_model);
2866 *acb_firm_version = readb(iop_firm_version);
2871 pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
2872 pACB->host->host_no,
2874 pACB->firm_version);
2875 pACB->firm_request_len = readl(®->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
2876 pACB->firm_numbers_queue = readl(®->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
2877 pACB->firm_sdram_size = readl(®->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
2878 pACB->firm_hd_channels = readl(®->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
2879 pACB->firm_cfg_version = readl(®->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
2880 /*all interrupt service will be enable at arcmsr_iop_init*/
/*
** Type-D (ARC1214) "get config": allocate the coherent MessageUnit_D
** block, wire every register pointer into the mem_base0 BAR window,
** clear any stale message-done doorbell, post GET_CONFIG, wait for the
** reply, then copy firmware strings and parameters out of
** msgcode_rwbuffer.  On timeout the coherent block is freed and false is
** returned.
** NOTE(review): extraction dropped lines (copy loops, returns) and '®->'
** is mojibake for '&reg->' — code left byte-identical, comments only.
*/
2884 static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
2886 char *acb_firm_model = acb->firm_model;
2887 char *acb_firm_version = acb->firm_version;
2888 char *acb_device_map = acb->device_map;
2889 char __iomem *iop_firm_model;
2890 char __iomem *iop_firm_version;
2891 char __iomem *iop_device_map;
2893 struct MessageUnit_D *reg ;
2894 void *dma_coherent2;
2895 dma_addr_t dma_coherent_handle2;
2896 struct pci_dev *pdev = acb->pdev;
2898 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
2899 dma_coherent2 = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
2900 &dma_coherent_handle2, GFP_KERNEL);
2901 if (!dma_coherent2) {
2902 pr_notice("DMA allocation failed...\n");
2905 memset(dma_coherent2, 0, acb->roundup_ccbsize);
2906 acb->dma_coherent_handle2 = dma_coherent_handle2;
2907 acb->dma_coherent2 = dma_coherent2;
2908 reg = (struct MessageUnit_D *)dma_coherent2;
/* every MessageUnit_D member is an offset into BAR window mem_base0 */
2910 reg->chip_id = acb->mem_base0 + ARCMSR_ARC1214_CHIP_ID;
2911 reg->cpu_mem_config = acb->mem_base0 +
2912 ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION;
2913 reg->i2o_host_interrupt_mask = acb->mem_base0 +
2914 ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK;
2915 reg->sample_at_reset = acb->mem_base0 + ARCMSR_ARC1214_SAMPLE_RESET;
2916 reg->reset_request = acb->mem_base0 + ARCMSR_ARC1214_RESET_REQUEST;
2917 reg->host_int_status = acb->mem_base0 +
2918 ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS;
2919 reg->pcief0_int_enable = acb->mem_base0 +
2920 ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE;
2921 reg->inbound_msgaddr0 = acb->mem_base0 +
2922 ARCMSR_ARC1214_INBOUND_MESSAGE0;
2923 reg->inbound_msgaddr1 = acb->mem_base0 +
2924 ARCMSR_ARC1214_INBOUND_MESSAGE1;
2925 reg->outbound_msgaddr0 = acb->mem_base0 +
2926 ARCMSR_ARC1214_OUTBOUND_MESSAGE0;
2927 reg->outbound_msgaddr1 = acb->mem_base0 +
2928 ARCMSR_ARC1214_OUTBOUND_MESSAGE1;
2929 reg->inbound_doorbell = acb->mem_base0 +
2930 ARCMSR_ARC1214_INBOUND_DOORBELL;
2931 reg->outbound_doorbell = acb->mem_base0 +
2932 ARCMSR_ARC1214_OUTBOUND_DOORBELL;
2933 reg->outbound_doorbell_enable = acb->mem_base0 +
2934 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE;
2935 reg->inboundlist_base_low = acb->mem_base0 +
2936 ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW;
2937 reg->inboundlist_base_high = acb->mem_base0 +
2938 ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH;
2939 reg->inboundlist_write_pointer = acb->mem_base0 +
2940 ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER;
2941 reg->outboundlist_base_low = acb->mem_base0 +
2942 ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW;
2943 reg->outboundlist_base_high = acb->mem_base0 +
2944 ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH;
2945 reg->outboundlist_copy_pointer = acb->mem_base0 +
2946 ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER;
2947 reg->outboundlist_read_pointer = acb->mem_base0 +
2948 ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER;
2949 reg->outboundlist_interrupt_cause = acb->mem_base0 +
2950 ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE;
2951 reg->outboundlist_interrupt_enable = acb->mem_base0 +
2952 ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE;
2953 reg->message_wbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_WBUFFER;
2954 reg->message_rbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_RBUFFER;
2955 reg->msgcode_rwbuffer = acb->mem_base0 +
2956 ARCMSR_ARC1214_MESSAGE_RWBUFFER;
2957 iop_firm_model = (char __iomem *)(®->msgcode_rwbuffer[15]);
2958 iop_firm_version = (char __iomem *)(®->msgcode_rwbuffer[17]);
2959 iop_device_map = (char __iomem *)(®->msgcode_rwbuffer[21]);
/* clear any stale message-done doorbell before posting GET_CONFIG */
2960 if (readl(acb->pmuD->outbound_doorbell) &
2961 ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
2962 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
2963 acb->pmuD->outbound_doorbell);/*clear interrupt*/
2965 /* post "get config" instruction */
2966 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
2967 /* wait message ready */
2968 if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
2969 pr_notice("arcmsr%d: wait get adapter firmware "
2970 "miscellaneous data timeout\n", acb->host->host_no);
2971 dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
2972 acb->dma_coherent2, acb->dma_coherent_handle2);
/* byte-wise copies out of the MMIO scratch window */
2977 *acb_firm_model = readb(iop_firm_model);
2984 *acb_firm_version = readb(iop_firm_version);
2991 *acb_device_map = readb(iop_device_map);
2996 acb->signature = readl(®->msgcode_rwbuffer[1]);
2997 /*firm_signature,1,00-03*/
2998 acb->firm_request_len = readl(®->msgcode_rwbuffer[2]);
2999 /*firm_request_len,1,04-07*/
3000 acb->firm_numbers_queue = readl(®->msgcode_rwbuffer[3]);
3001 /*firm_numbers_queue,2,08-11*/
3002 acb->firm_sdram_size = readl(®->msgcode_rwbuffer[4]);
3003 /*firm_sdram_size,3,12-15*/
3004 acb->firm_hd_channels = readl(®->msgcode_rwbuffer[5]);
3005 /*firm_hd_channels,4,16-19*/
3006 acb->firm_cfg_version = readl(®->msgcode_rwbuffer[25]);
3007 pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
3014 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3018 switch (acb->adapter_type) {
3019 case ACB_ADAPTER_TYPE_A:
3020 rtn = arcmsr_hbaA_get_config(acb);
3022 case ACB_ADAPTER_TYPE_B:
3023 rtn = arcmsr_hbaB_get_config(acb);
3025 case ACB_ADAPTER_TYPE_C:
3026 rtn = arcmsr_hbaC_get_config(acb);
3028 case ACB_ADAPTER_TYPE_D:
3029 rtn = arcmsr_hbaD_get_config(acb);
3034 if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
3035 acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
3037 acb->maxOutstanding = acb->firm_numbers_queue - 1;
3038 acb->host->can_queue = acb->maxOutstanding;
/*
** Poll the Type-A outbound completion queue (interrupts assumed masked)
** until poll_ccb completes or the retry budget (100 passes) is spent.
** Aborted or foreign CCBs are reported/completed as they drain.
** NOTE(review): extraction dropped lines (braces, sleeps, return paths) —
** code left byte-identical, comments only.
*/
3042 static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
3043 struct CommandControlBlock *poll_ccb)
3045 struct MessageUnit_A __iomem *reg = acb->pmuA;
3046 struct CommandControlBlock *ccb;
3047 struct ARCMSR_CDB *arcmsr_cdb;
3048 uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
3051 polling_hba_ccb_retry:
3053 outbound_intstatus = readl(®->outbound_intstatus) & acb->outbound_int_enable;
3054 writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/
/* 0xFFFFFFFF means the outbound queue is empty */
3056 if ((flag_ccb = readl(®->outbound_queueport)) == 0xFFFFFFFF) {
3062 if (poll_count > 100){
3066 goto polling_hba_ccb_retry;
/* flag_ccb encodes the CDB physical address >> 5 */
3069 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
3070 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3071 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
3072 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3073 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
3074 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3075 " poll command abort successfully \n"
3076 , acb->host->host_no
3077 , ccb->pcmd->device->id
3078 , (u32)ccb->pcmd->device->lun
3080 ccb->pcmd->result = DID_ABORT << 16;
3081 arcmsr_ccb_complete(ccb);
3084 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3085 " command done ccb = '0x%p'"
3086 "ccboutstandingcount = %d \n"
3087 , acb->host->host_no
3089 , atomic_read(&acb->ccboutstandingcount));
3092 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
3093 arcmsr_report_ccb_state(acb, ccb, error);
/*
** Poll the Type-B done-queue ring until poll_ccb completes or the retry
** budget (100 passes) is spent; completes aborted/foreign CCBs on the way.
** NOTE(review): extraction dropped lines (braces, index increment, sleeps,
** return paths) — code left byte-identical, comments only.
*/
3098 static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
3099 struct CommandControlBlock *poll_ccb)
3101 struct MessageUnit_B *reg = acb->pmuB;
3102 struct ARCMSR_CDB *arcmsr_cdb;
3103 struct CommandControlBlock *ccb;
3104 uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
3107 polling_hbb_ccb_retry:
3110 /* clear doorbell interrupt */
3111 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
3113 index = reg->doneq_index;
/* a zero entry means the done-queue slot is empty */
3114 flag_ccb = reg->done_qbuffer[index];
3115 if (flag_ccb == 0) {
3121 if (poll_count > 100){
3125 goto polling_hbb_ccb_retry;
3128 reg->done_qbuffer[index] = 0;
3130 /*if last index number set it to 0 */
3131 index %= ARCMSR_MAX_HBB_POSTQUEUE;
3132 reg->doneq_index = index;
3133 /* check if command done with no error*/
/* flag_ccb encodes the CDB physical address >> 5 */
3134 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
3135 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3136 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
3137 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3138 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
3139 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3140 " poll command abort successfully \n"
3142 ,ccb->pcmd->device->id
3143 ,(u32)ccb->pcmd->device->lun
3145 ccb->pcmd->result = DID_ABORT << 16;
3146 arcmsr_ccb_complete(ccb);
3149 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3150 " command done ccb = '0x%p'"
3151 "ccboutstandingcount = %d \n"
3152 , acb->host->host_no
3154 , atomic_read(&acb->ccboutstandingcount));
3157 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
3158 arcmsr_report_ccb_state(acb, ccb, error);
/*
** Poll the Type-C outbound post queue until poll_ccb completes or the
** retry budget (100 passes) is spent; completes aborted/foreign CCBs on
** the way.  Uses ERROR_MODE1 flag bit for the completion status.
** NOTE(review): extraction dropped lines (braces, sleeps, return paths)
** and '®->' is mojibake for '&reg->' — code left byte-identical.
*/
3163 static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
3164 struct CommandControlBlock *poll_ccb)
3166 struct MessageUnit_C __iomem *reg = acb->pmuC;
3167 uint32_t flag_ccb, ccb_cdb_phy;
3168 struct ARCMSR_CDB *arcmsr_cdb;
3170 struct CommandControlBlock *pCCB;
3171 uint32_t poll_ccb_done = 0, poll_count = 0;
3173 polling_hbc_ccb_retry:
3176 if ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
3177 if (poll_ccb_done) {
3182 if (poll_count > 100) {
3186 goto polling_hbc_ccb_retry;
/* low queue port word: bits 31..4 are the CDB physical address */
3189 flag_ccb = readl(®->outbound_queueport_low);
3190 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3191 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
3192 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3193 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3194 /* check ifcommand done with no error*/
3195 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3196 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3197 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3198 " poll command abort successfully \n"
3199 , acb->host->host_no
3200 , pCCB->pcmd->device->id
3201 , (u32)pCCB->pcmd->device->lun
3203 pCCB->pcmd->result = DID_ABORT << 16;
3204 arcmsr_ccb_complete(pCCB);
3207 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3208 " command done ccb = '0x%p'"
3209 "ccboutstandingcount = %d \n"
3210 , acb->host->host_no
3212 , atomic_read(&acb->ccboutstandingcount));
3215 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
3216 arcmsr_report_ccb_state(acb, pCCB, error);
3221 static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
3222 struct CommandControlBlock *poll_ccb)
3225 uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy;
3226 int rtn, doneq_index, index_stripped, outbound_write_pointer;
3227 unsigned long flags;
3228 struct ARCMSR_CDB *arcmsr_cdb;
3229 struct CommandControlBlock *pCCB;
3230 struct MessageUnit_D *pmu = acb->pmuD;
3232 polling_hbaD_ccb_retry:
3235 outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
3236 doneq_index = pmu->doneq_index;
3237 if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
3238 if (poll_ccb_done) {
3243 if (poll_count > 40) {
3247 goto polling_hbaD_ccb_retry;
3250 spin_lock_irqsave(&acb->doneq_lock, flags);
3251 if (doneq_index & 0x4000) {
3252 index_stripped = doneq_index & 0xFFF;
3253 index_stripped += 1;
3254 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
3255 pmu->doneq_index = index_stripped ?
3256 (index_stripped | 0x4000) :
3257 (index_stripped + 1);
3259 index_stripped = doneq_index;
3260 index_stripped += 1;
3261 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
3262 pmu->doneq_index = index_stripped ? index_stripped :
3263 ((index_stripped | 0x4000) + 1);
3265 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3266 doneq_index = pmu->doneq_index;
3267 flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
3268 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3269 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
3271 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
3273 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3274 if ((pCCB->acb != acb) ||
3275 (pCCB->startdone != ARCMSR_CCB_START)) {
3276 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3277 pr_notice("arcmsr%d: scsi id = %d "
3278 "lun = %d ccb = '0x%p' poll command "
3279 "abort successfully\n"
3280 , acb->host->host_no
3281 , pCCB->pcmd->device->id
3282 , (u32)pCCB->pcmd->device->lun
3284 pCCB->pcmd->result = DID_ABORT << 16;
3285 arcmsr_ccb_complete(pCCB);
3288 pr_notice("arcmsr%d: polling an illegal "
3289 "ccb command done ccb = '0x%p' "
3290 "ccboutstandingcount = %d\n"
3291 , acb->host->host_no
3293 , atomic_read(&acb->ccboutstandingcount));
3296 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
3298 arcmsr_report_ccb_state(acb, pCCB, error);
3303 static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
3304 struct CommandControlBlock *poll_ccb)
3307 switch (acb->adapter_type) {
3309 case ACB_ADAPTER_TYPE_A: {
3310 rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
3314 case ACB_ADAPTER_TYPE_B: {
3315 rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
3318 case ACB_ADAPTER_TYPE_C: {
3319 rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
3322 case ACB_ADAPTER_TYPE_D:
3323 rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
3329 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3331 uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
3332 dma_addr_t dma_coherent_handle;
3335 ********************************************************************
3336 ** here we need to tell iop 331 our freeccb.HighPart
3337 ** if freeccb.HighPart is not zero
3338 ********************************************************************
3340 switch (acb->adapter_type) {
3341 case ACB_ADAPTER_TYPE_B:
3342 case ACB_ADAPTER_TYPE_D:
3343 dma_coherent_handle = acb->dma_coherent_handle2;
3346 dma_coherent_handle = acb->dma_coherent_handle;
3349 cdb_phyaddr = lower_32_bits(dma_coherent_handle);
3350 cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
3351 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
3353 ***********************************************************************
3354 ** if adapter type B, set window of "post command Q"
3355 ***********************************************************************
3357 switch (acb->adapter_type) {
3359 case ACB_ADAPTER_TYPE_A: {
3360 if (cdb_phyaddr_hi32 != 0) {
3361 struct MessageUnit_A __iomem *reg = acb->pmuA;
3362 writel(ARCMSR_SIGNATURE_SET_CONFIG, \
3363 ®->message_rwbuffer[0]);
3364 writel(cdb_phyaddr_hi32, ®->message_rwbuffer[1]);
3365 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
3366 ®->inbound_msgaddr0);
3367 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3368 printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
3369 part physical address timeout\n",
3370 acb->host->host_no);
3377 case ACB_ADAPTER_TYPE_B: {
3378 uint32_t __iomem *rwbuffer;
3380 struct MessageUnit_B *reg = acb->pmuB;
3381 reg->postq_index = 0;
3382 reg->doneq_index = 0;
3383 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
3384 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3385 printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
3386 acb->host->host_no);
3389 rwbuffer = reg->message_rwbuffer;
3390 /* driver "set config" signature */
3391 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
3392 /* normal should be zero */
3393 writel(cdb_phyaddr_hi32, rwbuffer++);
3394 /* postQ size (256 + 8)*4 */
3395 writel(cdb_phyaddr, rwbuffer++);
3396 /* doneQ size (256 + 8)*4 */
3397 writel(cdb_phyaddr + 1056, rwbuffer++);
3398 /* ccb maxQ size must be --> [(256 + 8)*4]*/
3399 writel(1056, rwbuffer);
3401 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
3402 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3403 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
3404 timeout \n",acb->host->host_no);
3407 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
3408 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3409 pr_err("arcmsr%d: can't set driver mode.\n",
3410 acb->host->host_no);
3415 case ACB_ADAPTER_TYPE_C: {
3416 if (cdb_phyaddr_hi32 != 0) {
3417 struct MessageUnit_C __iomem *reg = acb->pmuC;
3419 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
3420 acb->adapter_index, cdb_phyaddr_hi32);
3421 writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]);
3422 writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[1]);
3423 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0);
3424 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
3425 if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
3426 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
3427 timeout \n", acb->host->host_no);
3433 case ACB_ADAPTER_TYPE_D: {
3434 uint32_t __iomem *rwbuffer;
3435 struct MessageUnit_D *reg = acb->pmuD;
3436 reg->postq_index = 0;
3437 reg->doneq_index = 0;
3438 rwbuffer = reg->msgcode_rwbuffer;
3439 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
3440 writel(cdb_phyaddr_hi32, rwbuffer++);
3441 writel(cdb_phyaddr, rwbuffer++);
3442 writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
3443 sizeof(struct InBound_SRB)), rwbuffer++);
3444 writel(0x100, rwbuffer);
3445 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
3446 if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
3447 pr_notice("arcmsr%d: 'set command Q window' timeout\n",
3448 acb->host->host_no);
3457 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
3459 uint32_t firmware_state = 0;
3460 switch (acb->adapter_type) {
3462 case ACB_ADAPTER_TYPE_A: {
3463 struct MessageUnit_A __iomem *reg = acb->pmuA;
3465 firmware_state = readl(®->outbound_msgaddr1);
3466 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
3470 case ACB_ADAPTER_TYPE_B: {
3471 struct MessageUnit_B *reg = acb->pmuB;
3473 firmware_state = readl(reg->iop2drv_doorbell);
3474 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
3475 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
3478 case ACB_ADAPTER_TYPE_C: {
3479 struct MessageUnit_C __iomem *reg = acb->pmuC;
3481 firmware_state = readl(®->outbound_msgaddr1);
3482 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
3485 case ACB_ADAPTER_TYPE_D: {
3486 struct MessageUnit_D *reg = acb->pmuD;
3488 firmware_state = readl(reg->outbound_msgaddr1);
3489 } while ((firmware_state &
3490 ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
3496 static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
3498 struct MessageUnit_A __iomem *reg = acb->pmuA;
3499 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
3500 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3503 acb->fw_flag = FW_NORMAL;
3504 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
3505 atomic_set(&acb->rq_map_token, 16);
3507 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
3508 if (atomic_dec_and_test(&acb->rq_map_token)) {
3509 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3512 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3513 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3518 static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
3520 struct MessageUnit_B *reg = acb->pmuB;
3521 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
3522 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3525 acb->fw_flag = FW_NORMAL;
3526 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
3527 atomic_set(&acb->rq_map_token, 16);
3529 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
3530 if (atomic_dec_and_test(&acb->rq_map_token)) {
3531 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3534 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
3535 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3540 static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
3542 struct MessageUnit_C __iomem *reg = acb->pmuC;
3543 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
3544 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3547 acb->fw_flag = FW_NORMAL;
3548 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
3549 atomic_set(&acb->rq_map_token, 16);
3551 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
3552 if (atomic_dec_and_test(&acb->rq_map_token)) {
3553 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3556 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3557 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
3558 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3563 static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
3565 struct MessageUnit_D *reg = acb->pmuD;
3567 if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
3568 ((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
3569 ((acb->acb_flags & ACB_F_ABORT) != 0)) {
3570 mod_timer(&acb->eternal_timer,
3571 jiffies + msecs_to_jiffies(6 * HZ));
3573 acb->fw_flag = FW_NORMAL;
3574 if (atomic_read(&acb->ante_token_value) ==
3575 atomic_read(&acb->rq_map_token)) {
3576 atomic_set(&acb->rq_map_token, 16);
3578 atomic_set(&acb->ante_token_value,
3579 atomic_read(&acb->rq_map_token));
3580 if (atomic_dec_and_test(&acb->rq_map_token)) {
3581 mod_timer(&acb->eternal_timer, jiffies +
3582 msecs_to_jiffies(6 * HZ));
3585 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
3586 reg->inbound_msgaddr0);
3587 mod_timer(&acb->eternal_timer, jiffies +
3588 msecs_to_jiffies(6 * HZ));
3592 static void arcmsr_request_device_map(unsigned long pacb)
3594 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
3595 switch (acb->adapter_type) {
3596 case ACB_ADAPTER_TYPE_A: {
3597 arcmsr_hbaA_request_device_map(acb);
3600 case ACB_ADAPTER_TYPE_B: {
3601 arcmsr_hbaB_request_device_map(acb);
3604 case ACB_ADAPTER_TYPE_C: {
3605 arcmsr_hbaC_request_device_map(acb);
3608 case ACB_ADAPTER_TYPE_D:
3609 arcmsr_hbaD_request_device_map(acb);
3614 static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
3616 struct MessageUnit_A __iomem *reg = acb->pmuA;
3617 acb->acb_flags |= ACB_F_MSG_START_BGRB;
3618 writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0);
3619 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3620 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
3621 rebulid' timeout \n", acb->host->host_no);
3625 static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
3627 struct MessageUnit_B *reg = acb->pmuB;
3628 acb->acb_flags |= ACB_F_MSG_START_BGRB;
3629 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
3630 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3631 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
3632 rebulid' timeout \n",acb->host->host_no);
3636 static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
3638 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
3639 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
3640 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
3641 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
3642 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
3643 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
3644 rebulid' timeout \n", pACB->host->host_no);
3649 static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
3651 struct MessageUnit_D *pmu = pACB->pmuD;
3653 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
3654 writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
3655 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
3656 pr_notice("arcmsr%d: wait 'start adapter "
3657 "background rebulid' timeout\n", pACB->host->host_no);
3661 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
3663 switch (acb->adapter_type) {
3664 case ACB_ADAPTER_TYPE_A:
3665 arcmsr_hbaA_start_bgrb(acb);
3667 case ACB_ADAPTER_TYPE_B:
3668 arcmsr_hbaB_start_bgrb(acb);
3670 case ACB_ADAPTER_TYPE_C:
3671 arcmsr_hbaC_start_bgrb(acb);
3673 case ACB_ADAPTER_TYPE_D:
3674 arcmsr_hbaD_start_bgrb(acb);
3679 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
3681 switch (acb->adapter_type) {
3682 case ACB_ADAPTER_TYPE_A: {
3683 struct MessageUnit_A __iomem *reg = acb->pmuA;
3684 uint32_t outbound_doorbell;
3685 /* empty doorbell Qbuffer if door bell ringed */
3686 outbound_doorbell = readl(®->outbound_doorbell);
3687 /*clear doorbell interrupt */
3688 writel(outbound_doorbell, ®->outbound_doorbell);
3689 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
3693 case ACB_ADAPTER_TYPE_B: {
3694 struct MessageUnit_B *reg = acb->pmuB;
3695 /*clear interrupt and message state*/
3696 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
3697 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
3698 /* let IOP know data has been read */
3701 case ACB_ADAPTER_TYPE_C: {
3702 struct MessageUnit_C __iomem *reg = acb->pmuC;
3703 uint32_t outbound_doorbell, i;
3704 /* empty doorbell Qbuffer if door bell ringed */
3705 outbound_doorbell = readl(®->outbound_doorbell);
3706 writel(outbound_doorbell, ®->outbound_doorbell_clear);
3707 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
3708 for (i = 0; i < 200; i++) {
3710 outbound_doorbell = readl(®->outbound_doorbell);
3711 if (outbound_doorbell &
3712 ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
3713 writel(outbound_doorbell,
3714 ®->outbound_doorbell_clear);
3715 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
3716 ®->inbound_doorbell);
3722 case ACB_ADAPTER_TYPE_D: {
3723 struct MessageUnit_D *reg = acb->pmuD;
3724 uint32_t outbound_doorbell, i;
3725 /* empty doorbell Qbuffer if door bell ringed */
3726 outbound_doorbell = readl(reg->outbound_doorbell);
3727 writel(outbound_doorbell, reg->outbound_doorbell);
3728 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
3729 reg->inbound_doorbell);
3730 for (i = 0; i < 200; i++) {
3732 outbound_doorbell = readl(reg->outbound_doorbell);
3733 if (outbound_doorbell &
3734 ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
3735 writel(outbound_doorbell,
3736 reg->outbound_doorbell);
3737 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
3738 reg->inbound_doorbell);
3747 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3749 switch (acb->adapter_type) {
3750 case ACB_ADAPTER_TYPE_A:
3752 case ACB_ADAPTER_TYPE_B:
3754 struct MessageUnit_B *reg = acb->pmuB;
3755 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
3756 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3757 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
3762 case ACB_ADAPTER_TYPE_C:
3768 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
3772 struct MessageUnit_A __iomem *pmuA = acb->pmuA;
3773 struct MessageUnit_C __iomem *pmuC = acb->pmuC;
3774 struct MessageUnit_D *pmuD = acb->pmuD;
3776 /* backup pci config data */
3777 printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
3778 for (i = 0; i < 64; i++) {
3779 pci_read_config_byte(acb->pdev, i, &value[i]);
3781 /* hardware reset signal */
3782 if ((acb->dev_id == 0x1680)) {
3783 writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
3784 } else if ((acb->dev_id == 0x1880)) {
3787 writel(0xF, &pmuC->write_sequence);
3788 writel(0x4, &pmuC->write_sequence);
3789 writel(0xB, &pmuC->write_sequence);
3790 writel(0x2, &pmuC->write_sequence);
3791 writel(0x7, &pmuC->write_sequence);
3792 writel(0xD, &pmuC->write_sequence);
3793 } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
3794 writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
3795 } else if ((acb->dev_id == 0x1214)) {
3796 writel(0x20, pmuD->reset_request);
3798 pci_write_config_byte(acb->pdev, 0x84, 0x20);
3801 /* write back pci config data */
3802 for (i = 0; i < 64; i++) {
3803 pci_write_config_byte(acb->pdev, i, value[i]);
3808 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3810 uint32_t intmask_org;
3811 /* disable all outbound interrupt */
3812 intmask_org = arcmsr_disable_outbound_ints(acb);
3813 arcmsr_wait_firmware_ready(acb);
3814 arcmsr_iop_confirm(acb);
3815 /*start background rebuild*/
3816 arcmsr_start_adapter_bgrb(acb);
3817 /* empty doorbell Qbuffer if door bell ringed */
3818 arcmsr_clear_doorbell_queue_buffer(acb);
3819 arcmsr_enable_eoi_mode(acb);
3820 /* enable outbound Post Queue,outbound doorbell Interrupt */
3821 arcmsr_enable_outbound_ints(acb, intmask_org);
3822 acb->acb_flags |= ACB_F_IOP_INITED;
3825 static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
3827 struct CommandControlBlock *ccb;
3828 uint32_t intmask_org;
3829 uint8_t rtnval = 0x00;
3831 unsigned long flags;
3833 if (atomic_read(&acb->ccboutstandingcount) != 0) {
3834 /* disable all outbound interrupt */
3835 intmask_org = arcmsr_disable_outbound_ints(acb);
3836 /* talk to iop 331 outstanding command aborted */
3837 rtnval = arcmsr_abort_allcmd(acb);
3838 /* clear all outbound posted Q */
3839 arcmsr_done4abort_postqueue(acb);
3840 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3841 ccb = acb->pccb_pool[i];
3842 if (ccb->startdone == ARCMSR_CCB_START) {
3843 scsi_dma_unmap(ccb->pcmd);
3844 ccb->startdone = ARCMSR_CCB_DONE;
3846 spin_lock_irqsave(&acb->ccblist_lock, flags);
3847 list_add_tail(&ccb->list, &acb->ccb_free_list);
3848 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3851 atomic_set(&acb->ccboutstandingcount, 0);
3852 /* enable all outbound interrupt */
3853 arcmsr_enable_outbound_ints(acb, intmask_org);
3859 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
3861 struct AdapterControlBlock *acb;
3862 uint32_t intmask_org, outbound_doorbell;
3863 int retry_count = 0;
3865 acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
3866 printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
3869 switch(acb->adapter_type){
3870 case ACB_ADAPTER_TYPE_A:{
3871 if (acb->acb_flags & ACB_F_BUS_RESET){
3873 printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
3874 timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
3879 acb->acb_flags |= ACB_F_BUS_RESET;
3880 if (!arcmsr_iop_reset(acb)) {
3881 struct MessageUnit_A __iomem *reg;
3883 arcmsr_hardware_reset(acb);
3884 acb->acb_flags &= ~ACB_F_IOP_INITED;
3886 ssleep(ARCMSR_SLEEPTIME);
3887 if ((readl(®->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
3888 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
3889 if (retry_count > ARCMSR_RETRYCOUNT) {
3890 acb->fw_flag = FW_DEADLOCK;
3891 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
3897 acb->acb_flags |= ACB_F_IOP_INITED;
3898 /* disable all outbound interrupt */
3899 intmask_org = arcmsr_disable_outbound_ints(acb);
3900 arcmsr_get_firmware_spec(acb);
3901 arcmsr_start_adapter_bgrb(acb);
3902 /* clear Qbuffer if door bell ringed */
3903 outbound_doorbell = readl(®->outbound_doorbell);
3904 writel(outbound_doorbell, ®->outbound_doorbell); /*clear interrupt */
3905 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
3906 /* enable outbound Post Queue,outbound doorbell Interrupt */
3907 arcmsr_enable_outbound_ints(acb, intmask_org);
3908 atomic_set(&acb->rq_map_token, 16);
3909 atomic_set(&acb->ante_token_value, 16);
3910 acb->fw_flag = FW_NORMAL;
3911 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3912 acb->acb_flags &= ~ACB_F_BUS_RESET;
3914 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
3916 acb->acb_flags &= ~ACB_F_BUS_RESET;
3917 atomic_set(&acb->rq_map_token, 16);
3918 atomic_set(&acb->ante_token_value, 16);
3919 acb->fw_flag = FW_NORMAL;
3920 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3925 case ACB_ADAPTER_TYPE_B:{
3926 acb->acb_flags |= ACB_F_BUS_RESET;
3927 if (!arcmsr_iop_reset(acb)) {
3928 acb->acb_flags &= ~ACB_F_BUS_RESET;
3931 acb->acb_flags &= ~ACB_F_BUS_RESET;
3932 atomic_set(&acb->rq_map_token, 16);
3933 atomic_set(&acb->ante_token_value, 16);
3934 acb->fw_flag = FW_NORMAL;
3935 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3940 case ACB_ADAPTER_TYPE_C:{
3941 if (acb->acb_flags & ACB_F_BUS_RESET) {
3943 printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
3944 timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
3949 acb->acb_flags |= ACB_F_BUS_RESET;
3950 if (!arcmsr_iop_reset(acb)) {
3951 struct MessageUnit_C __iomem *reg;
3953 arcmsr_hardware_reset(acb);
3954 acb->acb_flags &= ~ACB_F_IOP_INITED;
3956 ssleep(ARCMSR_SLEEPTIME);
3957 if ((readl(®->host_diagnostic) & 0x04) != 0) {
3958 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
3959 if (retry_count > ARCMSR_RETRYCOUNT) {
3960 acb->fw_flag = FW_DEADLOCK;
3961 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
3967 acb->acb_flags |= ACB_F_IOP_INITED;
3968 /* disable all outbound interrupt */
3969 intmask_org = arcmsr_disable_outbound_ints(acb);
3970 arcmsr_get_firmware_spec(acb);
3971 arcmsr_start_adapter_bgrb(acb);
3972 /* clear Qbuffer if door bell ringed */
3973 arcmsr_clear_doorbell_queue_buffer(acb);
3974 /* enable outbound Post Queue,outbound doorbell Interrupt */
3975 arcmsr_enable_outbound_ints(acb, intmask_org);
3976 atomic_set(&acb->rq_map_token, 16);
3977 atomic_set(&acb->ante_token_value, 16);
3978 acb->fw_flag = FW_NORMAL;
3979 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3980 acb->acb_flags &= ~ACB_F_BUS_RESET;
3982 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
3984 acb->acb_flags &= ~ACB_F_BUS_RESET;
3985 atomic_set(&acb->rq_map_token, 16);
3986 atomic_set(&acb->ante_token_value, 16);
3987 acb->fw_flag = FW_NORMAL;
3988 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3993 case ACB_ADAPTER_TYPE_D: {
3994 if (acb->acb_flags & ACB_F_BUS_RESET) {
3996 pr_notice("arcmsr: there is an bus reset"
3997 " eh proceeding.......\n");
3998 timeout = wait_event_timeout(wait_q, (acb->acb_flags
3999 & ACB_F_BUS_RESET) == 0, 220 * HZ);
4003 acb->acb_flags |= ACB_F_BUS_RESET;
4004 if (!arcmsr_iop_reset(acb)) {
4005 struct MessageUnit_D *reg;
4007 arcmsr_hardware_reset(acb);
4008 acb->acb_flags &= ~ACB_F_IOP_INITED;
4010 ssleep(ARCMSR_SLEEPTIME);
4011 if ((readl(reg->sample_at_reset) & 0x80) != 0) {
4012 pr_err("arcmsr%d: waiting for "
4013 "hw bus reset return, retry=%d\n",
4014 acb->host->host_no, retry_count);
4015 if (retry_count > ARCMSR_RETRYCOUNT) {
4016 acb->fw_flag = FW_DEADLOCK;
4017 pr_err("arcmsr%d: waiting for hw bus"
4019 "RETRY TERMINATED!!\n",
4020 acb->host->host_no);
4026 acb->acb_flags |= ACB_F_IOP_INITED;
4027 /* disable all outbound interrupt */
4028 intmask_org = arcmsr_disable_outbound_ints(acb);
4029 arcmsr_get_firmware_spec(acb);
4030 arcmsr_start_adapter_bgrb(acb);
4031 arcmsr_clear_doorbell_queue_buffer(acb);
4032 arcmsr_enable_outbound_ints(acb, intmask_org);
4033 atomic_set(&acb->rq_map_token, 16);
4034 atomic_set(&acb->ante_token_value, 16);
4035 acb->fw_flag = FW_NORMAL;
4036 mod_timer(&acb->eternal_timer,
4037 jiffies + msecs_to_jiffies(6 * HZ));
4038 acb->acb_flags &= ~ACB_F_BUS_RESET;
4040 pr_err("arcmsr: scsi bus reset "
4041 "eh returns with success\n");
4043 acb->acb_flags &= ~ACB_F_BUS_RESET;
4044 atomic_set(&acb->rq_map_token, 16);
4045 atomic_set(&acb->ante_token_value, 16);
4046 acb->fw_flag = FW_NORMAL;
4047 mod_timer(&acb->eternal_timer,
4048 jiffies + msecs_to_jiffies(6 * HZ));
/*
** Abort a single in-flight CCB by polling the adapter until the CCB is
** reported done/aborted.  Returns SUCCESS/FAILED from the poller.
*/
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb)
{
	int rtn;

	rtn = arcmsr_polling_ccbdone(acb, ccb);
	return rtn;
}
4065 static int arcmsr_abort(struct scsi_cmnd *cmd)
4067 struct AdapterControlBlock *acb =
4068 (struct AdapterControlBlock *)cmd->device->host->hostdata;
4071 uint32_t intmask_org;
4074 "arcmsr%d: abort device command of scsi id = %d lun = %d\n",
4075 acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
4076 acb->acb_flags |= ACB_F_ABORT;
4079 ************************************************
4080 ** the all interrupt service routine is locked
4081 ** we need to handle it as soon as possible and exit
4082 ************************************************
4084 if (!atomic_read(&acb->ccboutstandingcount)) {
4085 acb->acb_flags &= ~ACB_F_ABORT;
4089 intmask_org = arcmsr_disable_outbound_ints(acb);
4090 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
4091 struct CommandControlBlock *ccb = acb->pccb_pool[i];
4092 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
4093 ccb->startdone = ARCMSR_CCB_ABORTED;
4094 rtn = arcmsr_abort_one_cmd(acb, ccb);
4098 acb->acb_flags &= ~ACB_F_ABORT;
4099 arcmsr_enable_outbound_ints(acb, intmask_org);
4103 static const char *arcmsr_info(struct Scsi_Host *host)
4105 struct AdapterControlBlock *acb =
4106 (struct AdapterControlBlock *) host->hostdata;
4107 static char buf[256];
4110 switch (acb->pdev->device) {
4111 case PCI_DEVICE_ID_ARECA_1110:
4112 case PCI_DEVICE_ID_ARECA_1200:
4113 case PCI_DEVICE_ID_ARECA_1202:
4114 case PCI_DEVICE_ID_ARECA_1210:
4117 case PCI_DEVICE_ID_ARECA_1120:
4118 case PCI_DEVICE_ID_ARECA_1130:
4119 case PCI_DEVICE_ID_ARECA_1160:
4120 case PCI_DEVICE_ID_ARECA_1170:
4121 case PCI_DEVICE_ID_ARECA_1201:
4122 case PCI_DEVICE_ID_ARECA_1220:
4123 case PCI_DEVICE_ID_ARECA_1230:
4124 case PCI_DEVICE_ID_ARECA_1260:
4125 case PCI_DEVICE_ID_ARECA_1270:
4126 case PCI_DEVICE_ID_ARECA_1280:
4129 case PCI_DEVICE_ID_ARECA_1214:
4130 case PCI_DEVICE_ID_ARECA_1380:
4131 case PCI_DEVICE_ID_ARECA_1381:
4132 case PCI_DEVICE_ID_ARECA_1680:
4133 case PCI_DEVICE_ID_ARECA_1681:
4134 case PCI_DEVICE_ID_ARECA_1880:
4142 sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
4143 type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);