
hpsa: decode sense data for io and tmf
drivers/scsi/hpsa.c (sagit-ice-cold/kernel_xiaomi_msm8998.git)
1 /*
2  *    Disk Array driver for HP Smart Array SAS controllers
3  *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
4  *
5  *    This program is free software; you can redistribute it and/or modify
6  *    it under the terms of the GNU General Public License as published by
7  *    the Free Software Foundation; version 2 of the License.
8  *
9  *    This program is distributed in the hope that it will be useful,
10  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
11  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
13  *
14  *    You should have received a copy of the GNU General Public License
15  *    along with this program; if not, write to the Free Software
16  *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17  *
18  *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
19  *
20  */
21
22 #include <linux/module.h>
23 #include <linux/interrupt.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/pci-aspm.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/fs.h>
31 #include <linux/timer.h>
32 #include <linux/init.h>
33 #include <linux/spinlock.h>
34 #include <linux/compat.h>
35 #include <linux/blktrace_api.h>
36 #include <linux/uaccess.h>
37 #include <linux/io.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/completion.h>
40 #include <linux/moduleparam.h>
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_device.h>
44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_tcq.h>
46 #include <scsi/scsi_eh.h>
47 #include <linux/cciss_ioctl.h>
48 #include <linux/string.h>
49 #include <linux/bitmap.h>
50 #include <linux/atomic.h>
51 #include <linux/jiffies.h>
52 #include <linux/percpu-defs.h>
53 #include <linux/percpu.h>
54 #include <asm/unaligned.h>
55 #include <asm/div64.h>
56 #include "hpsa_cmd.h"
57 #include "hpsa.h"
58
59 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
60 #define HPSA_DRIVER_VERSION "3.4.4-1"
61 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
62 #define HPSA "hpsa"
63
64 /* How long to wait for CISS doorbell communication */
65 #define CLEAR_EVENT_WAIT_INTERVAL 20    /* ms for each msleep() call */
66 #define MODE_CHANGE_WAIT_INTERVAL 10    /* ms for each msleep() call */
67 #define MAX_CLEAR_EVENT_WAIT 30000      /* times 20 ms = 600 s */
68 #define MAX_MODE_CHANGE_WAIT 2000       /* times 10 ms = 20 s */
69 #define MAX_IOCTL_CONFIG_WAIT 1000
70
71 /* define how many times we will try a command because of bus resets */
72 #define MAX_CMD_RETRIES 3
73
74 /* Embedded module documentation macros - see modules.h */
75 MODULE_AUTHOR("Hewlett-Packard Company");
76 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
77         HPSA_DRIVER_VERSION);
78 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
79 MODULE_VERSION(HPSA_DRIVER_VERSION);
80 MODULE_LICENSE("GPL");
81
82 static int hpsa_allow_any;
83 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
84 MODULE_PARM_DESC(hpsa_allow_any,
85                 "Allow hpsa driver to access unknown HP Smart Array hardware");
86 static int hpsa_simple_mode;
87 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
88 MODULE_PARM_DESC(hpsa_simple_mode,
89         "Use 'simple mode' rather than 'performant mode'");
90
91 /* define the PCI info for the cards we can control */
92 static const struct pci_device_id hpsa_pci_device_id[] = {
93         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
94         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
95         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
96         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
97         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
98         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
99         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
100         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
101         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
102         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
103         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
104         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
105         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
106         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
107         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
108         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
109         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
110         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
111         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
112         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
113         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
114         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
115         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
116         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
117         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
118         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
119         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
120         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
121         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
122         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
123         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
124         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
125         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
126         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
127         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
128         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
129         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
130         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
131         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
132         {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
133         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
134         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
135         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
136         {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
137         {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
138         {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
139                 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
140         {0,}
141 };
142
143 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
144
145 /*  board_id = Subsystem Device ID & Vendor ID
146  *  product = Marketing Name for the board
147  *  access = Address of the struct of function pointers
148  */
149 static struct board_type products[] = {
150         {0x3241103C, "Smart Array P212", &SA5_access},
151         {0x3243103C, "Smart Array P410", &SA5_access},
152         {0x3245103C, "Smart Array P410i", &SA5_access},
153         {0x3247103C, "Smart Array P411", &SA5_access},
154         {0x3249103C, "Smart Array P812", &SA5_access},
155         {0x324A103C, "Smart Array P712m", &SA5_access},
156         {0x324B103C, "Smart Array P711m", &SA5_access},
157         {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
158         {0x3350103C, "Smart Array P222", &SA5_access},
159         {0x3351103C, "Smart Array P420", &SA5_access},
160         {0x3352103C, "Smart Array P421", &SA5_access},
161         {0x3353103C, "Smart Array P822", &SA5_access},
162         {0x3354103C, "Smart Array P420i", &SA5_access},
163         {0x3355103C, "Smart Array P220i", &SA5_access},
164         {0x3356103C, "Smart Array P721m", &SA5_access},
165         {0x1921103C, "Smart Array P830i", &SA5_access},
166         {0x1922103C, "Smart Array P430", &SA5_access},
167         {0x1923103C, "Smart Array P431", &SA5_access},
168         {0x1924103C, "Smart Array P830", &SA5_access},
169         {0x1926103C, "Smart Array P731m", &SA5_access},
170         {0x1928103C, "Smart Array P230i", &SA5_access},
171         {0x1929103C, "Smart Array P530", &SA5_access},
172         {0x21BD103C, "Smart Array P244br", &SA5_access},
173         {0x21BE103C, "Smart Array P741m", &SA5_access},
174         {0x21BF103C, "Smart HBA H240ar", &SA5_access},
175         {0x21C0103C, "Smart Array P440ar", &SA5_access},
176         {0x21C1103C, "Smart Array P840ar", &SA5_access},
177         {0x21C2103C, "Smart Array P440", &SA5_access},
178         {0x21C3103C, "Smart Array P441", &SA5_access},
179         {0x21C4103C, "Smart Array", &SA5_access},
180         {0x21C5103C, "Smart Array P841", &SA5_access},
181         {0x21C6103C, "Smart HBA H244br", &SA5_access},
182         {0x21C7103C, "Smart HBA H240", &SA5_access},
183         {0x21C8103C, "Smart HBA H241", &SA5_access},
184         {0x21C9103C, "Smart Array", &SA5_access},
185         {0x21CA103C, "Smart Array P246br", &SA5_access},
186         {0x21CB103C, "Smart Array P840", &SA5_access},
187         {0x21CC103C, "Smart Array", &SA5_access},
188         {0x21CD103C, "Smart Array", &SA5_access},
189         {0x21CE103C, "Smart HBA", &SA5_access},
190         {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
191         {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
192         {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
193         {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
194         {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
195         {0xFFFF103C, "Unknown Smart Array", &SA5_access},
196 };
197
198 static int number_of_controllers;
199
200 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
201 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
202 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
203
204 #ifdef CONFIG_COMPAT
205 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
206         void __user *arg);
207 #endif
208
209 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
210 static struct CommandList *cmd_alloc(struct ctlr_info *h);
211 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
212         void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
213         int cmd_type);
214 static void hpsa_free_cmd_pool(struct ctlr_info *h);
215 #define VPD_PAGE (1 << 8)
216
217 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
218 static void hpsa_scan_start(struct Scsi_Host *);
219 static int hpsa_scan_finished(struct Scsi_Host *sh,
220         unsigned long elapsed_time);
221 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
222
223 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
224 static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
225 static int hpsa_slave_alloc(struct scsi_device *sdev);
226 static int hpsa_slave_configure(struct scsi_device *sdev);
227 static void hpsa_slave_destroy(struct scsi_device *sdev);
228
229 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
230 static int check_for_unit_attention(struct ctlr_info *h,
231         struct CommandList *c);
232 static void check_ioctl_unit_attention(struct ctlr_info *h,
233         struct CommandList *c);
234 /* performant mode helper functions */
235 static void calc_bucket_map(int *bucket, int num_buckets,
236         int nsgs, int min_blocks, u32 *bucket_map);
237 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
238 static inline u32 next_command(struct ctlr_info *h, u8 q);
239 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
240                                u32 *cfg_base_addr, u64 *cfg_base_addr_index,
241                                u64 *cfg_offset);
242 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
243                                     unsigned long *memory_bar);
244 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
245 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
246                                      int wait_for_ready);
247 static inline void finish_cmd(struct CommandList *c);
248 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
249 #define BOARD_NOT_READY 0
250 #define BOARD_READY 1
251 static void hpsa_drain_accel_commands(struct ctlr_info *h);
252 static void hpsa_flush_cache(struct ctlr_info *h);
253 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
254         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
255         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
256 static void hpsa_command_resubmit_worker(struct work_struct *work);
257 static u32 lockup_detected(struct ctlr_info *h);
258 static int detect_controller_lockup(struct ctlr_info *h);
259
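/* Recover the ctlr_info pointer the driver stashed in the Scsi_Host private data. */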
260 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
261 {
262         unsigned long *priv = shost_priv(sdev->host);
263         return (struct ctlr_info *) *priv;
264 }
265
266 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
267 {
268         unsigned long *priv = shost_priv(sh);
269         return (struct ctlr_info *) *priv;
270 }
271
272 /* extract sense key, asc, and ascq from sense data.  0xff (-1 in a u8) means invalid. */
273 static void decode_sense_data(const u8 *sense_data, int sense_data_len,
274                         u8 *sense_key, u8 *asc, u8 *ascq)
275 {
276         struct scsi_sense_hdr sshdr;
277         bool rc;
278
279         *sense_key = -1;
280         *asc = -1;
281         *ascq = -1;
282
283         if (sense_data_len < 1)
284                 return;
285
286         rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
287         if (rc) {
288                 *sense_key = sshdr.sense_key;
289                 *asc = sshdr.asc;
290                 *ascq = sshdr.ascq;
291         }
292 }
293
294 static int check_for_unit_attention(struct ctlr_info *h,
295         struct CommandList *c)
296 {
297         u8 sense_key, asc, ascq;
298         int sense_len;
299
300         if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
301                 sense_len = sizeof(c->err_info->SenseInfo);
302         else
303                 sense_len = c->err_info->SenseLen;
304
305         decode_sense_data(c->err_info->SenseInfo, sense_len,
306                                 &sense_key, &asc, &ascq);
307         if (sense_key != UNIT_ATTENTION || asc == 0xff)
308                 return 0;
309
310         switch (asc) {
311         case STATE_CHANGED:
312                 dev_warn(&h->pdev->dev,
313                         HPSA "%d: a state change detected, command retried\n",
314                         h->ctlr);
315                 break;
316         case LUN_FAILED:
317                 dev_warn(&h->pdev->dev,
318                         HPSA "%d: LUN failure detected\n", h->ctlr);
319                 break;
320         case REPORT_LUNS_CHANGED:
321                 dev_warn(&h->pdev->dev,
322                         HPSA "%d: report LUN data changed\n", h->ctlr);
323         /*
324          * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
325          * target (array) devices.
326          */
327                 break;
328         case POWER_OR_RESET:
329                 dev_warn(&h->pdev->dev, HPSA "%d: a power on "
330                         "or device reset detected\n", h->ctlr);
331                 break;
332         case UNIT_ATTENTION_CLEARED:
333                 dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
334                     "cleared by another initiator\n", h->ctlr);
335                 break;
336         default:
337                 dev_warn(&h->pdev->dev, HPSA "%d: unknown "
338                         "unit attention detected\n", h->ctlr);
339                 break;
340         }
341         return 1;
342 }
343
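/*
 * Return 1 if the command failed with SCSI BUSY or TASK SET FULL
 * status (i.e. it is worth retrying), 0 otherwise.
 */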
344 static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
345 {
346         if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
347                 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
348                  c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
349                 return 0;
350         dev_warn(&h->pdev->dev, HPSA "device busy");
351         return 1;
352 }
353
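/*
 * sysfs 'store' handler: enable or disable HP SSD Smart Path (ioaccel)
 * for this controller.
 */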
354 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
355                                          struct device_attribute *attr,
356                                          const char *buf, size_t count)
357 {
358         int status, len;
359         struct ctlr_info *h;
360         struct Scsi_Host *shost = class_to_shost(dev);
361         char tmpbuf[10];
362
363         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
364                 return -EACCES;
365         len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
366         strncpy(tmpbuf, buf, len);
367         tmpbuf[len] = '\0';
368         if (sscanf(tmpbuf, "%d", &status) != 1)
369                 return -EINVAL;
370         h = shost_to_hba(shost);
371         h->acciopath_status = !!status;
372         dev_warn(&h->pdev->dev,
373                 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
374                 h->acciopath_status ? "enabled" : "disabled");
375         return count;
376 }
377
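/*
 * sysfs 'store' handler: set the RAID offload debug level
 * (negative values are clamped to 0).
 */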
378 static ssize_t host_store_raid_offload_debug(struct device *dev,
379                                          struct device_attribute *attr,
380                                          const char *buf, size_t count)
381 {
382         int debug_level, len;
383         struct ctlr_info *h;
384         struct Scsi_Host *shost = class_to_shost(dev);
385         char tmpbuf[10];
386
387         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
388                 return -EACCES;
389         len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
390         strncpy(tmpbuf, buf, len);
391         tmpbuf[len] = '\0';
392         if (sscanf(tmpbuf, "%d", &debug_level) != 1)
393                 return -EINVAL;
394         if (debug_level < 0)
395                 debug_level = 0;
396         h = shost_to_hba(shost);
397         h->raid_offload_debug = debug_level;
398         dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
399                 h->raid_offload_debug);
400         return count;
401 }
402
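/* sysfs 'store' handler: trigger a rescan of devices on this controller. */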
403 static ssize_t host_store_rescan(struct device *dev,
404                                  struct device_attribute *attr,
405                                  const char *buf, size_t count)
406 {
407         struct ctlr_info *h;
408         struct Scsi_Host *shost = class_to_shost(dev);
409         h = shost_to_hba(shost);
410         hpsa_scan_start(h->scsi_host);
411         return count;
412 }
413
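/*
 * sysfs 'show' handler: report the 4-character firmware revision taken
 * from the cached controller INQUIRY data.
 */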
414 static ssize_t host_show_firmware_revision(struct device *dev,
415              struct device_attribute *attr, char *buf)
416 {
417         struct ctlr_info *h;
418         struct Scsi_Host *shost = class_to_shost(dev);
419         unsigned char *fwrev;
420
421         h = shost_to_hba(shost);
422         if (!h->hba_inquiry_data)
423                 return 0;
424         fwrev = &h->hba_inquiry_data[32];
425         return snprintf(buf, 20, "%c%c%c%c\n",
426                 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
427 }
428
429 static ssize_t host_show_commands_outstanding(struct device *dev,
430              struct device_attribute *attr, char *buf)
431 {
432         struct Scsi_Host *shost = class_to_shost(dev);
433         struct ctlr_info *h = shost_to_hba(shost);
434
435         return snprintf(buf, 20, "%d\n",
436                         atomic_read(&h->commands_outstanding));
437 }
438
439 static ssize_t host_show_transport_mode(struct device *dev,
440         struct device_attribute *attr, char *buf)
441 {
442         struct ctlr_info *h;
443         struct Scsi_Host *shost = class_to_shost(dev);
444
445         h = shost_to_hba(shost);
446         return snprintf(buf, 20, "%s\n",
447                 h->transMethod & CFGTBL_Trans_Performant ?
448                         "performant" : "simple");
449 }
450
451 static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
452         struct device_attribute *attr, char *buf)
453 {
454         struct ctlr_info *h;
455         struct Scsi_Host *shost = class_to_shost(dev);
456
457         h = shost_to_hba(shost);
458         return snprintf(buf, 30, "HP SSD Smart Path %s\n",
459                 (h->acciopath_status == 1) ?  "enabled" : "disabled");
460 }
461
462 /* List of controllers which cannot be hard reset on kexec with reset_devices */
463 static u32 unresettable_controller[] = {
464         0x324a103C, /* Smart Array P712m */
465         0x324b103C, /* Smart Array P711m */
466         0x3223103C, /* Smart Array P800 */
467         0x3234103C, /* Smart Array P400 */
468         0x3235103C, /* Smart Array P400i */
469         0x3211103C, /* Smart Array E200i */
470         0x3212103C, /* Smart Array E200 */
471         0x3213103C, /* Smart Array E200i */
472         0x3214103C, /* Smart Array E200i */
473         0x3215103C, /* Smart Array E200i */
474         0x3237103C, /* Smart Array E500 */
475         0x323D103C, /* Smart Array P700m */
476         0x40800E11, /* Smart Array 5i */
477         0x409C0E11, /* Smart Array 6400 */
478         0x409D0E11, /* Smart Array 6400 EM */
479         0x40700E11, /* Smart Array 5300 */
480         0x40820E11, /* Smart Array 532 */
481         0x40830E11, /* Smart Array 5312 */
482         0x409A0E11, /* Smart Array 641 */
483         0x409B0E11, /* Smart Array 642 */
484         0x40910E11, /* Smart Array 6i */
485 };
486
487 /* List of controllers which cannot even be soft reset */
488 static u32 soft_unresettable_controller[] = {
489         0x40800E11, /* Smart Array 5i */
490         0x40700E11, /* Smart Array 5300 */
491         0x40820E11, /* Smart Array 532 */
492         0x40830E11, /* Smart Array 5312 */
493         0x409A0E11, /* Smart Array 641 */
494         0x409B0E11, /* Smart Array 642 */
495         0x40910E11, /* Smart Array 6i */
496         /* Exclude 640x boards.  These are two pci devices in one slot
497          * which share a battery backed cache module.  One controls the
498          * cache, the other accesses the cache through the one that controls
499          * it.  If we reset the one controlling the cache, the other will
500          * likely not be happy.  Just forbid resetting this conjoined mess.
501          * The 640x isn't really supported by hpsa anyway.
502          */
503         0x409C0E11, /* Smart Array 6400 */
504         0x409D0E11, /* Smart Array 6400 EM */
505 };
506
507 static u32 needs_abort_tags_swizzled[] = {
508         0x323D103C, /* Smart Array P700m */
509         0x324a103C, /* Smart Array P712m */
510         0x324b103C, /* Smart Array P711m */
511 };
512
513 static int board_id_in_array(u32 a[], int nelems, u32 board_id)
514 {
515         int i;
516
517         for (i = 0; i < nelems; i++)
518                 if (a[i] == board_id)
519                         return 1;
520         return 0;
521 }
522
523 static int ctlr_is_hard_resettable(u32 board_id)
524 {
525         return !board_id_in_array(unresettable_controller,
526                         ARRAY_SIZE(unresettable_controller), board_id);
527 }
528
529 static int ctlr_is_soft_resettable(u32 board_id)
530 {
531         return !board_id_in_array(soft_unresettable_controller,
532                         ARRAY_SIZE(soft_unresettable_controller), board_id);
533 }
534
535 static int ctlr_is_resettable(u32 board_id)
536 {
537         return ctlr_is_hard_resettable(board_id) ||
538                 ctlr_is_soft_resettable(board_id);
539 }
540
541 static int ctlr_needs_abort_tags_swizzled(u32 board_id)
542 {
543         return board_id_in_array(needs_abort_tags_swizzled,
544                         ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
545 }
546
547 static ssize_t host_show_resettable(struct device *dev,
548         struct device_attribute *attr, char *buf)
549 {
550         struct ctlr_info *h;
551         struct Scsi_Host *shost = class_to_shost(dev);
552
553         h = shost_to_hba(shost);
554         return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
555 }
556
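/*
 * Logical (RAID volume) LUN addresses are flagged by 0x40 in byte 3
 * of the 8-byte SCSI-3 address.
 */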
557 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
558 {
559         return (scsi3addr[3] & 0xC0) == 0x40;
560 }
561
562 static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
563         "1(+0)ADM", "UNKNOWN"
564 };
565 #define HPSA_RAID_0     0
566 #define HPSA_RAID_4     1
567 #define HPSA_RAID_1     2       /* also used for RAID 10 */
568 #define HPSA_RAID_5     3       /* also used for RAID 50 */
569 #define HPSA_RAID_51    4
570 #define HPSA_RAID_6     5       /* also used for RAID 60 */
571 #define HPSA_RAID_ADM   6       /* also used for RAID 1+0 ADM */
572 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
573
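/*
 * sysfs 'show' handler: report the RAID level of a logical volume,
 * or "N/A" for devices that are not logical drives.
 */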
574 static ssize_t raid_level_show(struct device *dev,
575              struct device_attribute *attr, char *buf)
576 {
577         ssize_t l = 0;
578         unsigned char rlevel;
579         struct ctlr_info *h;
580         struct scsi_device *sdev;
581         struct hpsa_scsi_dev_t *hdev;
582         unsigned long flags;
583
584         sdev = to_scsi_device(dev);
585         h = sdev_to_hba(sdev);
586         spin_lock_irqsave(&h->lock, flags);
587         hdev = sdev->hostdata;
588         if (!hdev) {
589                 spin_unlock_irqrestore(&h->lock, flags);
590                 return -ENODEV;
591         }
592
593         /* Is this even a logical drive? */
594         if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
595                 spin_unlock_irqrestore(&h->lock, flags);
596                 l = snprintf(buf, PAGE_SIZE, "N/A\n");
597                 return l;
598         }
599
600         rlevel = hdev->raid_level;
601         spin_unlock_irqrestore(&h->lock, flags);
602         if (rlevel > RAID_UNKNOWN)
603                 rlevel = RAID_UNKNOWN;
604         l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
605         return l;
606 }
607
608 static ssize_t lunid_show(struct device *dev,
609              struct device_attribute *attr, char *buf)
610 {
611         struct ctlr_info *h;
612         struct scsi_device *sdev;
613         struct hpsa_scsi_dev_t *hdev;
614         unsigned long flags;
615         unsigned char lunid[8];
616
617         sdev = to_scsi_device(dev);
618         h = sdev_to_hba(sdev);
619         spin_lock_irqsave(&h->lock, flags);
620         hdev = sdev->hostdata;
621         if (!hdev) {
622                 spin_unlock_irqrestore(&h->lock, flags);
623                 return -ENODEV;
624         }
625         memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
626         spin_unlock_irqrestore(&h->lock, flags);
627         return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
628                 lunid[0], lunid[1], lunid[2], lunid[3],
629                 lunid[4], lunid[5], lunid[6], lunid[7]);
630 }
631
632 static ssize_t unique_id_show(struct device *dev,
633              struct device_attribute *attr, char *buf)
634 {
635         struct ctlr_info *h;
636         struct scsi_device *sdev;
637         struct hpsa_scsi_dev_t *hdev;
638         unsigned long flags;
639         unsigned char sn[16];
640
641         sdev = to_scsi_device(dev);
642         h = sdev_to_hba(sdev);
643         spin_lock_irqsave(&h->lock, flags);
644         hdev = sdev->hostdata;
645         if (!hdev) {
646                 spin_unlock_irqrestore(&h->lock, flags);
647                 return -ENODEV;
648         }
649         memcpy(sn, hdev->device_id, sizeof(sn));
650         spin_unlock_irqrestore(&h->lock, flags);
651         return snprintf(buf, 16 * 2 + 2,
652                         "%02X%02X%02X%02X%02X%02X%02X%02X"
653                         "%02X%02X%02X%02X%02X%02X%02X%02X\n",
654                         sn[0], sn[1], sn[2], sn[3],
655                         sn[4], sn[5], sn[6], sn[7],
656                         sn[8], sn[9], sn[10], sn[11],
657                         sn[12], sn[13], sn[14], sn[15]);
658 }
659
660 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
661              struct device_attribute *attr, char *buf)
662 {
663         struct ctlr_info *h;
664         struct scsi_device *sdev;
665         struct hpsa_scsi_dev_t *hdev;
666         unsigned long flags;
667         int offload_enabled;
668
669         sdev = to_scsi_device(dev);
670         h = sdev_to_hba(sdev);
671         spin_lock_irqsave(&h->lock, flags);
672         hdev = sdev->hostdata;
673         if (!hdev) {
674                 spin_unlock_irqrestore(&h->lock, flags);
675                 return -ENODEV;
676         }
677         offload_enabled = hdev->offload_enabled;
678         spin_unlock_irqrestore(&h->lock, flags);
679         return snprintf(buf, 20, "%d\n", offload_enabled);
680 }
681
682 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
683 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
684 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
685 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
686 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
687                         host_show_hp_ssd_smart_path_enabled, NULL);
688 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
689                 host_show_hp_ssd_smart_path_status,
690                 host_store_hp_ssd_smart_path_status);
691 static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
692                         host_store_raid_offload_debug);
693 static DEVICE_ATTR(firmware_revision, S_IRUGO,
694         host_show_firmware_revision, NULL);
695 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
696         host_show_commands_outstanding, NULL);
697 static DEVICE_ATTR(transport_mode, S_IRUGO,
698         host_show_transport_mode, NULL);
699 static DEVICE_ATTR(resettable, S_IRUGO,
700         host_show_resettable, NULL);
701
702 static struct device_attribute *hpsa_sdev_attrs[] = {
703         &dev_attr_raid_level,
704         &dev_attr_lunid,
705         &dev_attr_unique_id,
706         &dev_attr_hp_ssd_smart_path_enabled,
707         NULL,
708 };
709
710 static struct device_attribute *hpsa_shost_attrs[] = {
711         &dev_attr_rescan,
712         &dev_attr_firmware_revision,
713         &dev_attr_commands_outstanding,
714         &dev_attr_transport_mode,
715         &dev_attr_resettable,
716         &dev_attr_hp_ssd_smart_path_status,
717         &dev_attr_raid_offload_debug,
718         NULL,
719 };
720
721 #define HPSA_NRESERVED_CMDS     (HPSA_CMDS_RESERVED_FOR_ABORTS + \
722                 HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
723
724 static struct scsi_host_template hpsa_driver_template = {
725         .module                 = THIS_MODULE,
726         .name                   = HPSA,
727         .proc_name              = HPSA,
728         .queuecommand           = hpsa_scsi_queue_command,
729         .scan_start             = hpsa_scan_start,
730         .scan_finished          = hpsa_scan_finished,
731         .change_queue_depth     = hpsa_change_queue_depth,
732         .this_id                = -1,
733         .use_clustering         = ENABLE_CLUSTERING,
734         .eh_abort_handler       = hpsa_eh_abort_handler,
735         .eh_device_reset_handler = hpsa_eh_device_reset_handler,
736         .ioctl                  = hpsa_ioctl,
737         .slave_alloc            = hpsa_slave_alloc,
738         .slave_configure        = hpsa_slave_configure,
739         .slave_destroy          = hpsa_slave_destroy,
740 #ifdef CONFIG_COMPAT
741         .compat_ioctl           = hpsa_compat_ioctl,
742 #endif
743         .sdev_attrs = hpsa_sdev_attrs,
744         .shost_attrs = hpsa_shost_attrs,
745         .max_sectors = 8192,
746         .no_write_same = 1,
747 };
748
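/*
 * Pull the next completed command tag from reply queue q.  In performant
 * mode this walks the reply ring and handles wraparound; otherwise it
 * defers to the access method's command_completed routine.
 */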
749 static inline u32 next_command(struct ctlr_info *h, u8 q)
750 {
751         u32 a;
752         struct reply_queue_buffer *rq = &h->reply_queue[q];
753
754         if (h->transMethod & CFGTBL_Trans_io_accel1)
755                 return h->access.command_completed(h, q);
756
757         if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
758                 return h->access.command_completed(h, q);
759
760         if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
761                 a = rq->head[rq->current_entry];
762                 rq->current_entry++;
763                 atomic_dec(&h->commands_outstanding);
764         } else {
765                 a = FIFO_EMPTY;
766         }
767         /* Check for wraparound */
768         if (rq->current_entry == h->max_commands) {
769                 rq->current_entry = 0;
770                 rq->wraparound ^= 1;
771         }
772         return a;
773 }
774
775 /*
776  * There are some special bits in the bus address of the
777  * command that we have to set for the controller to know
778  * how to process the command:
779  *
780  * Normal performant mode:
781  * bit 0: 1 means performant mode, 0 means simple mode.
782  * bits 1-3 = block fetch table entry
783  * bits 4-6 = command type (== 0)
784  *
785  * ioaccel1 mode:
786  * bit 0 = "performant mode" bit.
787  * bits 1-3 = block fetch table entry
788  * bits 4-6 = command type (== 110)
789  * (command type is needed because ioaccel1 mode
790  * commands are submitted through the same register as normal
791  * mode commands, so this is how the controller knows whether
792  * the command is normal mode or ioaccel1 mode.)
793  *
794  * ioaccel2 mode:
795  * bit 0 = "performant mode" bit.
796  * bits 1-4 = block fetch table entry (note extra bit)
797  * bits 4-6 = not needed, because ioaccel2 mode has
798  * a separate special register for submitting commands.
799  */
800
801 /*
802  * set_performant_mode: Modify the tag for cciss performant
803  * set bit 0 for pull model, bits 3-1 for block fetch
804  * register number
805  */
806 #define DEFAULT_REPLY_QUEUE (-1)
807 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
808                                         int reply_queue)
809 {
810         if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
811                 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
812                 if (unlikely(!h->msix_vector))
813                         return;
814                 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
815                         c->Header.ReplyQueue =
816                                 raw_smp_processor_id() % h->nreply_queues;
817                 else
818                         c->Header.ReplyQueue = reply_queue % h->nreply_queues;
819         }
820 }
821
822 static void set_ioaccel1_performant_mode(struct ctlr_info *h,
823                                                 struct CommandList *c,
824                                                 int reply_queue)
825 {
826         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
827
828         /*
829          * Tell the controller to post the reply to the queue for this
830          * processor.  This seems to give the best I/O throughput.
831          */
832         if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
833                 cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
834         else
835                 cp->ReplyQueue = reply_queue % h->nreply_queues;
836         /*
837          * Set the bits in the address sent down to include:
838          *  - performant mode bit (bit 0)
839          *  - pull count (bits 1-3)
840          *  - command type (bits 4-6)
841          */
842         c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
843                                         IOACCEL1_BUSADDR_CMDTYPE;
844 }
845
846 static void set_ioaccel2_performant_mode(struct ctlr_info *h,
847                                                 struct CommandList *c,
848                                                 int reply_queue)
849 {
850         struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
851
852         /*
853          * Tell the controller to post the reply to the queue for this
854          * processor.  This seems to give the best I/O throughput.
855          */
856         if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
857                 cp->reply_queue = smp_processor_id() % h->nreply_queues;
858         else
859                 cp->reply_queue = reply_queue % h->nreply_queues;
860         /*
861          * Set the bits in the address sent down to include:
862          *  - performant mode bit not used in ioaccel mode 2
863          *  - pull count (bits 0-3)
864          *  - command type isn't needed for ioaccel2
865          */
866         c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
867 }
868
869 static int is_firmware_flash_cmd(u8 *cdb)
870 {
871         return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
872 }
873
874 /*
875  * During firmware flash, the heartbeat register may not update as frequently
876  * as it should.  So we dial down lockup detection during firmware flash. and
877  * dial it back up when firmware flash completes.
878  */
879 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
880 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
881 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
882                 struct CommandList *c)
883 {
884         if (!is_firmware_flash_cmd(c->Request.CDB))
885                 return;
886         atomic_inc(&h->firmware_flash_in_progress);
887         h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
888 }
889
890 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
891                 struct CommandList *c)
892 {
893         if (is_firmware_flash_cmd(c->Request.CDB) &&
894                 atomic_dec_and_test(&h->firmware_flash_in_progress))
895                 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
896 }
897
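/*
 * Set the tag/reply-queue routing bits appropriate to the command type,
 * then post the command to the controller.
 */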
898 static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
899         struct CommandList *c, int reply_queue)
900 {
901         dial_down_lockup_detection_during_fw_flash(h, c);
902         atomic_inc(&h->commands_outstanding);
903         switch (c->cmd_type) {
904         case CMD_IOACCEL1:
905                 set_ioaccel1_performant_mode(h, c, reply_queue);
906                 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
907                 break;
908         case CMD_IOACCEL2:
909                 set_ioaccel2_performant_mode(h, c, reply_queue);
910                 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
911                 break;
912         default:
913                 set_performant_mode(h, c, reply_queue);
914                 h->access.submit_command(h, c);
915         }
916 }
917
918 static void enqueue_cmd_and_start_io(struct ctlr_info *h,
919                                         struct CommandList *c)
920 {
921         __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
922 }
923
924 static inline int is_hba_lunid(unsigned char scsi3addr[])
925 {
926         return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
927 }
928
929 static inline int is_scsi_rev_5(struct ctlr_info *h)
930 {
931         if (!h->hba_inquiry_data)
932                 return 0;
933         if ((h->hba_inquiry_data[2] & 0x07) == 5)
934                 return 1;
935         return 0;
936 }
937
938 static int hpsa_find_target_lun(struct ctlr_info *h,
939         unsigned char scsi3addr[], int bus, int *target, int *lun)
940 {
941         /* finds an unused bus, target, lun for a new physical device
942          * assumes h->devlock is held
943          */
944         int i, found = 0;
945         DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
946
947         bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
948
949         for (i = 0; i < h->ndevices; i++) {
950                 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
951                         __set_bit(h->dev[i]->target, lun_taken);
952         }
953
954         i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
955         if (i < HPSA_MAX_DEVICES) {
956                 /* *bus = 1; */
957                 *target = i;
958                 *lun = 0;
959                 found = 1;
960         }
961         return !found;
962 }
963
964 static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
965         struct hpsa_scsi_dev_t *dev, char *description)
966 {
967         dev_printk(level, &h->pdev->dev,
968                         "scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
969                         h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
970                         description,
971                         scsi_device_type(dev->devtype),
972                         dev->vendor,
973                         dev->model,
974                         dev->raid_level > RAID_UNKNOWN ?
975                                 "RAID-?" : raid_label[dev->raid_level],
976                         dev->offload_config ? '+' : '-',
977                         dev->offload_enabled ? '+' : '-',
978                         dev->expose_state);
979 }
980
981 /* Add an entry into h->dev[] array. */
982 static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
983                 struct hpsa_scsi_dev_t *device,
984                 struct hpsa_scsi_dev_t *added[], int *nadded)
985 {
986         /* assumes h->devlock is held */
987         int n = h->ndevices;
988         int i;
989         unsigned char addr1[8], addr2[8];
990         struct hpsa_scsi_dev_t *sd;
991
992         if (n >= HPSA_MAX_DEVICES) {
993                 dev_err(&h->pdev->dev, "too many devices, some will be "
994                         "inaccessible.\n");
995                 return -1;
996         }
997
998         /* physical devices do not have lun or target assigned until now. */
999         if (device->lun != -1)
1000                 /* Logical device, lun is already assigned. */
1001                 goto lun_assigned;
1002
1003         /* If this device is a non-zero lun of a multi-lun device
1004          * byte 4 of the 8-byte LUN addr will contain the logical
1005          * unit no, zero otherwise.
1006          */
1007         if (device->scsi3addr[4] == 0) {
1008                 /* This is not a non-zero lun of a multi-lun device */
1009                 if (hpsa_find_target_lun(h, device->scsi3addr,
1010                         device->bus, &device->target, &device->lun) != 0)
1011                         return -1;
1012                 goto lun_assigned;
1013         }
1014
1015         /* This is a non-zero lun of a multi-lun device.
1016          * Search through our list and find the device which
1017          * has the same 8 byte LUN address, excepting byte 4.
1018          * Assign the same bus and target for this new LUN.
1019          * Use the logical unit number from the firmware.
1020          */
1021         memcpy(addr1, device->scsi3addr, 8);
1022         addr1[4] = 0;
1023         for (i = 0; i < n; i++) {
1024                 sd = h->dev[i];
1025                 memcpy(addr2, sd->scsi3addr, 8);
1026                 addr2[4] = 0;
1027                 /* differ only in byte 4? */
1028                 if (memcmp(addr1, addr2, 8) == 0) {
1029                         device->bus = sd->bus;
1030                         device->target = sd->target;
1031                         device->lun = device->scsi3addr[4];
1032                         break;
1033                 }
1034         }
1035         if (device->lun == -1) {
1036                 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1037                         " suspect firmware bug or unsupported hardware "
1038                         "configuration.\n");
1039                         return -1;
1040         }
1041
1042 lun_assigned:
1043
1044         h->dev[n] = device;
1045         h->ndevices++;
1046         device->offload_to_be_enabled = device->offload_enabled;
1047         device->offload_enabled = 0;
1048         added[*nadded] = device;
1049         (*nadded)++;
1050         hpsa_show_dev_msg(KERN_INFO, h, device,
1051                 device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
1052         return 0;
1053 }
1054
1055 /* Update an entry in h->dev[] array. */
1056 static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
1057         int entry, struct hpsa_scsi_dev_t *new_entry)
1058 {
1059         /* assumes h->devlock is held */
1060         BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1061
1062         /* Raid level changed. */
1063         h->dev[entry]->raid_level = new_entry->raid_level;
1064
1065         /* Raid offload parameters changed.  Careful about the ordering. */
1066         if (new_entry->offload_config && new_entry->offload_enabled) {
1067                 /*
1068                  * if drive is newly offload_enabled, we want to copy the
1069                  * raid map data first.  If previously offload_enabled and
1070                  * offload_config were set, raid map data had better be
1071                  * the same as it was before.  if raid map data is changed
1072                  * then it had better be the case that
1073                  * h->dev[entry]->offload_enabled is currently 0.
1074                  */
1075                 h->dev[entry]->raid_map = new_entry->raid_map;
1076                 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1077         }
1078         h->dev[entry]->offload_config = new_entry->offload_config;
1079         h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1080         h->dev[entry]->queue_depth = new_entry->queue_depth;
1081
1082         /*
1083          * We can turn off ioaccel offload now, but need to delay turning
1084          * it on until we can update h->dev[entry]->phys_disk[], but we
1085          * can't do that until all the devices are updated.
1086          */
1087         h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
1088         if (!new_entry->offload_enabled)
1089                 h->dev[entry]->offload_enabled = 0;
1090
1091         hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1092 }
1093
1094 /* Replace an entry from h->dev[] array. */
1095 static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
1096         int entry, struct hpsa_scsi_dev_t *new_entry,
1097         struct hpsa_scsi_dev_t *added[], int *nadded,
1098         struct hpsa_scsi_dev_t *removed[], int *nremoved)
1099 {
1100         /* assumes h->devlock is held */
1101         BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1102         removed[*nremoved] = h->dev[entry];
1103         (*nremoved)++;
1104
1105         /*
1106          * New physical devices won't have target/lun assigned yet
1107          * so we need to preserve the values in the slot we are replacing.
1108          */
1109         if (new_entry->target == -1) {
1110                 new_entry->target = h->dev[entry]->target;
1111                 new_entry->lun = h->dev[entry]->lun;
1112         }
1113
1114         new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1115         new_entry->offload_enabled = 0;
1116         h->dev[entry] = new_entry;
1117         added[*nadded] = new_entry;
1118         (*nadded)++;
1119         hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1120 }
1121
1122 /* Remove an entry from h->dev[] array. */
1123 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
1124         struct hpsa_scsi_dev_t *removed[], int *nremoved)
1125 {
1126         /* assumes h->devlock is held */
1127         int i;
1128         struct hpsa_scsi_dev_t *sd;
1129
1130         BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1131
1132         sd = h->dev[entry];
1133         removed[*nremoved] = h->dev[entry];
1134         (*nremoved)++;
1135
1136         for (i = entry; i < h->ndevices-1; i++)
1137                 h->dev[i] = h->dev[i+1];
1138         h->ndevices--;
1139         hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1140 }
1141
1142 #define SCSI3ADDR_EQ(a, b) ( \
1143         (a)[7] == (b)[7] && \
1144         (a)[6] == (b)[6] && \
1145         (a)[5] == (b)[5] && \
1146         (a)[4] == (b)[4] && \
1147         (a)[3] == (b)[3] && \
1148         (a)[2] == (b)[2] && \
1149         (a)[1] == (b)[1] && \
1150         (a)[0] == (b)[0])
1151
1152 static void fixup_botched_add(struct ctlr_info *h,
1153         struct hpsa_scsi_dev_t *added)
1154 {
1155         /* called when scsi_add_device fails in order to re-adjust
1156          * h->dev[] to match the mid layer's view.
1157          */
1158         unsigned long flags;
1159         int i, j;
1160
1161         spin_lock_irqsave(&h->lock, flags);
1162         for (i = 0; i < h->ndevices; i++) {
1163                 if (h->dev[i] == added) {
1164                         for (j = i; j < h->ndevices-1; j++)
1165                                 h->dev[j] = h->dev[j+1];
1166                         h->ndevices--;
1167                         break;
1168                 }
1169         }
1170         spin_unlock_irqrestore(&h->lock, flags);
1171         kfree(added);
1172 }
1173
1174 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1175         struct hpsa_scsi_dev_t *dev2)
1176 {
1177         /* we compare everything except lun and target as these
1178          * are not yet assigned.  Compare parts likely
1179          * to differ first
1180          */
1181         if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1182                 sizeof(dev1->scsi3addr)) != 0)
1183                 return 0;
1184         if (memcmp(dev1->device_id, dev2->device_id,
1185                 sizeof(dev1->device_id)) != 0)
1186                 return 0;
1187         if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1188                 return 0;
1189         if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1190                 return 0;
1191         if (dev1->devtype != dev2->devtype)
1192                 return 0;
1193         if (dev1->bus != dev2->bus)
1194                 return 0;
1195         return 1;
1196 }
1197
1198 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1199         struct hpsa_scsi_dev_t *dev2)
1200 {
1201         /* Device attributes that can change, but don't mean
1202          * that the device is a different device, nor that the OS
1203          * needs to be told anything about the change.
1204          */
1205         if (dev1->raid_level != dev2->raid_level)
1206                 return 1;
1207         if (dev1->offload_config != dev2->offload_config)
1208                 return 1;
1209         if (dev1->offload_enabled != dev2->offload_enabled)
1210                 return 1;
1211         if (dev1->queue_depth != dev2->queue_depth)
1212                 return 1;
1213         return 0;
1214 }
1215
1216 /* Find needle in haystack.  If exact match found, return DEVICE_SAME,
1217  * and return needle location in *index.  If scsi3addr matches, but not
1218  * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1219  * location in *index.
1220  * In the case of a minor device attribute change, such as RAID level, just
1221  * return DEVICE_UPDATED, along with the updated device's location in index.
1222  * If needle not found, return DEVICE_NOT_FOUND.
1223  */
1224 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1225         struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1226         int *index)
1227 {
1228         int i;
1229 #define DEVICE_NOT_FOUND 0
1230 #define DEVICE_CHANGED 1
1231 #define DEVICE_SAME 2
1232 #define DEVICE_UPDATED 3
1233         for (i = 0; i < haystack_size; i++) {
1234                 if (haystack[i] == NULL) /* previously removed. */
1235                         continue;
1236                 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1237                         *index = i;
1238                         if (device_is_the_same(needle, haystack[i])) {
1239                                 if (device_updated(needle, haystack[i]))
1240                                         return DEVICE_UPDATED;
1241                                 return DEVICE_SAME;
1242                         } else {
1243                                 /* Keep offline devices offline */
1244                                 if (needle->volume_offline)
1245                                         return DEVICE_NOT_FOUND;
1246                                 return DEVICE_CHANGED;
1247                         }
1248                 }
1249         }
1250         *index = -1;
1251         return DEVICE_NOT_FOUND;
1252 }
1253
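/*
 * Remember an offline volume on h->offline_device_list (if it is not
 * already there) so it can be monitored for coming back online.
 */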
1254 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1255                                         unsigned char scsi3addr[])
1256 {
1257         struct offline_device_entry *device;
1258         unsigned long flags;
1259
1260         /* Check to see if device is already on the list */
1261         spin_lock_irqsave(&h->offline_device_lock, flags);
1262         list_for_each_entry(device, &h->offline_device_list, offline_list) {
1263                 if (memcmp(device->scsi3addr, scsi3addr,
1264                         sizeof(device->scsi3addr)) == 0) {
1265                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1266                         return;
1267                 }
1268         }
1269         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1270
1271         /* Device is not on the list, add it. */
1272         device = kmalloc(sizeof(*device), GFP_KERNEL);
1273         if (!device) {
1274                 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1275                 return;
1276         }
1277         memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1278         spin_lock_irqsave(&h->offline_device_lock, flags);
1279         list_add_tail(&device->offline_list, &h->offline_device_list);
1280         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1281 }
1282
1283 /* Print a message explaining various offline volume states */
1284 static void hpsa_show_volume_status(struct ctlr_info *h,
1285         struct hpsa_scsi_dev_t *sd)
1286 {
1287         if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1288                 dev_info(&h->pdev->dev,
1289                         "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1290                         h->scsi_host->host_no,
1291                         sd->bus, sd->target, sd->lun);
1292         switch (sd->volume_offline) {
1293         case HPSA_LV_OK:
1294                 break;
1295         case HPSA_LV_UNDERGOING_ERASE:
1296                 dev_info(&h->pdev->dev,
1297                         "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1298                         h->scsi_host->host_no,
1299                         sd->bus, sd->target, sd->lun);
1300                 break;
1301         case HPSA_LV_UNDERGOING_RPI:
1302                 dev_info(&h->pdev->dev,
1303                         "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
1304                         h->scsi_host->host_no,
1305                         sd->bus, sd->target, sd->lun);
1306                 break;
1307         case HPSA_LV_PENDING_RPI:
1308                 dev_info(&h->pdev->dev,
1309                                 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1310                                 h->scsi_host->host_no,
1311                                 sd->bus, sd->target, sd->lun);
1312                 break;
1313         case HPSA_LV_ENCRYPTED_NO_KEY:
1314                 dev_info(&h->pdev->dev,
1315                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1316                         h->scsi_host->host_no,
1317                         sd->bus, sd->target, sd->lun);
1318                 break;
1319         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1320                 dev_info(&h->pdev->dev,
1321                         "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1322                         h->scsi_host->host_no,
1323                         sd->bus, sd->target, sd->lun);
1324                 break;
1325         case HPSA_LV_UNDERGOING_ENCRYPTION:
1326                 dev_info(&h->pdev->dev,
1327                         "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1328                         h->scsi_host->host_no,
1329                         sd->bus, sd->target, sd->lun);
1330                 break;
1331         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1332                 dev_info(&h->pdev->dev,
1333                         "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1334                         h->scsi_host->host_no,
1335                         sd->bus, sd->target, sd->lun);
1336                 break;
1337         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1338                 dev_info(&h->pdev->dev,
1339                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1340                         h->scsi_host->host_no,
1341                         sd->bus, sd->target, sd->lun);
1342                 break;
1343         case HPSA_LV_PENDING_ENCRYPTION:
1344                 dev_info(&h->pdev->dev,
1345                         "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1346                         h->scsi_host->host_no,
1347                         sd->bus, sd->target, sd->lun);
1348                 break;
1349         case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1350                 dev_info(&h->pdev->dev,
1351                         "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1352                         h->scsi_host->host_no,
1353                         sd->bus, sd->target, sd->lun);
1354                 break;
1355         }
1356 }
1357
1358 /*
1359  * Figure the list of physical drive pointers for a logical drive with
1360  * raid offload configured.
1361  */
1362 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1363                                 struct hpsa_scsi_dev_t *dev[], int ndevices,
1364                                 struct hpsa_scsi_dev_t *logical_drive)
1365 {
1366         struct raid_map_data *map = &logical_drive->raid_map;
1367         struct raid_map_disk_data *dd = &map->data[0];
1368         int i, j;
1369         int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1370                                 le16_to_cpu(map->metadata_disks_per_row);
1371         int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1372                                 le16_to_cpu(map->layout_map_count) *
1373                                 total_disks_per_row;
1374         int nphys_disk = le16_to_cpu(map->layout_map_count) *
1375                                 total_disks_per_row;
1376         int qdepth;
1377
1378         if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1379                 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1380
1381         qdepth = 0;
1382         for (i = 0; i < nraid_map_entries; i++) {
1383                 logical_drive->phys_disk[i] = NULL;
1384                 if (!logical_drive->offload_config)
1385                         continue;
1386                 for (j = 0; j < ndevices; j++) {
1387                         if (dev[j]->devtype != TYPE_DISK)
1388                                 continue;
1389                         if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
1390                                 continue;
1391                         if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1392                                 continue;
1393
1394                         logical_drive->phys_disk[i] = dev[j];
1395                         if (i < nphys_disk)
1396                                 qdepth = min(h->nr_cmds, qdepth +
1397                                     logical_drive->phys_disk[i]->queue_depth);
1398                         break;
1399                 }
1400
1401                 /*
1402                  * This can happen if a physical drive is removed and
1403                  * the logical drive is degraded.  In that case, the RAID
1404                  * map data will refer to a physical disk which isn't actually
1405                  * present.  And in that case offload_enabled should already
1406                  * be 0, but we'll turn it off here just in case.
1407                  */
1408                 if (!logical_drive->phys_disk[i]) {
1409                         logical_drive->offload_enabled = 0;
1410                         logical_drive->offload_to_be_enabled = 0;
1411                         logical_drive->queue_depth = 8;
1412                 }
1413         }
1414         if (nraid_map_entries)
1415                 /*
1416                  * This is correct for reads, too high for full stripe writes,
1417                  * way too high for partial stripe writes.
1418                  */
1419                 logical_drive->queue_depth = qdepth;
1420         else
1421                 logical_drive->queue_depth = h->nr_cmds;
1422 }
1423
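/*
 * Recompute the physical disk pointers for every logical drive in dev[].
 * Logical drives with ioaccel offload currently enabled are skipped,
 * since their RAID map and phys_disk[] assignments cannot be changing.
 */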
1424 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1425                                 struct hpsa_scsi_dev_t *dev[], int ndevices)
1426 {
1427         int i;
1428
1429         for (i = 0; i < ndevices; i++) {
1430                 if (dev[i]->devtype != TYPE_DISK)
1431                         continue;
1432                 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1433                         continue;
1434
1435                 /*
1436                  * If offload is currently enabled, the RAID map and
1437                  * phys_disk[] assignment *better* not be changing
1438                  * and since it isn't changing, we do not need to
1439                  * update it.
1440                  */
1441                 if (dev[i]->offload_enabled)
1442                         continue;
1443
1444                 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1445         }
1446 }
1447
1448 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
1449         struct hpsa_scsi_dev_t *sd[], int nsds)
1450 {
1451         /* sd contains scsi3 addresses and devtypes, and inquiry
1452          * data.  This function takes what's in sd to be the current
1453          * reality and updates h->dev[] to reflect that reality.
1454          */
1455         int i, entry, device_change, changes = 0;
1456         struct hpsa_scsi_dev_t *csd;
1457         unsigned long flags;
1458         struct hpsa_scsi_dev_t **added, **removed;
1459         int nadded, nremoved;
1460         struct Scsi_Host *sh = NULL;
1461
1462         added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1463         removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1464
1465         if (!added || !removed) {
1466                 dev_warn(&h->pdev->dev, "out of memory in "
1467                         "adjust_hpsa_scsi_table\n");
1468                 goto free_and_out;
1469         }
1470
1471         spin_lock_irqsave(&h->devlock, flags);
1472
1473         /* find any devices in h->dev[] that are not in
1474          * sd[] and remove them from h->dev[], and for any
1475          * devices which have changed, remove the old device
1476          * info and add the new device info.
1477          * If minor device attributes change, just update
1478          * the existing device structure.
1479          */
1480         i = 0;
1481         nremoved = 0;
1482         nadded = 0;
1483         while (i < h->ndevices) {
1484                 csd = h->dev[i];
1485                 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1486                 if (device_change == DEVICE_NOT_FOUND) {
1487                         changes++;
1488                         hpsa_scsi_remove_entry(h, hostno, i,
1489                                 removed, &nremoved);
1490                         continue; /* remove ^^^, hence i not incremented */
1491                 } else if (device_change == DEVICE_CHANGED) {
1492                         changes++;
1493                         hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1494                                 added, &nadded, removed, &nremoved);
1495                         /* Set it to NULL to prevent it from being freed
1496                          * at the bottom of hpsa_update_scsi_devices()
1497                          */
1498                         sd[entry] = NULL;
1499                 } else if (device_change == DEVICE_UPDATED) {
1500                         hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
1501                 }
1502                 i++;
1503         }
1504
1505         /* Now, make sure every device listed in sd[] is also
1506          * listed in h->dev[], adding them if they aren't found
1507          */
1508
1509         for (i = 0; i < nsds; i++) {
1510                 if (!sd[i]) /* if already added above. */
1511                         continue;
1512
1513                 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1514                  * as the SCSI mid-layer does not handle such devices well.
1515                  * It relentlessly loops sending TUR at 3Hz, then READ(10)
1516                  * at 160Hz, and prevents the system from coming up.
1517                  */
1518                 if (sd[i]->volume_offline) {
1519                         hpsa_show_volume_status(h, sd[i]);
1520                         hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1521                         continue;
1522                 }
1523
1524                 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1525                                         h->ndevices, &entry);
1526                 if (device_change == DEVICE_NOT_FOUND) {
1527                         changes++;
1528                         if (hpsa_scsi_add_entry(h, hostno, sd[i],
1529                                 added, &nadded) != 0)
1530                                 break;
1531                         sd[i] = NULL; /* prevent from being freed later. */
1532                 } else if (device_change == DEVICE_CHANGED) {
1533                         /* should never happen... */
1534                         changes++;
1535                         dev_warn(&h->pdev->dev,
1536                                 "device unexpectedly changed.\n");
1537                         /* but if it does happen, we just ignore that device */
1538                 }
1539         }
1540         hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1541
1542         /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1543          * any logical drives that need it enabled.
1544          */
1545         for (i = 0; i < h->ndevices; i++)
1546                 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1547
1548         spin_unlock_irqrestore(&h->devlock, flags);
1549
1550         /* Monitor devices which are in one of several NOT READY states to be
1551          * brought online later. This must be done without holding h->devlock,
1552          * so don't touch h->dev[]
1553          */
1554         for (i = 0; i < nsds; i++) {
1555                 if (!sd[i]) /* if already added above. */
1556                         continue;
1557                 if (sd[i]->volume_offline)
1558                         hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1559         }
1560
1561         /* Don't notify the scsi mid layer of any changes the first time
1562          * through (or if there are no changes); scsi_scan_host will do it
1563          * later the first time through.
1564          */
1565         if (hostno == -1 || !changes)
1566                 goto free_and_out;
1567
1568         sh = h->scsi_host;
1569         /* Notify scsi mid layer of any removed devices */
1570         for (i = 0; i < nremoved; i++) {
1571                 if (removed[i]->expose_state & HPSA_SCSI_ADD) {
1572                         struct scsi_device *sdev =
1573                                 scsi_device_lookup(sh, removed[i]->bus,
1574                                         removed[i]->target, removed[i]->lun);
1575                         if (sdev != NULL) {
1576                                 scsi_remove_device(sdev);
1577                                 scsi_device_put(sdev);
1578                         } else {
1579                                 /*
1580                                  * We don't expect to get here.
1581                                  * Future cmds to this device will get a selection
1582                                  * timeout as if the device were gone.
1583                                  */
1584                                 hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
1585                                         "didn't find device for removal.");
1586                         }
1587                 }
1588                 kfree(removed[i]);
1589                 removed[i] = NULL;
1590         }
1591
1592         /* Notify scsi mid layer of any added devices */
1593         for (i = 0; i < nadded; i++) {
1594                 if (!(added[i]->expose_state & HPSA_SCSI_ADD))
1595                         continue;
1596                 if (scsi_add_device(sh, added[i]->bus,
1597                         added[i]->target, added[i]->lun) == 0)
1598                         continue;
1599                 hpsa_show_dev_msg(KERN_WARNING, h, added[i],
1600                                         "addition failed, device not added.");
1601                 /* now we have to remove it from h->dev,
1602                  * since it didn't get added to scsi mid layer
1603                  */
1604                 fixup_botched_add(h, added[i]);
1605         }
1606
1607 free_and_out:
1608         kfree(added);
1609         kfree(removed);
1610 }
1611
1612 /*
1613  * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
1614  * Assumes h->devlock is held.
1615  */
1616 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1617         int bus, int target, int lun)
1618 {
1619         int i;
1620         struct hpsa_scsi_dev_t *sd;
1621
1622         for (i = 0; i < h->ndevices; i++) {
1623                 sd = h->dev[i];
1624                 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1625                         return sd;
1626         }
1627         return NULL;
1628 }
1629
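/*
 * SCSI midlayer slave_alloc callback: look up our per-device structure
 * by bus/target/lun and stash it in sdev->hostdata (only for devices
 * exposed with HPSA_SCSI_ADD).
 */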
1630 static int hpsa_slave_alloc(struct scsi_device *sdev)
1631 {
1632         struct hpsa_scsi_dev_t *sd;
1633         unsigned long flags;
1634         struct ctlr_info *h;
1635
1636         h = sdev_to_hba(sdev);
1637         spin_lock_irqsave(&h->devlock, flags);
1638         sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1639                 sdev_id(sdev), sdev->lun);
1640         if (likely(sd)) {
1641                 atomic_set(&sd->ioaccel_cmds_out, 0);
1642                 sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
1643         } else
1644                 sdev->hostdata = NULL;
1645         spin_unlock_irqrestore(&h->devlock, flags);
1646         return 0;
1647 }
1648
1649 /* configure scsi device based on internal per-device structure */
1650 static int hpsa_slave_configure(struct scsi_device *sdev)
1651 {
1652         struct hpsa_scsi_dev_t *sd;
1653         int queue_depth;
1654
1655         sd = sdev->hostdata;
1656         sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);
1657
1658         if (sd)
1659                 queue_depth = sd->queue_depth != 0 ?
1660                         sd->queue_depth : sdev->host->can_queue;
1661         else
1662                 queue_depth = sdev->host->can_queue;
1663
1664         scsi_change_queue_depth(sdev, queue_depth);
1665
1666         return 0;
1667 }
1668
1669 static void hpsa_slave_destroy(struct scsi_device *sdev)
1670 {
1671         /* nothing to do. */
1672 }
1673
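/* Free the per-command SG chain blocks and the array that tracks them. */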
1674 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1675 {
1676         int i;
1677
1678         if (!h->cmd_sg_list)
1679                 return;
1680         for (i = 0; i < h->nr_cmds; i++) {
1681                 kfree(h->cmd_sg_list[i]);
1682                 h->cmd_sg_list[i] = NULL;
1683         }
1684         kfree(h->cmd_sg_list);
1685         h->cmd_sg_list = NULL;
1686 }
1687
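/*
 * Allocate one SG chain block of h->chainsize descriptors per command,
 * used when a request needs more SG entries than fit in the command
 * itself.  Returns 0 on success, -ENOMEM on failure.
 */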
1688 static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
1689 {
1690         int i;
1691
1692         if (h->chainsize <= 0)
1693                 return 0;
1694
1695         h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1696                                 GFP_KERNEL);
1697         if (!h->cmd_sg_list) {
1698                 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
1699                 return -ENOMEM;
1700         }
1701         for (i = 0; i < h->nr_cmds; i++) {
1702                 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1703                                                 h->chainsize, GFP_KERNEL);
1704                 if (!h->cmd_sg_list[i]) {
1705                         dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
1706                         goto clean;
1707                 }
1708         }
1709         return 0;
1710
1711 clean:
1712         hpsa_free_sg_chain_blocks(h);
1713         return -ENOMEM;
1714 }
1715
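/*
 * DMA-map the command's SG chain block and point the last embedded SG
 * descriptor at it.  Returns 0 on success, -1 if the mapping fails.
 */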
1716 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1717         struct CommandList *c)
1718 {
1719         struct SGDescriptor *chain_sg, *chain_block;
1720         u64 temp64;
1721         u32 chain_len;
1722
1723         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1724         chain_block = h->cmd_sg_list[c->cmdindex];
1725         chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1726         chain_len = sizeof(*chain_sg) *
1727                 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
1728         chain_sg->Len = cpu_to_le32(chain_len);
1729         temp64 = pci_map_single(h->pdev, chain_block, chain_len,
1730                                 PCI_DMA_TODEVICE);
1731         if (dma_mapping_error(&h->pdev->dev, temp64)) {
1732                 /* prevent subsequent unmapping */
1733                 chain_sg->Addr = cpu_to_le64(0);
1734                 return -1;
1735         }
1736         chain_sg->Addr = cpu_to_le64(temp64);
1737         return 0;
1738 }
1739
1740 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1741         struct CommandList *c)
1742 {
1743         struct SGDescriptor *chain_sg;
1744
1745         if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
1746                 return;
1747
1748         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1749         pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1750                         le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
1751 }
1752
1753
1754 /* Decode the various types of errors on ioaccel2 path.
1755  * Return 1 for any error that should generate a RAID path retry.
1756  * Return 0 for errors that don't require a RAID path retry.
1757  */
1758 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1759                                         struct CommandList *c,
1760                                         struct scsi_cmnd *cmd,
1761                                         struct io_accel2_cmd *c2)
1762 {
1763         int data_len;
1764         int retry = 0;
1765
1766         switch (c2->error_data.serv_response) {
1767         case IOACCEL2_SERV_RESPONSE_COMPLETE:
1768                 switch (c2->error_data.status) {
1769                 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1770                         break;
1771                 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1772                         dev_warn(&h->pdev->dev,
1773                                 "%s: task complete with check condition.\n",
1774                                 "HP SSD Smart Path");
1775                         cmd->result |= SAM_STAT_CHECK_CONDITION;
1776                         if (c2->error_data.data_present !=
1777                                         IOACCEL2_SENSE_DATA_PRESENT) {
1778                                 memset(cmd->sense_buffer, 0,
1779                                         SCSI_SENSE_BUFFERSIZE);
1780                                 break;
1781                         }
1782                         /* copy the sense data */
1783                         data_len = c2->error_data.sense_data_len;
1784                         if (data_len > SCSI_SENSE_BUFFERSIZE)
1785                                 data_len = SCSI_SENSE_BUFFERSIZE;
1786                         if (data_len > sizeof(c2->error_data.sense_data_buff))
1787                                 data_len =
1788                                         sizeof(c2->error_data.sense_data_buff);
1789                         memcpy(cmd->sense_buffer,
1790                                 c2->error_data.sense_data_buff, data_len);
1791                         retry = 1;
1792                         break;
1793                 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1794                         dev_warn(&h->pdev->dev,
1795                                 "%s: task complete with BUSY status.\n",
1796                                 "HP SSD Smart Path");
1797                         retry = 1;
1798                         break;
1799                 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1800                         dev_warn(&h->pdev->dev,
1801                                 "%s: task complete with reservation conflict.\n",
1802                                 "HP SSD Smart Path");
1803                         retry = 1;
1804                         break;
1805                 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1806                         /* Make scsi midlayer do unlimited retries */
1807                         cmd->result = DID_IMM_RETRY << 16;
1808                         break;
1809                 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1810                         dev_warn(&h->pdev->dev,
1811                                 "%s: task complete with aborted status.\n",
1812                                 "HP SSD Smart Path");
1813                         retry = 1;
1814                         break;
1815                 default:
1816                         dev_warn(&h->pdev->dev,
1817                                 "%s: task complete with unrecognized status: 0x%02x\n",
1818                                 "HP SSD Smart Path", c2->error_data.status);
1819                         retry = 1;
1820                         break;
1821                 }
1822                 break;
1823         case IOACCEL2_SERV_RESPONSE_FAILURE:
1824                 /* don't expect to get here. */
1825                 dev_warn(&h->pdev->dev,
1826                         "unexpected delivery or target failure, status = 0x%02x\n",
1827                         c2->error_data.status);
1828                 retry = 1;
1829                 break;
1830         case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1831                 break;
1832         case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1833                 break;
1834         case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1835                 dev_warn(&h->pdev->dev, "task management function rejected.\n");
1836                 retry = 1;
1837                 break;
1838         case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1839                 dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
1840                 break;
1841         default:
1842                 dev_warn(&h->pdev->dev,
1843                         "%s: Unrecognized server response: 0x%02x\n",
1844                         "HP SSD Smart Path",
1845                         c2->error_data.serv_response);
1846                 retry = 1;
1847                 break;
1848         }
1849
1850         return retry;   /* retry on raid path? */
1851 }
1852
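/*
 * Completion handling for commands sent via the ioaccel2 (HP SSD Smart
 * Path) route.  Good completions are finished immediately; RAID offload
 * failures and retryable errors are resubmitted down the normal RAID
 * path via the resubmit workqueue.
 */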
1853 static void process_ioaccel2_completion(struct ctlr_info *h,
1854                 struct CommandList *c, struct scsi_cmnd *cmd,
1855                 struct hpsa_scsi_dev_t *dev)
1856 {
1857         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
1858
1859         /* check for good status */
1860         if (likely(c2->error_data.serv_response == 0 &&
1861                         c2->error_data.status == 0)) {
1862                 cmd_free(h, c);
1863                 cmd->scsi_done(cmd);
1864                 return;
1865         }
1866
1867         /* Any RAID offload error results in retry which will use
1868          * the normal I/O path so the controller can handle whatever's
1869          * wrong.
1870          */
1871         if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1872                 c2->error_data.serv_response ==
1873                         IOACCEL2_SERV_RESPONSE_FAILURE) {
1874                 if (c2->error_data.status ==
1875                         IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
1876                         dev->offload_enabled = 0;
1877                 goto retry_cmd;
1878         }
1879
1880         if (handle_ioaccel_mode2_error(h, c, cmd, c2))
1881                 goto retry_cmd;
1882
1883         cmd_free(h, c);
1884         cmd->scsi_done(cmd);
1885         return;
1886
1887 retry_cmd:
1888         INIT_WORK(&c->work, hpsa_command_resubmit_worker);
1889         queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
1890 }
1891
1892 /* Returns 0 on success, < 0 otherwise. */
1893 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
1894                                         struct CommandList *cp)
1895 {
1896         u8 tmf_status = cp->err_info->ScsiStatus;
1897
1898         switch (tmf_status) {
1899         case CISS_TMF_COMPLETE:
1900                 /*
1901                  * CISS_TMF_COMPLETE never happens; instead,
1902                  * ei->CommandStatus == 0 in this case.
1903                  */
1904         case CISS_TMF_SUCCESS:
1905                 return 0;
1906         case CISS_TMF_INVALID_FRAME:
1907         case CISS_TMF_NOT_SUPPORTED:
1908         case CISS_TMF_FAILED:
1909         case CISS_TMF_WRONG_LUN:
1910         case CISS_TMF_OVERLAPPED_TAG:
1911                 break;
1912         default:
1913                 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
1914                                 tmf_status);
1915                 break;
1916         }
1917         return -tmf_status;
1918 }
1919
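/*
 * Main completion routine for SCSI commands: undo the DMA mappings,
 * translate controller/ioaccel error information into midlayer result
 * codes (copying sense data where present), then free the command and
 * call scsi_done.
 */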
1920 static void complete_scsi_command(struct CommandList *cp)
1921 {
1922         struct scsi_cmnd *cmd;
1923         struct ctlr_info *h;
1924         struct ErrorInfo *ei;
1925         struct hpsa_scsi_dev_t *dev;
1926
1927         u8 sense_key;
1928         u8 asc;      /* additional sense code */
1929         u8 ascq;     /* additional sense code qualifier */
1930         unsigned long sense_data_size;
1931
1932         ei = cp->err_info;
1933         cmd = cp->scsi_cmd;
1934         h = cp->h;
1935         dev = cmd->device->hostdata;
1936
1937         scsi_dma_unmap(cmd); /* undo the DMA mappings */
1938         if ((cp->cmd_type == CMD_SCSI) &&
1939                 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
1940                 hpsa_unmap_sg_chain_block(h, cp);
1941
1942         cmd->result = (DID_OK << 16);           /* host byte */
1943         cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1944
1945         if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
1946                 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
1947
1948         /*
1949          * We check for lockup status here as it may be set for
1950          * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
1951  * fail_all_outstanding_cmds()
1952          */
1953         if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
1954                 /* DID_NO_CONNECT will prevent a retry */
1955                 cmd->result = DID_NO_CONNECT << 16;
1956                 cmd_free(h, cp);
1957                 cmd->scsi_done(cmd);
1958                 return;
1959         }
1960
1961         if (cp->cmd_type == CMD_IOACCEL2)
1962                 return process_ioaccel2_completion(h, cp, cmd, dev);
1963
1964         scsi_set_resid(cmd, ei->ResidualCnt);
1965         if (ei->CommandStatus == 0) {
1966                 if (cp->cmd_type == CMD_IOACCEL1)
1967                         atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
1968                 cmd_free(h, cp);
1969                 cmd->scsi_done(cmd);
1970                 return;
1971         }
1972
1973         /* For I/O accelerator commands, copy over some fields to the normal
1974          * CISS header used below for error handling.
1975          */
1976         if (cp->cmd_type == CMD_IOACCEL1) {
1977                 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1978                 cp->Header.SGList = scsi_sg_count(cmd);
1979                 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
1980                 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
1981                         IOACCEL1_IOFLAGS_CDBLEN_MASK;
1982                 cp->Header.tag = c->tag;
1983                 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1984                 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
1985
1986                 /* Any RAID offload error results in retry which will use
1987                  * the normal I/O path so the controller can handle whatever's
1988                  * wrong.
1989                  */
1990                 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1991                         if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1992                                 dev->offload_enabled = 0;
1993                         INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
1994                         queue_work_on(raw_smp_processor_id(),
1995                                         h->resubmit_wq, &cp->work);
1996                         return;
1997                 }
1998         }
1999
2000         /* an error has occurred */
2001         switch (ei->CommandStatus) {
2002
2003         case CMD_TARGET_STATUS:
2004                 cmd->result |= ei->ScsiStatus;
2005                 /* copy the sense data */
2006                 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2007                         sense_data_size = SCSI_SENSE_BUFFERSIZE;
2008                 else
2009                         sense_data_size = sizeof(ei->SenseInfo);
2010                 if (ei->SenseLen < sense_data_size)
2011                         sense_data_size = ei->SenseLen;
2012                 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2013                 if (ei->ScsiStatus)
2014                         decode_sense_data(ei->SenseInfo, sense_data_size,
2015                                 &sense_key, &asc, &ascq);
2016                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2017                         if (sense_key == ABORTED_COMMAND) {
2018                                 cmd->result |= DID_SOFT_ERROR << 16;
2019                                 break;
2020                         }
2021                         break;
2022                 }
2023                 /* Problem was not a check condition
2024                  * Pass it up to the upper layers...
2025                  */
2026                 if (ei->ScsiStatus) {
2027                         dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2028                                 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2029                                 "Returning result: 0x%x\n",
2030                                 cp, ei->ScsiStatus,
2031                                 sense_key, asc, ascq,
2032                                 cmd->result);
2033                 } else {  /* scsi status is zero??? How??? */
2034                         dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2035                                 "Returning no connection.\n", cp),
2036                                 "Returning no connection.\n", cp);
2037                         /* Ordinarily, this case should never happen,
2038                          * but there is a bug in some released firmware
2039                          * revisions that allows it to happen if, for
2040                          * example, a 4100 backplane loses power and
2041                          * the tape drive is in it.  We assume that
2042                          * it's a fatal error of some kind because we
2043                          * can't show that it wasn't. We will make it
2044                          * look like selection timeout since that is
2045                          * the most common reason for this to occur,
2046                          * and it's severe enough.
2047                          */
2048
2049                         cmd->result = DID_NO_CONNECT << 16;
2050                 }
2051                 break;
2052
2053         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2054                 break;
2055         case CMD_DATA_OVERRUN:
2056                 dev_warn(&h->pdev->dev,
2057                         "CDB %16phN data overrun\n", cp->Request.CDB);
2058                 break;
2059         case CMD_INVALID: {
2060                 /* print_bytes(cp, sizeof(*cp), 1, 0);
2061                 print_cmd(cp); */
2062                 /* We get CMD_INVALID if you address a non-existent device
2063                  * instead of a selection timeout (no response).  You will
2064                  * see this if you yank out a drive, then try to access it.
2065                  * This is kind of a shame because it means that any other
2066                  * CMD_INVALID (e.g. driver bug) will get interpreted as a
2067                  * missing target. */
2068                 cmd->result = DID_NO_CONNECT << 16;
2069         }
2070                 break;
2071         case CMD_PROTOCOL_ERR:
2072                 cmd->result = DID_ERROR << 16;
2073                 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2074                                 cp->Request.CDB);
2075                 break;
2076         case CMD_HARDWARE_ERR:
2077                 cmd->result = DID_ERROR << 16;
2078                 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2079                         cp->Request.CDB);
2080                 break;
2081         case CMD_CONNECTION_LOST:
2082                 cmd->result = DID_ERROR << 16;
2083                 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2084                         cp->Request.CDB);
2085                 break;
2086         case CMD_ABORTED:
2087                 cmd->result = DID_ABORT << 16;
2088                 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2089                                 cp->Request.CDB, ei->ScsiStatus);
2090                 break;
2091         case CMD_ABORT_FAILED:
2092                 cmd->result = DID_ERROR << 16;
2093                 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2094                         cp->Request.CDB);
2095                 break;
2096         case CMD_UNSOLICITED_ABORT:
2097                 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2098                 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2099                         cp->Request.CDB);
2100                 break;
2101         case CMD_TIMEOUT:
2102                 cmd->result = DID_TIME_OUT << 16;
2103                 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2104                         cp->Request.CDB);
2105                 break;
2106         case CMD_UNABORTABLE:
2107                 cmd->result = DID_ERROR << 16;
2108                 dev_warn(&h->pdev->dev, "Command unabortable\n");
2109                 break;
2110         case CMD_TMF_STATUS:
2111                 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2112                         cmd->result = DID_ERROR << 16;
2113                 break;
2114         case CMD_IOACCEL_DISABLED:
2115                 /* This only handles the direct pass-through case since RAID
2116                  * offload is handled above.  Just attempt a retry.
2117                  */
2118                 cmd->result = DID_SOFT_ERROR << 16;
2119                 dev_warn(&h->pdev->dev,
2120                                 "cp %p had HP SSD Smart Path error\n", cp);
2121                 break;
2122         default:
2123                 cmd->result = DID_ERROR << 16;
2124                 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2125                                 cp, ei->CommandStatus);
2126         }
2127         cmd_free(h, cp);
2128         cmd->scsi_done(cmd);
2129 }
2130
2131 static void hpsa_pci_unmap(struct pci_dev *pdev,
2132         struct CommandList *c, int sg_used, int data_direction)
2133 {
2134         int i;
2135
2136         for (i = 0; i < sg_used; i++)
2137                 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2138                                 le32_to_cpu(c->SG[i].Len),
2139                                 data_direction);
2140 }
2141
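/*
 * DMA-map a single data buffer into the command's first SG descriptor.
 * On mapping failure the SG counts are cleared (so a later unmap is
 * harmless) and -1 is returned; otherwise returns 0.
 */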
2142 static int hpsa_map_one(struct pci_dev *pdev,
2143                 struct CommandList *cp,
2144                 unsigned char *buf,
2145                 size_t buflen,
2146                 int data_direction)
2147 {
2148         u64 addr64;
2149
2150         if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2151                 cp->Header.SGList = 0;
2152                 cp->Header.SGTotal = cpu_to_le16(0);
2153                 return 0;
2154         }
2155
2156         addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2157         if (dma_mapping_error(&pdev->dev, addr64)) {
2158                 /* Prevent subsequent unmap of something never mapped */
2159                 cp->Header.SGList = 0;
2160                 cp->Header.SGTotal = cpu_to_le16(0);
2161                 return -1;
2162         }
2163         cp->SG[0].Addr = cpu_to_le64(addr64);
2164         cp->SG[0].Len = cpu_to_le32(buflen);
2165         cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2166         cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
2167         cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2168         return 0;
2169 }
2170
2171 #define NO_TIMEOUT ((unsigned long) -1)
2172 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
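/*
 * Submit a command and wait for it to complete, optionally bounded by
 * timeout_msecs.  Returns IO_OK on completion or -ETIMEDOUT if the wait
 * times out.
 */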
2173 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2174         struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2175 {
2176         DECLARE_COMPLETION_ONSTACK(wait);
2177
2178         c->waiting = &wait;
2179         __enqueue_cmd_and_start_io(h, c, reply_queue);
2180         if (timeout_msecs == NO_TIMEOUT) {
2181                 /* TODO: get rid of this no-timeout thing */
2182                 wait_for_completion_io(&wait);
2183                 return IO_OK;
2184         }
2185         if (!wait_for_completion_io_timeout(&wait,
2186                                         msecs_to_jiffies(timeout_msecs))) {
2187                 dev_warn(&h->pdev->dev, "Command timed out.\n");
2188                 return -ETIMEDOUT;
2189         }
2190         return IO_OK;
2191 }
2192
2193 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2194                                    int reply_queue, unsigned long timeout_msecs)
2195 {
2196         if (unlikely(lockup_detected(h))) {
2197                 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2198                 return IO_OK;
2199         }
2200         return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2201 }
2202
2203 static u32 lockup_detected(struct ctlr_info *h)
2204 {
2205         int cpu;
2206         u32 rc, *lockup_detected;
2207
2208         cpu = get_cpu();
2209         lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2210         rc = *lockup_detected;
2211         put_cpu();
2212         return rc;
2213 }
2214
2215 #define MAX_DRIVER_CMD_RETRIES 25
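/*
 * Issue a command, retrying (with capped backoff after the first few
 * attempts) while the controller reports unit attention or busy, up to
 * MAX_DRIVER_CMD_RETRIES times.  The data buffer is unmapped before
 * returning.
 */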
2216 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2217         struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2218 {
2219         int backoff_time = 10, retry_count = 0;
2220         int rc;
2221
2222         do {
2223                 memset(c->err_info, 0, sizeof(*c->err_info));
2224                 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2225                                                   timeout_msecs);
2226                 if (rc)
2227                         break;
2228                 retry_count++;
2229                 if (retry_count > 3) {
2230                         msleep(backoff_time);
2231                         if (backoff_time < 1000)
2232                                 backoff_time *= 2;
2233                 }
2234         } while ((check_for_unit_attention(h, c) ||
2235                         check_for_busy(h, c)) &&
2236                         retry_count <= MAX_DRIVER_CMD_RETRIES);
2237         hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2238         if (retry_count > MAX_DRIVER_CMD_RETRIES)
2239                 rc = -EIO;
2240         return rc;
2241 }
2242
2243 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2244                                 struct CommandList *c)
2245 {
2246         const u8 *cdb = c->Request.CDB;
2247         const u8 *lun = c->Header.LUN.LunAddrBytes;
2248
2249         dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2250         " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2251                 txt, lun[0], lun[1], lun[2], lun[3],
2252                 lun[4], lun[5], lun[6], lun[7],
2253                 cdb[0], cdb[1], cdb[2], cdb[3],
2254                 cdb[4], cdb[5], cdb[6], cdb[7],
2255                 cdb[8], cdb[9], cdb[10], cdb[11],
2256                 cdb[12], cdb[13], cdb[14], cdb[15]);
2257 }
2258
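/*
 * Log a human-readable explanation of a failed internally generated
 * command, decoding sense data for CMD_TARGET_STATUS completions.
 */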
2259 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2260                         struct CommandList *cp)
2261 {
2262         const struct ErrorInfo *ei = cp->err_info;
2263         struct device *d = &cp->h->pdev->dev;
2264         u8 sense_key, asc, ascq;
2265         int sense_len;
2266
2267         switch (ei->CommandStatus) {
2268         case CMD_TARGET_STATUS:
2269                 if (ei->SenseLen > sizeof(ei->SenseInfo))
2270                         sense_len = sizeof(ei->SenseInfo);
2271                 else
2272                         sense_len = ei->SenseLen;
2273                 decode_sense_data(ei->SenseInfo, sense_len,
2274                                         &sense_key, &asc, &ascq);
2275                 hpsa_print_cmd(h, "SCSI status", cp);
2276                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2277                         dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2278                                 sense_key, asc, ascq);
2279                 else
2280                         dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2281                 if (ei->ScsiStatus == 0)
2282                         dev_warn(d, "SCSI status is abnormally zero.  "
2283                         "(probably indicates selection timeout "
2284                         "reported incorrectly due to a known "
2285                         "firmware bug, circa July, 2001.)\n");
2286                 break;
2287         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2288                 break;
2289         case CMD_DATA_OVERRUN:
2290                 hpsa_print_cmd(h, "overrun condition", cp);
2291                 break;
2292         case CMD_INVALID: {
2293                 /* controller unfortunately reports SCSI passthru's
2294                  * to non-existent targets as invalid commands.
2295                  */
2296                 hpsa_print_cmd(h, "invalid command", cp);
2297                 dev_warn(d, "probably means device no longer present\n");
2298                 }
2299                 break;
2300         case CMD_PROTOCOL_ERR:
2301                 hpsa_print_cmd(h, "protocol error", cp);
2302                 break;
2303         case CMD_HARDWARE_ERR:
2304                 hpsa_print_cmd(h, "hardware error", cp);
2305                 break;
2306         case CMD_CONNECTION_LOST:
2307                 hpsa_print_cmd(h, "connection lost", cp);
2308                 break;
2309         case CMD_ABORTED:
2310                 hpsa_print_cmd(h, "aborted", cp);
2311                 break;
2312         case CMD_ABORT_FAILED:
2313                 hpsa_print_cmd(h, "abort failed", cp);
2314                 break;
2315         case CMD_UNSOLICITED_ABORT:
2316                 hpsa_print_cmd(h, "unsolicited abort", cp);
2317                 break;
2318         case CMD_TIMEOUT:
2319                 hpsa_print_cmd(h, "timed out", cp);
2320                 break;
2321         case CMD_UNABORTABLE:
2322                 hpsa_print_cmd(h, "unabortable", cp);
2323                 break;
2324         case CMD_CTLR_LOCKUP:
2325                 hpsa_print_cmd(h, "controller lockup detected", cp);
2326                 break;
2327         default:
2328                 hpsa_print_cmd(h, "unknown status", cp);
2329                 dev_warn(d, "Unknown command status %x\n",
2330                                 ei->CommandStatus);
2331         }
2332 }
2333
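/*
 * Send a SCSI INQUIRY (optionally for a VPD page) to the given device
 * and copy the response into buf.  Returns 0 on success and a negative
 * value on failure.
 */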
2334 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2335                         u16 page, unsigned char *buf,
2336                         unsigned char bufsize)
2337 {
2338         int rc = IO_OK;
2339         struct CommandList *c;
2340         struct ErrorInfo *ei;
2341
2342         c = cmd_alloc(h);
2343
2344         if (c == NULL) {
2345                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2346                 return -ENOMEM;
2347         }
2348
2349         if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2350                         page, scsi3addr, TYPE_CMD)) {
2351                 rc = -1;
2352                 goto out;
2353         }
2354         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2355                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2356         if (rc)
2357                 goto out;
2358         ei = c->err_info;
2359         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2360                 hpsa_scsi_interpret_error(h, c);
2361                 rc = -1;
2362         }
2363 out:
2364         cmd_free(h, c);
2365         return rc;
2366 }
2367
2368 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2369                 unsigned char *scsi3addr, unsigned char page,
2370                 struct bmic_controller_parameters *buf, size_t bufsize)
2371 {
2372         int rc = IO_OK;
2373         struct CommandList *c;
2374         struct ErrorInfo *ei;
2375
2376         c = cmd_alloc(h);
2377         if (c == NULL) {                        /* trouble... */
2378                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2379                 return -ENOMEM;
2380         }
2381
2382         if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2383                         page, scsi3addr, TYPE_CMD)) {
2384                 rc = -1;
2385                 goto out;
2386         }
2387         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2388                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2389         if (rc)
2390                 goto out;
2391         ei = c->err_info;
2392         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2393                 hpsa_scsi_interpret_error(h, c);
2394                 rc = -1;
2395         }
2396 out:
2397         cmd_free(h, c);
2398         return rc;
2399 }
2400
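/*
 * Send a reset message of the given reset_type (fill_cmd defaults to a
 * LUN reset) to the device at scsi3addr and check the controller's
 * completion status.
 */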
2401 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2402         u8 reset_type, int reply_queue)
2403 {
2404         int rc = IO_OK;
2405         struct CommandList *c;
2406         struct ErrorInfo *ei;
2407
2408         c = cmd_alloc(h);
2409
2410         if (c == NULL) {                        /* trouble... */
2411                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2412                 return -ENOMEM;
2413         }
2414
2415         /* fill_cmd can't fail here, no data buffer to map. */
2416         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2417                         scsi3addr, TYPE_MSG);
2418         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2419         rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2420         if (rc) {
2421                 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2422                 goto out;
2423         }
2424         /* no unmap needed here because no data xfer. */
2425
2426         ei = c->err_info;
2427         if (ei->CommandStatus != 0) {
2428                 hpsa_scsi_interpret_error(h, c);
2429                 rc = -1;
2430         }
2431 out:
2432         cmd_free(h, c);
2433         return rc;
2434 }
2435
2436 static void hpsa_get_raid_level(struct ctlr_info *h,
2437         unsigned char *scsi3addr, unsigned char *raid_level)
2438 {
2439         int rc;
2440         unsigned char *buf;
2441
2442         *raid_level = RAID_UNKNOWN;
2443         buf = kzalloc(64, GFP_KERNEL);
2444         if (!buf)
2445                 return;
2446         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2447         if (rc == 0)
2448                 *raid_level = buf[8];
2449         if (*raid_level > RAID_UNKNOWN)
2450                 *raid_level = RAID_UNKNOWN;
2451         kfree(buf);
2452         return;
2453 }
2454
2455 #define HPSA_MAP_DEBUG
2456 #ifdef HPSA_MAP_DEBUG
2457 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2458                                 struct raid_map_data *map_buff)
2459 {
2460         struct raid_map_disk_data *dd = &map_buff->data[0];
2461         int map, row, col;
2462         u16 map_cnt, row_cnt, disks_per_row;
2463
2464         if (rc != 0)
2465                 return;
2466
2467         /* Show details only if debugging has been activated. */
2468         if (h->raid_offload_debug < 2)
2469                 return;
2470
2471         dev_info(&h->pdev->dev, "structure_size = %u\n",
2472                                 le32_to_cpu(map_buff->structure_size));
2473         dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2474                         le32_to_cpu(map_buff->volume_blk_size));
2475         dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2476                         le64_to_cpu(map_buff->volume_blk_cnt));
2477         dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2478                         map_buff->phys_blk_shift);
2479         dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2480                         map_buff->parity_rotation_shift);
2481         dev_info(&h->pdev->dev, "strip_size = %u\n",
2482                         le16_to_cpu(map_buff->strip_size));
2483         dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2484                         le64_to_cpu(map_buff->disk_starting_blk));
2485         dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2486                         le64_to_cpu(map_buff->disk_blk_cnt));
2487         dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2488                         le16_to_cpu(map_buff->data_disks_per_row));
2489         dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2490                         le16_to_cpu(map_buff->metadata_disks_per_row));
2491         dev_info(&h->pdev->dev, "row_cnt = %u\n",
2492                         le16_to_cpu(map_buff->row_cnt));
2493         dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2494                         le16_to_cpu(map_buff->layout_map_count));
2495         dev_info(&h->pdev->dev, "flags = 0x%x\n",
2496                         le16_to_cpu(map_buff->flags));
2497         dev_info(&h->pdev->dev, "encryption = %s\n",
2498                         le16_to_cpu(map_buff->flags) &
2499                         RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
2500         dev_info(&h->pdev->dev, "dekindex = %u\n",
2501                         le16_to_cpu(map_buff->dekindex));
2502         map_cnt = le16_to_cpu(map_buff->layout_map_count);
2503         for (map = 0; map < map_cnt; map++) {
2504                 dev_info(&h->pdev->dev, "Map%u:\n", map);
2505                 row_cnt = le16_to_cpu(map_buff->row_cnt);
2506                 for (row = 0; row < row_cnt; row++) {
2507                         dev_info(&h->pdev->dev, "  Row%u:\n", row);
2508                         disks_per_row =
2509                                 le16_to_cpu(map_buff->data_disks_per_row);
2510                         for (col = 0; col < disks_per_row; col++, dd++)
2511                                 dev_info(&h->pdev->dev,
2512                                         "    D%02u: h=0x%04x xor=%u,%u\n",
2513                                         col, dd->ioaccel_handle,
2514                                         dd->xor_mult[0], dd->xor_mult[1]);
2515                         disks_per_row =
2516                                 le16_to_cpu(map_buff->metadata_disks_per_row);
2517                         for (col = 0; col < disks_per_row; col++, dd++)
2518                                 dev_info(&h->pdev->dev,
2519                                         "    M%02u: h=0x%04x xor=%u,%u\n",
2520                                         col, dd->ioaccel_handle,
2521                                         dd->xor_mult[0], dd->xor_mult[1]);
2522                 }
2523         }
2524 }
2525 #else
2526 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2527                         __attribute__((unused)) int rc,
2528                         __attribute__((unused)) struct raid_map_data *map_buff)
2529 {
2530 }
2531 #endif
2532
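/*
 * Fetch the RAID map for a logical drive into this_device->raid_map and
 * verify that the reported structure size fits in the statically sized
 * buffer.  Returns 0 on success and nonzero on failure.
 */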
2533 static int hpsa_get_raid_map(struct ctlr_info *h,
2534         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2535 {
2536         int rc = 0;
2537         struct CommandList *c;
2538         struct ErrorInfo *ei;
2539
2540         c = cmd_alloc(h);
2541         if (c == NULL) {
2542                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2543                 return -ENOMEM;
2544         }
2545         if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2546                         sizeof(this_device->raid_map), 0,
2547                         scsi3addr, TYPE_CMD)) {
2548                 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2549                 rc = -ENOMEM;
2550                 goto out;
2551         }
2552         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2553                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2554         if (rc)
2555                 goto out;
2556         ei = c->err_info;
2557         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2558                 hpsa_scsi_interpret_error(h, c);
2559                 rc = -1;
2560                 goto out;
2561         }
2562         cmd_free(h, c);
2563
2564         /* @todo in the future, dynamically allocate RAID map memory */
2565         if (le32_to_cpu(this_device->raid_map.structure_size) >
2566                                 sizeof(this_device->raid_map)) {
2567                 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2568                 rc = -1;
2569         }
2570         hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2571         return rc;
2572 out:
2573         cmd_free(h, c);
2574         return rc;
2575 }
2576
2577 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
2578                 unsigned char scsi3addr[], u16 bmic_device_index,
2579                 struct bmic_identify_physical_device *buf, size_t bufsize)
2580 {
2581         int rc = IO_OK;
2582         struct CommandList *c;
2583         struct ErrorInfo *ei;
2584
2585         c = cmd_alloc(h);
2586         rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
2587                 0, RAID_CTLR_LUNID, TYPE_CMD);
2588         if (rc)
2589                 goto out;
2590
2591         c->Request.CDB[2] = bmic_device_index & 0xff;
2592         c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
2593
2594         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
2595                                                 NO_TIMEOUT);
2596         ei = c->err_info;
2597         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2598                 hpsa_scsi_interpret_error(h, c);
2599                 rc = -1;
2600         }
2601 out:
2602         cmd_free(h, c);
2603         return rc;
2604 }
2605
2606 static int hpsa_vpd_page_supported(struct ctlr_info *h,
2607         unsigned char scsi3addr[], u8 page)
2608 {
2609         int rc;
2610         int i;
2611         int pages;
2612         unsigned char *buf, bufsize;
2613
2614         buf = kzalloc(256, GFP_KERNEL);
2615         if (!buf)
2616                 return 0;
2617
2618         /* Get the size of the page list first */
2619         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2620                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2621                                 buf, HPSA_VPD_HEADER_SZ);
2622         if (rc != 0)
2623                 goto exit_unsupported;
2624         pages = buf[3];
2625         if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2626                 bufsize = pages + HPSA_VPD_HEADER_SZ;
2627         else
2628                 bufsize = 255;
2629
2630         /* Get the whole VPD page list */
2631         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2632                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2633                                 buf, bufsize);
2634         if (rc != 0)
2635                 goto exit_unsupported;
2636
2637         pages = buf[3];
2638         for (i = 1; i <= pages; i++)
2639                 if (buf[3 + i] == page)
2640                         goto exit_supported;
2641 exit_unsupported:
2642         kfree(buf);
2643         return 0;
2644 exit_supported:
2645         kfree(buf);
2646         return 1;
2647 }
2648
2649 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2650         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2651 {
2652         int rc;
2653         unsigned char *buf;
2654         u8 ioaccel_status;
2655
2656         this_device->offload_config = 0;
2657         this_device->offload_enabled = 0;
2658         this_device->offload_to_be_enabled = 0;
2659
2660         buf = kzalloc(64, GFP_KERNEL);
2661         if (!buf)
2662                 return;
2663         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2664                 goto out;
2665         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2666                         VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2667         if (rc != 0)
2668                 goto out;
2669
2670 #define IOACCEL_STATUS_BYTE 4
2671 #define OFFLOAD_CONFIGURED_BIT 0x01
2672 #define OFFLOAD_ENABLED_BIT 0x02
2673         ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2674         this_device->offload_config =
2675                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2676         if (this_device->offload_config) {
2677                 this_device->offload_enabled =
2678                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2679                 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2680                         this_device->offload_enabled = 0;
2681         }
2682         this_device->offload_to_be_enabled = this_device->offload_enabled;
2683 out:
2684         kfree(buf);
2685         return;
2686 }
2687
2688 /* Get the device id from inquiry page 0x83 */
2689 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2690         unsigned char *device_id, int buflen)
2691 {
2692         int rc;
2693         unsigned char *buf;
2694
2695         if (buflen > 16)
2696                 buflen = 16;
2697         buf = kzalloc(64, GFP_KERNEL);
2698         if (!buf)
2699                 return -ENOMEM;
2700         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2701         if (rc == 0)
2702                 memcpy(device_id, &buf[8], buflen);
2703         kfree(buf);
2704         return rc != 0;
2705 }
2706
2707 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2708                 void *buf, int bufsize,
2709                 int extended_response)
2710 {
2711         int rc = IO_OK;
2712         struct CommandList *c;
2713         unsigned char scsi3addr[8];
2714         struct ErrorInfo *ei;
2715
2716         c = cmd_alloc(h);
2717         if (c == NULL) {                        /* trouble... */
2718                 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2719                 return -1;
2720         }
2721         /* address the controller */
2722         memset(scsi3addr, 0, sizeof(scsi3addr));
2723         if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2724                 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2725                 rc = -1;
2726                 goto out;
2727         }
2728         if (extended_response)
2729                 c->Request.CDB[1] = extended_response;
2730         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2731                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2732         if (rc)
2733                 goto out;
2734         ei = c->err_info;
2735         if (ei->CommandStatus != 0 &&
2736             ei->CommandStatus != CMD_DATA_UNDERRUN) {
2737                 hpsa_scsi_interpret_error(h, c);
2738                 rc = -1;
2739         } else {
2740                 struct ReportLUNdata *rld = buf;
2741
2742                 if (rld->extended_response_flag != extended_response) {
2743                         dev_err(&h->pdev->dev,
2744                                 "report luns requested format %u, got %u\n",
2745                                 extended_response,
2746                                 rld->extended_response_flag);
2747                         rc = -1;
2748                 }
2749         }
2750 out:
2751         cmd_free(h, c);
2752         return rc;
2753 }
2754
2755 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2756                 struct ReportExtendedLUNdata *buf, int bufsize)
2757 {
2758         return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
2759                                                 HPSA_REPORT_PHYS_EXTENDED);
2760 }
2761
2762 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2763                 struct ReportLUNdata *buf, int bufsize)
2764 {
2765         return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2766 }
2767
2768 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2769         int bus, int target, int lun)
2770 {
2771         device->bus = bus;
2772         device->target = target;
2773         device->lun = lun;
2774 }
2775
2776 /* Use VPD inquiry to get details of volume status */
2777 static int hpsa_get_volume_status(struct ctlr_info *h,
2778                                         unsigned char scsi3addr[])
2779 {
2780         int rc;
2781         int status;
2782         int size;
2783         unsigned char *buf;
2784
2785         buf = kzalloc(64, GFP_KERNEL);
2786         if (!buf)
2787                 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2788
2789         /* Does controller have VPD for logical volume status? */
2790         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2791                 goto exit_failed;
2792
2793         /* Get the size of the VPD return buffer */
2794         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2795                                         buf, HPSA_VPD_HEADER_SZ);
2796         if (rc != 0)
2797                 goto exit_failed;
2798         size = buf[3];
2799
2800         /* Now get the whole VPD buffer */
2801         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2802                                         buf, size + HPSA_VPD_HEADER_SZ);
2803         if (rc != 0)
2804                 goto exit_failed;
2805         status = buf[4]; /* status byte */
2806
2807         kfree(buf);
2808         return status;
2809 exit_failed:
2810         kfree(buf);
2811         return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2812 }
2813
2814 /* Determine offline status of a volume.
2815  * Return either:
2816  *  0 (not offline)
2817  *  0xff (offline for unknown reasons)
2818  *  # (integer code indicating one of several NOT READY states
2819  *     describing why a volume is to be kept offline)
2820  */
2821 static int hpsa_volume_offline(struct ctlr_info *h,
2822                                         unsigned char scsi3addr[])
2823 {
2824         struct CommandList *c;
2825         unsigned char *sense;
2826         u8 sense_key, asc, ascq;
2827         int sense_len;
2828         int rc, ldstat = 0;
2829         u16 cmd_status;
2830         u8 scsi_status;
2831 #define ASC_LUN_NOT_READY 0x04
2832 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2833 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2834
2835         c = cmd_alloc(h);
2836         if (!c)
2837                 return 0;
2838         (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2839         rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
2840         if (rc) {
2841                 cmd_free(h, c);
2842                 return 0;
2843         }
2844         sense = c->err_info->SenseInfo;
2845         if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
2846                 sense_len = sizeof(c->err_info->SenseInfo);
2847         else
2848                 sense_len = c->err_info->SenseLen;
2849         decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
2850         cmd_status = c->err_info->CommandStatus;
2851         scsi_status = c->err_info->ScsiStatus;
2852         cmd_free(h, c);
2853         /* Is the volume 'not ready'? */
2854         if (cmd_status != CMD_TARGET_STATUS ||
2855                 scsi_status != SAM_STAT_CHECK_CONDITION ||
2856                 sense_key != NOT_READY ||
2857                 asc != ASC_LUN_NOT_READY)  {
2858                 return 0;
2859         }
2860
2861         /* Determine the reason for not ready state */
2862         ldstat = hpsa_get_volume_status(h, scsi3addr);
2863
2864         /* Keep volume offline in certain cases: */
2865         switch (ldstat) {
2866         case HPSA_LV_UNDERGOING_ERASE:
2867         case HPSA_LV_UNDERGOING_RPI:
2868         case HPSA_LV_PENDING_RPI:
2869         case HPSA_LV_ENCRYPTED_NO_KEY:
2870         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2871         case HPSA_LV_UNDERGOING_ENCRYPTION:
2872         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2873         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2874                 return ldstat;
2875         case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2876                 /* If VPD status page isn't available,
2877                  * use ASC/ASCQ to determine state
2878                  */
2879                 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2880                         (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2881                         return ldstat;
2882                 break;
2883         default:
2884                 break;
2885         }
2886         return 0;
2887 }
2888
2889 /*
2890  * Find out if a logical device supports aborts by simply trying one.
2891  * Smart Array may claim not to support aborts on logical drives, but
2892  * if an MSA2000 is connected, the drives on it will be presented
2893  * by the Smart Array as logical drives, and aborts may be sent to
2894  * those devices successfully.  So the simplest way to find out is
2895  * to try an abort and see how the device responds.
2896  */
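/*
 * A note on the return values of the function below: 1 means the device
 * appears to accept aborts, 0 means it does not, and a negative value
 * means the test itself could not be carried out (for example, no command
 * could be allocated); callers treat a negative result as "no abort
 * support".
 */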
2897 static int hpsa_device_supports_aborts(struct ctlr_info *h,
2898                                         unsigned char *scsi3addr)
2899 {
2900         struct CommandList *c;
2901         struct ErrorInfo *ei;
2902         int rc = 0;
2903
2904         u64 tag = (u64) -1; /* bogus tag */
2905
2906         /* Assume that physical devices support aborts */
2907         if (!is_logical_dev_addr_mode(scsi3addr))
2908                 return 1;
2909
2910         c = cmd_alloc(h);
2911         if (!c)
2912                 return -ENOMEM;
2913         (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
2914         (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
2915         /* no unmap needed here because no data xfer. */
2916         ei = c->err_info;
2917         switch (ei->CommandStatus) {
2918         case CMD_INVALID:
2919                 rc = 0;
2920                 break;
2921         case CMD_UNABORTABLE:
2922         case CMD_ABORT_FAILED:
2923                 rc = 1;
2924                 break;
2925         case CMD_TMF_STATUS:
2926                 rc = hpsa_evaluate_tmf_status(h, c);
2927                 break;
2928         default:
2929                 rc = 0;
2930                 break;
2931         }
2932         cmd_free(h, c);
2933         return rc;
2934 }
2935
2936 static int hpsa_update_device_info(struct ctlr_info *h,
2937         unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2938         unsigned char *is_OBDR_device)
2939 {
2940
2941 #define OBDR_SIG_OFFSET 43
2942 #define OBDR_TAPE_SIG "$DR-10"
2943 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2944 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
2945
2946         unsigned char *inq_buff;
2947         unsigned char *obdr_sig;
2948
2949         inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
2950         if (!inq_buff)
2951                 goto bail_out;
2952
2953         /* Do an inquiry to the device to see what it is. */
2954         if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2955                 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2956                 /* Inquiry failed (msg printed already) */
2957                 dev_err(&h->pdev->dev,
2958                         "hpsa_update_device_info: inquiry failed\n");
2959                 goto bail_out;
2960         }
2961
2962         this_device->devtype = (inq_buff[0] & 0x1f);
2963         memcpy(this_device->scsi3addr, scsi3addr, 8);
2964         memcpy(this_device->vendor, &inq_buff[8],
2965                 sizeof(this_device->vendor));
2966         memcpy(this_device->model, &inq_buff[16],
2967                 sizeof(this_device->model));
2968         memset(this_device->device_id, 0,
2969                 sizeof(this_device->device_id));
2970         hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2971                 sizeof(this_device->device_id));
2972
2973         if (this_device->devtype == TYPE_DISK &&
2974                 is_logical_dev_addr_mode(scsi3addr)) {
2975                 int volume_offline;
2976
2977                 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2978                 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2979                         hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2980                 volume_offline = hpsa_volume_offline(h, scsi3addr);
2981                 if (volume_offline < 0 || volume_offline > 0xff)
2982                         volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2983                 this_device->volume_offline = volume_offline & 0xff;
2984         } else {
2985                 this_device->raid_level = RAID_UNKNOWN;
2986                 this_device->offload_config = 0;
2987                 this_device->offload_enabled = 0;
2988                 this_device->offload_to_be_enabled = 0;
2989                 this_device->volume_offline = 0;
2990                 this_device->queue_depth = h->nr_cmds;
2991         }
2992
2993         if (is_OBDR_device) {
2994                 /* See if this is a One-Button-Disaster-Recovery device
2995                  * by looking for "$DR-10" at offset 43 in inquiry data.
2996                  */
2997                 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2998                 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2999                                         strncmp(obdr_sig, OBDR_TAPE_SIG,
3000                                                 OBDR_SIG_LEN) == 0);
3001         }
3002         kfree(inq_buff);
3003         return 0;
3004
3005 bail_out:
3006         kfree(inq_buff);
3007         return 1;
3008 }
3009
3010 static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3011                         struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3012 {
3013         unsigned long flags;
3014         int rc, entry;
3015         /*
3016          * See if this device supports aborts.  If we already know
3017          * the device, we already know if it supports aborts, otherwise
3018          * we have to find out if it supports aborts by trying one.
3019          */
3020         spin_lock_irqsave(&h->devlock, flags);
3021         rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3022         if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3023                 entry >= 0 && entry < h->ndevices) {
3024                 dev->supports_aborts = h->dev[entry]->supports_aborts;
3025                 spin_unlock_irqrestore(&h->devlock, flags);
3026         } else {
3027                 spin_unlock_irqrestore(&h->devlock, flags);
3028                 dev->supports_aborts =
3029                                 hpsa_device_supports_aborts(h, scsi3addr);
3030                 if (dev->supports_aborts < 0)
3031                         dev->supports_aborts = 0;
3032         }
3033 }
3034
3035 static unsigned char *ext_target_model[] = {
3036         "MSA2012",
3037         "MSA2024",
3038         "MSA2312",
3039         "MSA2324",
3040         "P2000 G3 SAS",
3041         "MSA 2040 SAS",
3042         NULL,
3043 };
3044
3045 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
3046 {
3047         int i;
3048
3049         for (i = 0; ext_target_model[i]; i++)
3050                 if (strncmp(device->model, ext_target_model[i],
3051                         strlen(ext_target_model[i])) == 0)
3052                         return 1;
3053         return 0;
3054 }
3055
3056 /* Helper function to assign bus, target, lun mapping of devices.
3057  * Puts non-external target logical volumes on bus 0, external target logical
3058  * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
3059  * Logical drive target and lun are assigned at this time, but
3060  * physical device lun and target assignment are deferred (assigned
3061  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3062  */
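/*
 * For illustration: a logical volume on the local controller lands on
 * bus 0, target 0, with its lun taken from the low 14 bits of the
 * reported lunid; an external (MSA-style) logical volume goes to bus 1
 * with the target/lun the box reports; and the controller itself ends up
 * on bus 3, target 0, lun 0.
 */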
3063 static void figure_bus_target_lun(struct ctlr_info *h,
3064         u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3065 {
3066         u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
3067
3068         if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3069                 /* physical device, target and lun filled in later */
3070                 if (is_hba_lunid(lunaddrbytes))
3071                         hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
3072                 else
3073                         /* defer target, lun assignment for physical devices */
3074                         hpsa_set_bus_target_lun(device, 2, -1, -1);
3075                 return;
3076         }
3077         /* It's a logical device */
3078         if (is_ext_target(h, device)) {
3079                 /* External target: put logical volumes on bus 1 and
3080                  * match the target/lun numbers the box reports.  Other
3081                  * Smart Array logical volumes go on bus 0, target 0,
3082                  * matching the lunid. */
3083                 hpsa_set_bus_target_lun(device,
3084                         1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
3085                 return;
3086         }
3087         hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
3088 }
3089
3090 /*
3091  * If there is no lun 0 on a target, linux won't find any devices.
3092  * For the external targets (arrays), we have to manually detect the enclosure
3093  * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
3094  * it for some reason.  *tmpdevice is the target we're adding,
3095  * this_device is a pointer into the current element of currentsd[]
3096  * that we're building up in update_scsi_devices(), below.
3097  * lunzerobits is a bitmap that tracks which targets already have a
3098  * lun 0 assigned.
3099  * Returns 1 if an enclosure was added, 0 if not.
3100  */
3101 static int add_ext_target_dev(struct ctlr_info *h,
3102         struct hpsa_scsi_dev_t *tmpdevice,
3103         struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
3104         unsigned long lunzerobits[], int *n_ext_target_devs)
3105 {
3106         unsigned char scsi3addr[8];
3107
3108         if (test_bit(tmpdevice->target, lunzerobits))
3109                 return 0; /* There is already a lun 0 on this target. */
3110
3111         if (!is_logical_dev_addr_mode(lunaddrbytes))
3112                 return 0; /* It's the logical targets that may lack lun 0. */
3113
3114         if (!is_ext_target(h, tmpdevice))
3115                 return 0; /* Only external target devices have this problem. */
3116
3117         if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
3118                 return 0;
3119
3120         memset(scsi3addr, 0, 8);
3121         scsi3addr[3] = tmpdevice->target;
3122         if (is_hba_lunid(scsi3addr))
3123                 return 0; /* Don't add the RAID controller here. */
3124
3125         if (is_scsi_rev_5(h))
3126                 return 0; /* p1210m doesn't need to do this. */
3127
3128         if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
3129                 dev_warn(&h->pdev->dev, "Maximum number of external "
3130                         "target devices exceeded.  Check your hardware "
3131                         "configuration.");
3132                 return 0;
3133         }
3134
3135         if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
3136                 return 0;
3137         (*n_ext_target_devs)++;
3138         hpsa_set_bus_target_lun(this_device,
3139                                 tmpdevice->bus, tmpdevice->target, 0);
3140         hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
3141         set_bit(tmpdevice->target, lunzerobits);
3142         return 1;
3143 }
3144
3145 /*
3146  * Get address of physical disk used for an ioaccel2 mode command:
3147  *      1. Extract ioaccel2 handle from the command.
3148  *      2. Find a matching ioaccel2 handle from list of physical disks.
3149  *      3. Return:
3150  *              1 and set scsi3addr to address of matching physical
3151  *              0 if no matching physical disk was found.
3152  */
3153 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3154         struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3155 {
3156         struct io_accel2_cmd *c2 =
3157                         &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3158         unsigned long flags;
3159         int i;
3160
3161         spin_lock_irqsave(&h->devlock, flags);
3162         for (i = 0; i < h->ndevices; i++)
3163                 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3164                         memcpy(scsi3addr, h->dev[i]->scsi3addr,
3165                                 sizeof(h->dev[i]->scsi3addr));
3166                         spin_unlock_irqrestore(&h->devlock, flags);
3167                         return 1;
3168                 }
3169         spin_unlock_irqrestore(&h->devlock, flags);
3170         return 0;
3171 }
3172
3173 /*
3174  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
3175  * logdev.  The number of luns in physdev and logdev are returned in
3176  * *nphysicals and *nlogicals, respectively.
3177  * Returns 0 on success, -1 otherwise.
3178  */
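/*
 * A note on the arithmetic below: LUNListLength is a big-endian byte
 * count, each extended physical report entry is 24 bytes, and each
 * standard logical report entry is 8 bytes, hence the divisors of 24
 * and 8 when turning the returned lengths into LUN counts.
 */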
3179 static int hpsa_gather_lun_info(struct ctlr_info *h,
3180         struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
3181         struct ReportLUNdata *logdev, u32 *nlogicals)
3182 {
3183         if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3184                 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3185                 return -1;
3186         }
3187         *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
3188         if (*nphysicals > HPSA_MAX_PHYS_LUN) {
3189                 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3190                         HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
3191                 *nphysicals = HPSA_MAX_PHYS_LUN;
3192         }
3193         if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
3194                 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3195                 return -1;
3196         }
3197         *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
3198         /* Reject Logicals in excess of our max capability. */
3199         if (*nlogicals > HPSA_MAX_LUN) {
3200                 dev_warn(&h->pdev->dev,
3201                         "maximum logical LUNs (%d) exceeded.  "
3202                         "%d LUNs ignored.\n", HPSA_MAX_LUN,
3203                         *nlogicals - HPSA_MAX_LUN);
3204                 *nlogicals = HPSA_MAX_LUN;
3205         }
3206         if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3207                 dev_warn(&h->pdev->dev,
3208                         "maximum logical + physical LUNs (%d) exceeded. "
3209                         "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
3210                         *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3211                 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
3212         }
3213         return 0;
3214 }
3215
3216 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3217         int i, int nphysicals, int nlogicals,
3218         struct ReportExtendedLUNdata *physdev_list,
3219         struct ReportLUNdata *logdev_list)
3220 {
3221         /* Helper function, figure out where the LUN ID info is coming from
3222          * given index i, the lists of physical and logical devices, and where
3223          * in the list the raid controller is supposed to appear (first or last).
3224          */
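        /* For example, when raid_ctlr_position == 0 the controller occupies
         * index 0, physical devices occupy indices 1..nphysicals, and the
         * logical devices follow; when the controller is positioned last,
         * physical devices start at index 0 and the controller comes after
         * all of the logical devices.
         */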
3225
3226         int logicals_start = nphysicals + (raid_ctlr_position == 0);
3227         int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
3228
3229         if (i == raid_ctlr_position)
3230                 return RAID_CTLR_LUNID;
3231
3232         if (i < logicals_start)
3233                 return &physdev_list->LUN[i -
3234                                 (raid_ctlr_position == 0)].lunid[0];
3235
3236         if (i < last_device)
3237                 return &logdev_list->LUN[i - nphysicals -
3238                         (raid_ctlr_position == 0)][0];
3239         BUG();
3240         return NULL;
3241 }
3242
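/*
 * Ask the controller, via a BMIC controller-parameters sense, whether HBA
 * mode is enabled in NVRAM.  Returns 1 if enabled, 0 if not, and passes
 * back any error from the underlying query.
 */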
3243 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
3244 {
3245         int rc;
3246         int hba_mode_enabled;
3247         struct bmic_controller_parameters *ctlr_params;
3248         ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
3249                 GFP_KERNEL);
3250
3251         if (!ctlr_params)
3252                 return -ENOMEM;
3253         rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
3254                 sizeof(struct bmic_controller_parameters));
3255         if (rc) {
3256                 kfree(ctlr_params);
3257                 return rc;
3258         }
3259
3260         hba_mode_enabled =
3261                 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
3262         kfree(ctlr_params);
3263         return hba_mode_enabled;
3264 }
3265
3266 /* get physical drive ioaccel handle and queue depth */
3267 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3268                 struct hpsa_scsi_dev_t *dev,
3269                 u8 *lunaddrbytes,
3270                 struct bmic_identify_physical_device *id_phys)
3271 {
3272         int rc;
3273         struct ext_report_lun_entry *rle =
3274                 (struct ext_report_lun_entry *) lunaddrbytes;
3275
3276         dev->ioaccel_handle = rle->ioaccel_handle;
3277         memset(id_phys, 0, sizeof(*id_phys));
3278         rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
3279                         GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
3280                         sizeof(*id_phys));
3281         if (!rc)
3282                 /* Reserve space for FW operations */
3283 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3284 #define DRIVE_QUEUE_DEPTH 7
3285                 dev->queue_depth =
3286                         le16_to_cpu(id_phys->current_queue_depth_limit) -
3287                                 DRIVE_CMDS_RESERVED_FOR_FW;
3288         else
3289                 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3290         atomic_set(&dev->ioaccel_cmds_out, 0);
3291 }
3292
3293 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3294 {
3295         /* The idea here is that we may get notified that some devices
3296          * have changed, so we do a report physical LUNs and a report
3297          * logical LUNs command, and adjust our list of devices
3298          * accordingly.
3299          *
3300          * The scsi3addr of a device won't change so long as the
3301          * adapter is not reset.  That means we can rescan and
3302          * tell which devices we already know about, vs. new
3303          * devices, vs. disappearing devices.
3304          */
3305         struct ReportExtendedLUNdata *physdev_list = NULL;
3306         struct ReportLUNdata *logdev_list = NULL;
3307         struct bmic_identify_physical_device *id_phys = NULL;
3308         u32 nphysicals = 0;
3309         u32 nlogicals = 0;
3310         u32 ndev_allocated = 0;
3311         struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3312         int ncurrent = 0;
3313         int i, n_ext_target_devs, ndevs_to_allocate;
3314         int raid_ctlr_position;
3315         int rescan_hba_mode;
3316         DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3317
3318         currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3319         physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3320         logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3321         tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3322         id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3323
3324         if (!currentsd || !physdev_list || !logdev_list ||
3325                 !tmpdevice || !id_phys) {
3326                 dev_err(&h->pdev->dev, "out of memory\n");
3327                 goto out;
3328         }
3329         memset(lunzerobits, 0, sizeof(lunzerobits));
3330
3331         rescan_hba_mode = hpsa_hba_mode_enabled(h);
3332         if (rescan_hba_mode < 0)
3333                 goto out;
3334
3335         if (!h->hba_mode_enabled && rescan_hba_mode)
3336                 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3337         else if (h->hba_mode_enabled && !rescan_hba_mode)
3338                 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3339
3340         h->hba_mode_enabled = rescan_hba_mode;
3341
3342         if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3343                         logdev_list, &nlogicals))
3344                 goto out;
3345
3346         /* We might see up to the maximum number of logical and physical disks
3347          * plus external target devices, and a device for the local RAID
3348          * controller.
3349          */
3350         ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3351
3352         /* Allocate the per device structures */
3353         for (i = 0; i < ndevs_to_allocate; i++) {
3354                 if (i >= HPSA_MAX_DEVICES) {
3355                         dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3356                                 "  %d devices ignored.\n", HPSA_MAX_DEVICES,
3357                                 ndevs_to_allocate - HPSA_MAX_DEVICES);
3358                         break;
3359                 }
3360
3361                 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3362                 if (!currentsd[i]) {
3363                         dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3364                                 __FILE__, __LINE__);
3365                         goto out;
3366                 }
3367                 ndev_allocated++;
3368         }
3369
3370         if (is_scsi_rev_5(h))
3371                 raid_ctlr_position = 0;
3372         else
3373                 raid_ctlr_position = nphysicals + nlogicals;
3374
3375         /* adjust our table of devices */
3376         n_ext_target_devs = 0;
3377         for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3378                 u8 *lunaddrbytes, is_OBDR = 0;
3379
3380                 /* Figure out where the LUN ID info is coming from */
3381                 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3382                         i, nphysicals, nlogicals, physdev_list, logdev_list);
3383
3384                 /* skip masked non-disk devices */
3385                 if (MASKED_DEVICE(lunaddrbytes))
3386                         if (i < nphysicals + (raid_ctlr_position == 0) &&
3387                                 NON_DISK_PHYS_DEV(lunaddrbytes))
3388                                 continue;
3389
3390                 /* Get device type, vendor, model, device id */
3391                 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3392                                                         &is_OBDR))
3393                         continue; /* skip it if we can't talk to it. */
3394                 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3395                 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
3396                 this_device = currentsd[ncurrent];
3397
3398                 /*
3399                  * For external target devices, we have to insert a LUN 0 which
3400                  * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3401                  * is nonetheless an enclosure device there.  We have to
3402                  * present it; otherwise Linux won't find anything if
3403                  * there is no lun 0.
3404                  */
3405                 if (add_ext_target_dev(h, tmpdevice, this_device,
3406                                 lunaddrbytes, lunzerobits,
3407                                 &n_ext_target_devs)) {
3408                         ncurrent++;
3409                         this_device = currentsd[ncurrent];
3410                 }
3411
3412                 *this_device = *tmpdevice;
3413
3414                 /* do not expose masked devices */
3415                 if (MASKED_DEVICE(lunaddrbytes) &&
3416                         i < nphysicals + (raid_ctlr_position == 0)) {
3417                         if (h->hba_mode_enabled)
3418                                 dev_warn(&h->pdev->dev,
3419                                         "Masked physical device detected\n");
3420                         this_device->expose_state = HPSA_DO_NOT_EXPOSE;
3421                 } else {
3422                         this_device->expose_state =
3423                                         HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
3424                 }
3425
3426                 switch (this_device->devtype) {
3427                 case TYPE_ROM:
3428                         /* We don't *really* support actual CD-ROM devices,
3429                          * just "One Button Disaster Recovery" tape drive
3430                          * which temporarily pretends to be a CD-ROM drive.
3431                          * So we check that the device is really an OBDR tape
3432                          * device by checking for "$DR-10" in bytes 43-48 of
3433                          * the inquiry data.
3434                          */
3435                         if (is_OBDR)
3436                                 ncurrent++;
3437                         break;
3438                 case TYPE_DISK:
3439                         if (h->hba_mode_enabled) {
3440                                 /* never use raid mapper in HBA mode */
3441                                 this_device->offload_enabled = 0;
3442                                 ncurrent++;
3443                                 break;
3444                         } else if (h->acciopath_status) {
3445                                 if (i >= nphysicals) {
3446                                         ncurrent++;
3447                                         break;
3448                                 }
3449                         } else {
3450                                 if (i < nphysicals)
3451                                         break;
3452                                 ncurrent++;
3453                                 break;
3454                         }
3455                         if (h->transMethod & CFGTBL_Trans_io_accel1 ||
3456                                 h->transMethod & CFGTBL_Trans_io_accel2) {
3457                                 hpsa_get_ioaccel_drive_info(h, this_device,
3458                                                         lunaddrbytes, id_phys);
3459                                 atomic_set(&this_device->ioaccel_cmds_out, 0);
3460                                 ncurrent++;
3461                         }
3462                         break;
3463                 case TYPE_TAPE:
3464                 case TYPE_MEDIUM_CHANGER:
3465                         ncurrent++;
3466                         break;
3467                 case TYPE_ENCLOSURE:
3468                         if (h->hba_mode_enabled)
3469                                 ncurrent++;
3470                         break;
3471                 case TYPE_RAID:
3472                 /* Only present the Smart Array HBA as a RAID controller.
3473                          * If it's a RAID controller other than the HBA itself
3474                          * (an external RAID controller, MSA500 or similar)
3475                          * don't present it.
3476                          */
3477                         if (!is_hba_lunid(lunaddrbytes))
3478                                 break;
3479                         ncurrent++;
3480                         break;
3481                 default:
3482                         break;
3483                 }
3484                 if (ncurrent >= HPSA_MAX_DEVICES)
3485                         break;
3486         }
3487         adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3488 out:
3489         kfree(tmpdevice);
3490         for (i = 0; i < ndev_allocated; i++)
3491                 kfree(currentsd[i]);
3492         kfree(currentsd);
3493         kfree(physdev_list);
3494         kfree(logdev_list);
3495         kfree(id_phys);
3496 }
3497
3498 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
3499                                    struct scatterlist *sg)
3500 {
3501         u64 addr64 = (u64) sg_dma_address(sg);
3502         unsigned int len = sg_dma_len(sg);
3503
3504         desc->Addr = cpu_to_le64(addr64);
3505         desc->Len = cpu_to_le32(len);
3506         desc->Ext = 0;
3507 }
3508
3509 /*
3510  * hpsa_scatter_gather takes a struct scsi_cmnd (cmd), does the PCI
3511  * DMA mapping, and fills in the scatter-gather entries of the
3512  * hpsa command, cp.
3513  */
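/*
 * When a request needs more scatter-gather entries than fit in the
 * command itself (h->max_cmd_sg_entries), the remaining descriptors
 * spill over into a per-command chain block that is linked in by
 * hpsa_map_sg_chain_block() below.
 */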
3514 static int hpsa_scatter_gather(struct ctlr_info *h,
3515                 struct CommandList *cp,
3516                 struct scsi_cmnd *cmd)
3517 {
3518         struct scatterlist *sg;
3519         int use_sg, i, sg_index, chained;
3520         struct SGDescriptor *curr_sg;
3521
3522         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3523
3524         use_sg = scsi_dma_map(cmd);
3525         if (use_sg < 0)
3526                 return use_sg;
3527
3528         if (!use_sg)
3529                 goto sglist_finished;
3530
3531         curr_sg = cp->SG;
3532         chained = 0;
3533         sg_index = 0;
3534         scsi_for_each_sg(cmd, sg, use_sg, i) {
3535                 if (i == h->max_cmd_sg_entries - 1 &&
3536                         use_sg > h->max_cmd_sg_entries) {
3537                         chained = 1;
3538                         curr_sg = h->cmd_sg_list[cp->cmdindex];
3539                         sg_index = 0;
3540                 }
3541                 hpsa_set_sg_descriptor(curr_sg, sg);
3542                 curr_sg++;
3543         }
3544
3545         /* Back the pointer up to the last entry and mark it as "last". */
3546         (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3547
3548         if (use_sg + chained > h->maxSG)
3549                 h->maxSG = use_sg + chained;
3550
3551         if (chained) {
3552                 cp->Header.SGList = h->max_cmd_sg_entries;
3553                 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
3554                 if (hpsa_map_sg_chain_block(h, cp)) {
3555                         scsi_dma_unmap(cmd);
3556                         return -1;
3557                 }
3558                 return 0;
3559         }
3560
3561 sglist_finished:
3562
3563         cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
3564         cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
3565         return 0;
3566 }
3567
3568 #define IO_ACCEL_INELIGIBLE (1)
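/*
 * Illustrative example for fixup_ioaccel_cdb() below (the values are made
 * up): a READ_12 for LBA 0x00123456, 16 blocks, is rewritten as a READ_10
 * with cdb[2..5] = 00 12 34 56 and cdb[7..8] = 00 10.  Transfers longer
 * than 0xffff blocks cannot be expressed in a 10-byte CDB and are
 * rejected as IO_ACCEL_INELIGIBLE.
 */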
3569 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3570 {
3571         int is_write = 0;
3572         u32 block;
3573         u32 block_cnt;
3574
3575         /* Fix up 6- and 12-byte read/write CDBs into 10-byte reads/writes */
3576         switch (cdb[0]) {
3577         case WRITE_6:
3578         case WRITE_12:
3579                 is_write = 1;        /* fall through */
3580         case READ_6:
3581         case READ_12:
3582                 if (*cdb_len == 6) {
3583                         block = (((u32) cdb[2]) << 8) | cdb[3];
3584                         block_cnt = cdb[4];
3585                 } else {
3586                         BUG_ON(*cdb_len != 12);
3587                         block = (((u32) cdb[2]) << 24) |
3588                                 (((u32) cdb[3]) << 16) |
3589                                 (((u32) cdb[4]) << 8) |
3590                                 cdb[5];
3591                         block_cnt =
3592                                 (((u32) cdb[6]) << 24) |
3593                                 (((u32) cdb[7]) << 16) |
3594                                 (((u32) cdb[8]) << 8) |
3595                                 cdb[9];
3596                 }
3597                 if (block_cnt > 0xffff)
3598                         return IO_ACCEL_INELIGIBLE;
3599
3600                 cdb[0] = is_write ? WRITE_10 : READ_10;
3601                 cdb[1] = 0;
3602                 cdb[2] = (u8) (block >> 24);
3603                 cdb[3] = (u8) (block >> 16);
3604                 cdb[4] = (u8) (block >> 8);
3605                 cdb[5] = (u8) (block);
3606                 cdb[6] = 0;
3607                 cdb[7] = (u8) (block_cnt >> 8);
3608                 cdb[8] = (u8) (block_cnt);
3609                 cdb[9] = 0;
3610                 *cdb_len = 10;
3611                 break;
3612         }
3613         return 0;
3614 }
3615
3616 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3617         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3618         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3619 {
3620         struct scsi_cmnd *cmd = c->scsi_cmd;
3621         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3622         unsigned int len;
3623         unsigned int total_len = 0;
3624         struct scatterlist *sg;
3625         u64 addr64;
3626         int use_sg, i;
3627         struct SGDescriptor *curr_sg;
3628         u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3629
3630         /* TODO: implement chaining support */
3631         if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3632                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3633                 return IO_ACCEL_INELIGIBLE;
3634         }
3635
3636         BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3637
3638         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3639                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3640                 return IO_ACCEL_INELIGIBLE;
3641         }
3642
3643         c->cmd_type = CMD_IOACCEL1;
3644
3645         /* Adjust the DMA address to point to the accelerated command buffer */
3646         c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3647                                 (c->cmdindex * sizeof(*cp));
3648         BUG_ON(c->busaddr & 0x0000007F);
3649
3650         use_sg = scsi_dma_map(cmd);
3651         if (use_sg < 0) {
3652                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3653                 return use_sg;
3654         }
3655
3656         if (use_sg) {
3657                 curr_sg = cp->SG;
3658                 scsi_for_each_sg(cmd, sg, use_sg, i) {
3659                         addr64 = (u64) sg_dma_address(sg);
3660                         len  = sg_dma_len(sg);
3661                         total_len += len;
3662                         curr_sg->Addr = cpu_to_le64(addr64);
3663                         curr_sg->Len = cpu_to_le32(len);
3664                         curr_sg->Ext = cpu_to_le32(0);
3665                         curr_sg++;
3666                 }
3667                 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3668
3669                 switch (cmd->sc_data_direction) {
3670                 case DMA_TO_DEVICE:
3671                         control |= IOACCEL1_CONTROL_DATA_OUT;
3672                         break;
3673                 case DMA_FROM_DEVICE:
3674                         control |= IOACCEL1_CONTROL_DATA_IN;
3675                         break;
3676                 case DMA_NONE:
3677                         control |= IOACCEL1_CONTROL_NODATAXFER;
3678                         break;
3679                 default:
3680                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3681                         cmd->sc_data_direction);
3682                         BUG();
3683                         break;
3684                 }
3685         } else {
3686                 control |= IOACCEL1_CONTROL_NODATAXFER;
3687         }
3688
3689         c->Header.SGList = use_sg;
3690         /* Fill out the command structure to submit */
3691         cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
3692         cp->transfer_len = cpu_to_le32(total_len);
3693         cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
3694                         (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
3695         cp->control = cpu_to_le32(control);
3696         memcpy(cp->CDB, cdb, cdb_len);
3697         memcpy(cp->CISS_LUN, scsi3addr, 8);
3698         /* Tag was already set at init time. */
3699         enqueue_cmd_and_start_io(h, c);
3700         return 0;
3701 }
3702
3703 /*
3704  * Queue a command directly to a device behind the controller using the
3705  * I/O accelerator path.
3706  */
3707 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3708         struct CommandList *c)
3709 {
3710         struct scsi_cmnd *cmd = c->scsi_cmd;
3711         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3712
3713         c->phys_disk = dev;
3714
3715         return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3716                 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
3717 }
3718
3719 /*
3720  * Set encryption parameters for the ioaccel2 request
3721  */
3722 static void set_encrypt_ioaccel2(struct ctlr_info *h,
3723         struct CommandList *c, struct io_accel2_cmd *cp)
3724 {
3725         struct scsi_cmnd *cmd = c->scsi_cmd;
3726         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3727         struct raid_map_data *map = &dev->raid_map;
3728         u64 first_block;
3729
3730         /* Are we doing encryption on this device */
3731         if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
3732                 return;
3733         /* Set the data encryption key index. */
3734         cp->dekindex = map->dekindex;
3735
3736         /* Set the encryption enable flag, encoded into direction field. */
3737         cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3738
3739         /* Set encryption tweak values based on the logical block address.
3740          * If the block size is 512, the tweak value is the LBA.
3741          * For other block sizes, the tweak is (LBA * block size) / 512.
3742          */
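        /* Illustrative example (hypothetical volume): with a 4096-byte
         * block size and a starting LBA of 100, the tweak becomes
         * 100 * 4096 / 512 = 800; with a 512-byte block size the LBA
         * (100) is used directly.
         */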
3743         switch (cmd->cmnd[0]) {
3744         /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3745         case WRITE_6:
3746         case READ_6:
3747                 first_block = get_unaligned_be16(&cmd->cmnd[2]);
3748                 break;
3749         case WRITE_10:
3750         case READ_10:
3751         /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3752         case WRITE_12:
3753         case READ_12:
3754                 first_block = get_unaligned_be32(&cmd->cmnd[2]);
3755                 break;
3756         case WRITE_16:
3757         case READ_16:
3758                 first_block = get_unaligned_be64(&cmd->cmnd[2]);
3759                 break;
3760         default:
3761                 dev_err(&h->pdev->dev,
3762                         "ERROR: %s: size (0x%x) not supported for encryption\n",
3763                         __func__, cmd->cmnd[0]);
3764                 BUG();
3765                 break;
3766         }
3767
3768         if (le32_to_cpu(map->volume_blk_size) != 512)
3769                 first_block = first_block *
3770                                 le32_to_cpu(map->volume_blk_size)/512;
3771
3772         cp->tweak_lower = cpu_to_le32(first_block);
3773         cp->tweak_upper = cpu_to_le32(first_block >> 32);
3774 }
3775
3776 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3777         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3778         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3779 {
3780         struct scsi_cmnd *cmd = c->scsi_cmd;
3781         struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3782         struct ioaccel2_sg_element *curr_sg;
3783         int use_sg, i;
3784         struct scatterlist *sg;
3785         u64 addr64;
3786         u32 len;
3787         u32 total_len = 0;
3788
3789         if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3790                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3791                 return IO_ACCEL_INELIGIBLE;
3792         }
3793
3794         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3795                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3796                 return IO_ACCEL_INELIGIBLE;
3797         }
3798
3799         c->cmd_type = CMD_IOACCEL2;
3800         /* Adjust the DMA address to point to the accelerated command buffer */
3801         c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3802                                 (c->cmdindex * sizeof(*cp));
3803         BUG_ON(c->busaddr & 0x0000007F);
3804
3805         memset(cp, 0, sizeof(*cp));
3806         cp->IU_type = IOACCEL2_IU_TYPE;
3807
3808         use_sg = scsi_dma_map(cmd);
3809         if (use_sg < 0) {
3810                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3811                 return use_sg;
3812         }
3813
3814         if (use_sg) {
3815                 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3816                 curr_sg = cp->sg;
3817                 scsi_for_each_sg(cmd, sg, use_sg, i) {
3818                         addr64 = (u64) sg_dma_address(sg);
3819                         len  = sg_dma_len(sg);
3820                         total_len += len;
3821                         curr_sg->address = cpu_to_le64(addr64);
3822                         curr_sg->length = cpu_to_le32(len);
3823                         curr_sg->reserved[0] = 0;
3824                         curr_sg->reserved[1] = 0;
3825                         curr_sg->reserved[2] = 0;
3826                         curr_sg->chain_indicator = 0;
3827                         curr_sg++;
3828                 }
3829
3830                 switch (cmd->sc_data_direction) {
3831                 case DMA_TO_DEVICE:
3832                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3833                         cp->direction |= IOACCEL2_DIR_DATA_OUT;
3834                         break;
3835                 case DMA_FROM_DEVICE:
3836                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3837                         cp->direction |= IOACCEL2_DIR_DATA_IN;
3838                         break;
3839                 case DMA_NONE:
3840                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3841                         cp->direction |= IOACCEL2_DIR_NO_DATA;
3842                         break;
3843                 default:
3844                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3845                                 cmd->sc_data_direction);
3846                         BUG();
3847                         break;
3848                 }
3849         } else {
3850                 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3851                 cp->direction |= IOACCEL2_DIR_NO_DATA;
3852         }
3853
3854         /* Set encryption parameters, if necessary */
3855         set_encrypt_ioaccel2(h, c, cp);
3856
3857         cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
3858         cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
3859         memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3860
3861         /* fill in sg elements */
3862         cp->sg_count = (u8) use_sg;
3863
3864         cp->data_len = cpu_to_le32(total_len);
3865         cp->err_ptr = cpu_to_le64(c->busaddr +
3866                         offsetof(struct io_accel2_cmd, error_data));
3867         cp->err_len = cpu_to_le32(sizeof(cp->error_data));
3868
3869         enqueue_cmd_and_start_io(h, c);
3870         return 0;
3871 }
3872
3873 /*
3874  * Queue a command to the correct I/O accelerator path.
3875  */
3876 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3877         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3878         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3879 {
3880         /* Try to honor the device's queue depth */
3881         if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
3882                                         phys_disk->queue_depth) {
3883                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3884                 return IO_ACCEL_INELIGIBLE;
3885         }
3886         if (h->transMethod & CFGTBL_Trans_io_accel1)
3887                 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3888                                                 cdb, cdb_len, scsi3addr,
3889                                                 phys_disk);
3890         else
3891                 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3892                                                 cdb, cdb_len, scsi3addr,
3893                                                 phys_disk);
3894 }
3895
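/*
 * raid_map_helper() below adjusts *map_index so that it points at the
 * copy of the data held by the mirror group selected by offload_to_mirror
 * (group 0 when offload_to_mirror is 0); *current_group tracks the group
 * being examined while searching.
 */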
3896 static void raid_map_helper(struct raid_map_data *map,
3897                 int offload_to_mirror, u32 *map_index, u32 *current_group)
3898 {
3899         if (offload_to_mirror == 0)  {
3900                 /* use physical disk in the first mirrored group. */
3901                 *map_index %= le16_to_cpu(map->data_disks_per_row);
3902                 return;
3903         }
3904         do {
3905                 /* determine mirror group that *map_index indicates */
3906                 *current_group = *map_index /
3907                         le16_to_cpu(map->data_disks_per_row);
3908                 if (offload_to_mirror == *current_group)
3909                         continue;
3910                 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
3911                         /* select map index from next group */
3912                         *map_index += le16_to_cpu(map->data_disks_per_row);
3913                         (*current_group)++;
3914                 } else {
3915                         /* select map index from first group */
3916                         *map_index %= le16_to_cpu(map->data_disks_per_row);
3917                         *current_group = 0;
3918                 }
3919         } while (offload_to_mirror != *current_group);
3920 }
3921
3922 /*
3923  * Attempt to perform offload RAID mapping for a logical volume I/O.
3924  */
3925 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3926         struct CommandList *c)
3927 {
3928         struct scsi_cmnd *cmd = c->scsi_cmd;
3929         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3930         struct raid_map_data *map = &dev->raid_map;
3931         struct raid_map_disk_data *dd = &map->data[0];
3932         int is_write = 0;
3933         u32 map_index;
3934         u64 first_block, last_block;
3935         u32 block_cnt;
3936         u32 blocks_per_row;
3937         u64 first_row, last_row;
3938         u32 first_row_offset, last_row_offset;
3939         u32 first_column, last_column;
3940         u64 r0_first_row, r0_last_row;
3941         u32 r5or6_blocks_per_row;
3942         u64 r5or6_first_row, r5or6_last_row;
3943         u32 r5or6_first_row_offset, r5or6_last_row_offset;
3944         u32 r5or6_first_column, r5or6_last_column;
3945         u32 total_disks_per_row;
3946         u32 stripesize;
3947         u32 first_group, last_group, current_group;
3948         u32 map_row;
3949         u32 disk_handle;
3950         u64 disk_block;
3951         u32 disk_block_cnt;
3952         u8 cdb[16];
3953         u8 cdb_len;
3954         u16 strip_size;
3955 #if BITS_PER_LONG == 32
3956         u64 tmpdiv;
3957 #endif
3958         int offload_to_mirror;
3959
3960         /* check for valid opcode, get LBA and block count */
3961         switch (cmd->cmnd[0]) {
3962         case WRITE_6:
3963                 is_write = 1;
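                /* fall through: WRITE_6 shares READ_6's LBA/length parsing */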
3964         case READ_6:
3965                 first_block =
3966                         (((u64) cmd->cmnd[2]) << 8) |
3967                         cmd->cmnd[3];
3968                 block_cnt = cmd->cmnd[4];
3969                 if (block_cnt == 0)
3970                         block_cnt = 256;
3971                 break;
3972         case WRITE_10:
3973                 is_write = 1;
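                /* fall through */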
3974         case READ_10:
3975                 first_block =
3976                         (((u64) cmd->cmnd[2]) << 24) |
3977                         (((u64) cmd->cmnd[3]) << 16) |
3978                         (((u64) cmd->cmnd[4]) << 8) |
3979                         cmd->cmnd[5];
3980                 block_cnt =
3981                         (((u32) cmd->cmnd[7]) << 8) |
3982                         cmd->cmnd[8];
3983                 break;
3984         case WRITE_12:
3985                 is_write = 1;
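                /* fall through */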
3986         case READ_12:
3987                 first_block =
3988                         (((u64) cmd->cmnd[2]) << 24) |
3989                         (((u64) cmd->cmnd[3]) << 16) |
3990                         (((u64) cmd->cmnd[4]) << 8) |
3991                         cmd->cmnd[5];
3992                 block_cnt =
3993                         (((u32) cmd->cmnd[6]) << 24) |
3994                         (((u32) cmd->cmnd[7]) << 16) |
3995                         (((u32) cmd->cmnd[8]) << 8) |
3996                         cmd->cmnd[9];
3997                 break;
3998         case WRITE_16:
3999                 is_write = 1;
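                /* fall through */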
4000         case READ_16:
4001                 first_block =
4002                         (((u64) cmd->cmnd[2]) << 56) |
4003                         (((u64) cmd->cmnd[3]) << 48) |
4004                         (((u64) cmd->cmnd[4]) << 40) |
4005                         (((u64) cmd->cmnd[5]) << 32) |
4006                         (((u64) cmd->cmnd[6]) << 24) |
4007                         (((u64) cmd->cmnd[7]) << 16) |
4008                         (((u64) cmd->cmnd[8]) << 8) |
4009                         cmd->cmnd[9];
4010                 block_cnt =
4011                         (((u32) cmd->cmnd[10]) << 24) |
4012                         (((u32) cmd->cmnd[11]) << 16) |
4013                         (((u32) cmd->cmnd[12]) << 8) |
4014                         cmd->cmnd[13];
4015                 break;
4016         default:
4017                 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
4018         }
4019         last_block = first_block + block_cnt - 1;
4020
4021         /* check for write to non-RAID-0 */
4022         if (is_write && dev->raid_level != 0)
4023                 return IO_ACCEL_INELIGIBLE;
4024
4025         /* check for invalid block or wraparound */
4026         if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4027                 last_block < first_block)
4028                 return IO_ACCEL_INELIGIBLE;
4029
4030         /* calculate stripe information for the request */
4031         blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4032                                 le16_to_cpu(map->strip_size);
4033         strip_size = le16_to_cpu(map->strip_size);
4034 #if BITS_PER_LONG == 32
4035         tmpdiv = first_block;
4036         (void) do_div(tmpdiv, blocks_per_row);
4037         first_row = tmpdiv;
4038         tmpdiv = last_block;
4039         (void) do_div(tmpdiv, blocks_per_row);
4040         last_row = tmpdiv;
4041         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4042         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4043         tmpdiv = first_row_offset;
4044         (void) do_div(tmpdiv, strip_size);
4045         first_column = tmpdiv;
4046         tmpdiv = last_row_offset;
4047         (void) do_div(tmpdiv, strip_size);
4048         last_column = tmpdiv;
4049 #else
4050         first_row = first_block / blocks_per_row;
4051         last_row = last_block / blocks_per_row;
4052         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4053         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4054         first_column = first_row_offset / strip_size;
4055         last_column = last_row_offset / strip_size;
4056 #endif
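        /*
         * Worked example with hypothetical numbers: data_disks_per_row = 3
         * and strip_size = 128 give blocks_per_row = 384.  A request at
         * LBA 500 maps to row 1 (500 / 384), row offset 116 and column 0
         * (116 / 128); only if the last block lands in the same row and
         * column can the request go down the ioaccel path.
         */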
4057
4058         /* if this isn't a single row/column then give to the controller */
4059         if ((first_row != last_row) || (first_column != last_column))
4060                 return IO_ACCEL_INELIGIBLE;
4061
4062         /* proceeding with driver mapping */
4063         total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4064                                 le16_to_cpu(map->metadata_disks_per_row);
4065         map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4066                                 le16_to_cpu(map->row_cnt);
4067         map_index = (map_row * total_disks_per_row) + first_column;
4068
4069         switch (dev->raid_level) {
4070         case HPSA_RAID_0:
4071                 break; /* nothing special to do */
4072         case HPSA_RAID_1:
4073                 /* Handles load balance across RAID 1 members.
4074                  * (2-drive R1 and R10 with even # of drives.)
4075                  * Appropriate for SSDs, not optimal for HDDs
4076                  */
4077                 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
4078                 if (dev->offload_to_mirror)
4079                         map_index += le16_to_cpu(map->data_disks_per_row);
4080                 dev->offload_to_mirror = !dev->offload_to_mirror;
4081                 break;
4082         case HPSA_RAID_ADM:
4083                 /* Handles N-way mirrors (R1-ADM)
4084                  * and R10 with # of drives divisible by 3.
4085                  */
4086                 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
4087
4088                 offload_to_mirror = dev->offload_to_mirror;
4089                 raid_map_helper(map, offload_to_mirror,
4090                                 &map_index, &current_group);
4091                 /* set mirror group to use next time */
4092                 offload_to_mirror =
4093                         (offload_to_mirror >=
4094                         le16_to_cpu(map->layout_map_count) - 1)
4095                         ? 0 : offload_to_mirror + 1;
4096                 dev->offload_to_mirror = offload_to_mirror;
4097                 /* Avoid direct use of dev->offload_to_mirror within this
4098                  * function since multiple threads might simultaneously
4099                  * increment it beyond the range of map->layout_map_count - 1.
4100                  */
4101                 break;
4102         case HPSA_RAID_5:
4103         case HPSA_RAID_6:
4104                 if (le16_to_cpu(map->layout_map_count) <= 1)
4105                         break;
4106
4107                 /* Verify first and last block are in same RAID group */
4108                 r5or6_blocks_per_row =
4109                         le16_to_cpu(map->strip_size) *
4110                         le16_to_cpu(map->data_disks_per_row);
4111                 BUG_ON(r5or6_blocks_per_row == 0);
4112                 stripesize = r5or6_blocks_per_row *
4113                         le16_to_cpu(map->layout_map_count);
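                /*
                 * A full stripe covers layout_map_count groups of
                 * r5or6_blocks_per_row blocks each, so a block's offset
                 * within the stripe divided by r5or6_blocks_per_row
                 * identifies its RAID group.
                 */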
4114 #if BITS_PER_LONG == 32
4115                 tmpdiv = first_block;
4116                 first_group = do_div(tmpdiv, stripesize);
4117                 tmpdiv = first_group;
4118                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4119                 first_group = tmpdiv;
4120                 tmpdiv = last_block;
4121                 last_group = do_div(tmpdiv, stripesize);
4122                 tmpdiv = last_group;
4123                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4124                 last_group = tmpdiv;
4125 #else
4126                 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
4127                 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
4128 #endif
4129                 if (first_group != last_group)
4130                         return IO_ACCEL_INELIGIBLE;
4131
4132                 /* Verify request is in a single row of RAID 5/6 */
4133 #if BITS_PER_LONG == 32
4134                 tmpdiv = first_block;
4135                 (void) do_div(tmpdiv, stripesize);
4136                 first_row = r5or6_first_row = r0_first_row = tmpdiv;
4137                 tmpdiv = last_block;
4138                 (void) do_div(tmpdiv, stripesize);
4139                 r5or6_last_row = r0_last_row = tmpdiv;
4140 #else
4141                 first_row = r5or6_first_row = r0_first_row =
4142                                                 first_block / stripesize;
4143                 r5or6_last_row = r0_last_row = last_block / stripesize;
4144 #endif
4145                 if (r5or6_first_row != r5or6_last_row)
4146                         return IO_ACCEL_INELIGIBLE;
4147
4148
4149                 /* Verify request is in a single column */
4150 #if BITS_PER_LONG == 32
4151                 tmpdiv = first_block;
4152                 first_row_offset = do_div(tmpdiv, stripesize);
4153                 tmpdiv = first_row_offset;
4154                 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
4155                 r5or6_first_row_offset = first_row_offset;
4156                 tmpdiv = last_block;
4157                 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4158                 tmpdiv = r5or6_last_row_offset;
4159                 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4160                 tmpdiv = r5or6_first_row_offset;
4161                 (void) do_div(tmpdiv, map->strip_size);
4162                 first_column = r5or6_first_column = tmpdiv;
4163                 tmpdiv = r5or6_last_row_offset;
4164                 (void) do_div(tmpdiv, map->strip_size);
4165                 r5or6_last_column = tmpdiv;
4166 #else
4167                 first_row_offset = r5or6_first_row_offset =
4168                         (u32)((first_block % stripesize) %
4169                                                 r5or6_blocks_per_row);
4170
4171                 r5or6_last_row_offset =
4172                         (u32)((last_block % stripesize) %
4173                                                 r5or6_blocks_per_row);
4174
4175                 first_column = r5or6_first_column =
4176                         r5or6_first_row_offset / le16_to_cpu(map->strip_size);
4177                 r5or6_last_column =
4178                         r5or6_last_row_offset / le16_to_cpu(map->strip_size);
4179 #endif
4180                 if (r5or6_first_column != r5or6_last_column)
4181                         return IO_ACCEL_INELIGIBLE;
4182
4183                 /* Request is eligible */
4184                 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4185                         le16_to_cpu(map->row_cnt);
4186
4187                 map_index = (first_group *
4188                         (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
4189                         (map_row * total_disks_per_row) + first_column;
4190                 break;
4191         default:
4192                 return IO_ACCEL_INELIGIBLE;
4193         }
4194
4195         if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4196                 return IO_ACCEL_INELIGIBLE;
4197
4198         c->phys_disk = dev->phys_disk[map_index];
4199
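        /*
         * Convert the volume-relative LBA to a physical-disk LBA: the
         * disk's starting block, plus one full strip per preceding row,
         * plus the offset into the current strip (the row offset minus
         * the strips covered by the preceding columns).
         */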
4200         disk_handle = dd[map_index].ioaccel_handle;
4201         disk_block = le64_to_cpu(map->disk_starting_blk) +
4202                         first_row * le16_to_cpu(map->strip_size) +
4203                         (first_row_offset - first_column *
4204                         le16_to_cpu(map->strip_size));
4205         disk_block_cnt = block_cnt;
4206
4207         /* handle differing logical/physical block sizes */
4208         if (map->phys_blk_shift) {
4209                 disk_block <<= map->phys_blk_shift;
4210                 disk_block_cnt <<= map->phys_blk_shift;
4211         }
4212         BUG_ON(disk_block_cnt > 0xffff);
4213
4214         /* build the new CDB for the physical disk I/O */
4215         if (disk_block > 0xffffffff) {
4216                 cdb[0] = is_write ? WRITE_16 : READ_16;
4217                 cdb[1] = 0;
4218                 cdb[2] = (u8) (disk_block >> 56);
4219                 cdb[3] = (u8) (disk_block >> 48);
4220                 cdb[4] = (u8) (disk_block >> 40);
4221                 cdb[5] = (u8) (disk_block >> 32);
4222                 cdb[6] = (u8) (disk_block >> 24);
4223                 cdb[7] = (u8) (disk_block >> 16);
4224                 cdb[8] = (u8) (disk_block >> 8);
4225                 cdb[9] = (u8) (disk_block);
4226                 cdb[10] = (u8) (disk_block_cnt >> 24);
4227                 cdb[11] = (u8) (disk_block_cnt >> 16);
4228                 cdb[12] = (u8) (disk_block_cnt >> 8);
4229                 cdb[13] = (u8) (disk_block_cnt);
4230                 cdb[14] = 0;
4231                 cdb[15] = 0;
4232                 cdb_len = 16;
4233         } else {
4234                 cdb[0] = is_write ? WRITE_10 : READ_10;
4235                 cdb[1] = 0;
4236                 cdb[2] = (u8) (disk_block >> 24);
4237                 cdb[3] = (u8) (disk_block >> 16);
4238                 cdb[4] = (u8) (disk_block >> 8);
4239                 cdb[5] = (u8) (disk_block);
4240                 cdb[6] = 0;
4241                 cdb[7] = (u8) (disk_block_cnt >> 8);
4242                 cdb[8] = (u8) (disk_block_cnt);
4243                 cdb[9] = 0;
4244                 cdb_len = 10;
4245         }
4246         return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
4247                                                 dev->scsi3addr,
4248                                                 dev->phys_disk[map_index]);
4249 }
4250
4251 /*
4252  * Submit commands down the "normal" RAID stack path
4253  * All callers to hpsa_ciss_submit must check lockup_detected
4254  * beforehand: optionally before, and always after, calling cmd_alloc.
4255  */
4256 static int hpsa_ciss_submit(struct ctlr_info *h,
4257         struct CommandList *c, struct scsi_cmnd *cmd,
4258         unsigned char scsi3addr[])
4259 {
4260         cmd->host_scribble = (unsigned char *) c;
4261         c->cmd_type = CMD_SCSI;
4262         c->scsi_cmd = cmd;
4263         c->Header.ReplyQueue = 0;  /* unused in simple mode */
4264         memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
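        /*
         * The tag carries the command's pool index (shifted by
         * DIRECT_LOOKUP_SHIFT) so the completion path can find this
         * CommandList without searching.
         */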
4265         c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
4266
4267         /* Fill in the request block... */
4268
4269         c->Request.Timeout = 0;
4270         memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4271         BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4272         c->Request.CDBLen = cmd->cmd_len;
4273         memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4274         switch (cmd->sc_data_direction) {
4275         case DMA_TO_DEVICE:
4276                 c->Request.type_attr_dir =
4277                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4278                 break;
4279         case DMA_FROM_DEVICE:
4280                 c->Request.type_attr_dir =
4281                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4282                 break;
4283         case DMA_NONE:
4284                 c->Request.type_attr_dir =
4285                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4286                 break;
4287         case DMA_BIDIRECTIONAL:
4288                 /* This can happen if a buggy application does a scsi passthru
4289                  * and sets both inlen and outlen to non-zero. ( see
4290                  * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4291                  */
4292
4293                 c->Request.type_attr_dir =
4294                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4295                 /* This is technically wrong, and hpsa controllers should
4296                  * reject it with CMD_INVALID, which is the most correct
4297                  * response, but non-fibre backends appear to let it
4298                  * slide by, and give the same results as if this field
4299                  * were set correctly.  Either way is acceptable for
4300                  * our purposes here.
4301                  */
4302
4303                 break;
4304
4305         default:
4306                 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4307                         cmd->sc_data_direction);
4308                 BUG();
4309                 break;
4310         }
4311
4312         if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4313                 cmd_free(h, c);
4314                 return SCSI_MLQUEUE_HOST_BUSY;
4315         }
4316         enqueue_cmd_and_start_io(h, c);
4317         /* the cmd'll come back via intr handler in complete_scsi_command()  */
4318         return 0;
4319 }
4320
4321 static void hpsa_command_resubmit_worker(struct work_struct *work)
4322 {
4323         struct scsi_cmnd *cmd;
4324         struct hpsa_scsi_dev_t *dev;
4325         struct CommandList *c =
4326                         container_of(work, struct CommandList, work);
4327
4328         cmd = c->scsi_cmd;
4329         dev = cmd->device->hostdata;
4330         if (!dev) {
4331                 cmd->result = DID_NO_CONNECT << 16;
4332                 cmd->scsi_done(cmd);
4333                 return;
4334         }
4335         if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4336                 /*
4337                  * If we get here, it means dma mapping failed. Try
4338                  * again via scsi mid layer, which will then get
4339                  * SCSI_MLQUEUE_HOST_BUSY.
4340                  */
4341                 cmd->result = DID_IMM_RETRY << 16;
4342                 cmd->scsi_done(cmd);
4343         }
4344 }
4345
4346 /* Running in lock-less mode: struct Scsi_Host->host_lock is not held */
4347 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4348 {
4349         struct ctlr_info *h;
4350         struct hpsa_scsi_dev_t *dev;
4351         unsigned char scsi3addr[8];
4352         struct CommandList *c;
4353         int rc = 0;
4354
4355         /* Get the ptr to our adapter structure out of cmd->host. */
4356         h = sdev_to_hba(cmd->device);
4357         dev = cmd->device->hostdata;
4358         if (!dev) {
4359                 cmd->result = DID_NO_CONNECT << 16;
4360                 cmd->scsi_done(cmd);
4361                 return 0;
4362         }
4363         memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4364
4365         if (unlikely(lockup_detected(h))) {
4366                 cmd->result = DID_NO_CONNECT << 16;
4367                 cmd->scsi_done(cmd);
4368                 return 0;
4369         }
4370         c = cmd_alloc(h);
4371         if (c == NULL) {                        /* trouble... */
4372                 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4373                 return SCSI_MLQUEUE_HOST_BUSY;
4374         }
4375         if (unlikely(lockup_detected(h))) {
4376                 cmd->result = DID_NO_CONNECT << 16;
4377                 cmd_free(h, c);
4378                 cmd->scsi_done(cmd);
4379                 return 0;
4380         }
4381
4382         /*
4383          * Call alternate submit routine for I/O accelerated commands.
4384          * Retries always go down the normal I/O path.
4385          */
4386         if (likely(cmd->retries == 0 &&
4387                 cmd->request->cmd_type == REQ_TYPE_FS &&
4388                 h->acciopath_status)) {
4389
4390                 cmd->host_scribble = (unsigned char *) c;
4391                 c->cmd_type = CMD_SCSI;
4392                 c->scsi_cmd = cmd;
4393
4394                 if (dev->offload_enabled) {
4395                         rc = hpsa_scsi_ioaccel_raid_map(h, c);
4396                         if (rc == 0)
4397                                 return 0; /* Sent on ioaccel path */
4398                         if (rc < 0) {   /* scsi_dma_map failed. */
4399                                 cmd_free(h, c);
4400                                 return SCSI_MLQUEUE_HOST_BUSY;
4401                         }
4402                 } else if (dev->ioaccel_handle) {
4403                         rc = hpsa_scsi_ioaccel_direct_map(h, c);
4404                         if (rc == 0)
4405                                 return 0; /* Sent on direct map path */
4406                         if (rc < 0) {   /* scsi_dma_map failed. */
4407                                 cmd_free(h, c);
4408                                 return SCSI_MLQUEUE_HOST_BUSY;
4409                         }
4410                 }
4411         }
4412         return hpsa_ciss_submit(h, c, cmd, scsi3addr);
4413 }
4414
4415 static void hpsa_scan_complete(struct ctlr_info *h)
4416 {
4417         unsigned long flags;
4418
4419         spin_lock_irqsave(&h->scan_lock, flags);
4420         h->scan_finished = 1;
4421         wake_up_all(&h->scan_wait_queue);
4422         spin_unlock_irqrestore(&h->scan_lock, flags);
4423 }
4424
4425 static void hpsa_scan_start(struct Scsi_Host *sh)
4426 {
4427         struct ctlr_info *h = shost_to_hba(sh);
4428         unsigned long flags;
4429
4430         /*
4431          * Don't let rescans be initiated on a controller known to be locked
4432          * up.  If the controller locks up *during* a rescan, that thread is
4433          * probably hosed, but at least we can prevent new rescan threads from
4434          * piling up on a locked up controller.
4435          */
4436         if (unlikely(lockup_detected(h)))
4437                 return hpsa_scan_complete(h);
4438
4439         /* wait until any scan already in progress is finished. */
4440         while (1) {
4441                 spin_lock_irqsave(&h->scan_lock, flags);
4442                 if (h->scan_finished)
4443                         break;
4444                 spin_unlock_irqrestore(&h->scan_lock, flags);
4445                 wait_event(h->scan_wait_queue, h->scan_finished);
4446                 /* Note: We don't need to worry about a race between this
4447                  * thread and driver unload because the midlayer will
4448                  * have incremented the reference count, so unload won't
4449                  * happen if we're in here.
4450                  */
4451         }
4452         h->scan_finished = 0; /* mark scan as in progress */
4453         spin_unlock_irqrestore(&h->scan_lock, flags);
4454
4455         if (unlikely(lockup_detected(h)))
4456                 return hpsa_scan_complete(h);
4457
4458         hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4459
4460         hpsa_scan_complete(h);
4461 }
4462
4463 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
4464 {
4465         struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
4466
4467         if (!logical_drive)
4468                 return -ENODEV;
4469
4470         if (qdepth < 1)
4471                 qdepth = 1;
4472         else if (qdepth > logical_drive->queue_depth)
4473                 qdepth = logical_drive->queue_depth;
4474
4475         return scsi_change_queue_depth(sdev, qdepth);
4476 }
4477
4478 static int hpsa_scan_finished(struct Scsi_Host *sh,
4479         unsigned long elapsed_time)
4480 {
4481         struct ctlr_info *h = shost_to_hba(sh);
4482         unsigned long flags;
4483         int finished;
4484
4485         spin_lock_irqsave(&h->scan_lock, flags);
4486         finished = h->scan_finished;
4487         spin_unlock_irqrestore(&h->scan_lock, flags);
4488         return finished;
4489 }
4490
4491 static void hpsa_unregister_scsi(struct ctlr_info *h)
4492 {
4493         /* we are being forcibly unloaded, and may not refuse. */
4494         scsi_remove_host(h->scsi_host);
4495         scsi_host_put(h->scsi_host);
4496         h->scsi_host = NULL;
4497 }
4498
4499 static int hpsa_register_scsi(struct ctlr_info *h)
4500 {
4501         struct Scsi_Host *sh;
4502         int error;
4503
4504         sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4505         if (sh == NULL)
4506                 goto fail;
4507
4508         sh->io_port = 0;
4509         sh->n_io_port = 0;
4510         sh->this_id = -1;
4511         sh->max_channel = 3;
4512         sh->max_cmd_len = MAX_COMMAND_SIZE;
4513         sh->max_lun = HPSA_MAX_LUN;
4514         sh->max_id = HPSA_MAX_LUN;
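        /* leave HPSA_NRESERVED_CMDS of the pool for driver-internal commands */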
4515         sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
4516         sh->cmd_per_lun = sh->can_queue;
4517         sh->sg_tablesize = h->maxsgentries;
4518         h->scsi_host = sh;
4519         sh->hostdata[0] = (unsigned long) h;
4520         sh->irq = h->intr[h->intr_mode];
4521         sh->unique_id = sh->irq;
4522         error = scsi_add_host(sh, &h->pdev->dev);
4523         if (error)
4524                 goto fail_host_put;
4525         scsi_scan_host(sh);
4526         return 0;
4527
4528  fail_host_put:
4529         dev_err(&h->pdev->dev, "%s: scsi_add_host"
4530                 " failed for controller %d\n", __func__, h->ctlr);
4531         scsi_host_put(sh);
4532         return error;
4533  fail:
4534         dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
4535                 " failed for controller %d\n", __func__, h->ctlr);
4536         return -ENOMEM;
4537 }
4538
4539 static int wait_for_device_to_become_ready(struct ctlr_info *h,
4540         unsigned char lunaddr[])
4541 {
4542         int rc;
4543         int count = 0;
4544         int waittime = 1; /* seconds */
4545         struct CommandList *c;
4546
4547         c = cmd_alloc(h);
4548         if (!c) {
4549                 dev_warn(&h->pdev->dev, "out of memory in "
4550                         "wait_for_device_to_become_ready.\n");
4551                 return IO_ERROR;
4552         }
4553
4554         /* Send test unit ready until device ready, or give up. */
4555         while (count < HPSA_TUR_RETRY_LIMIT) {
4556
4557                 /* Wait for a bit.  do this first, because if we send
4558                  * the TUR right away, the reset will just abort it.
4559                  */
4560                 msleep(1000 * waittime);
4561                 count++;
4562                 rc = 0; /* Device ready. */
4563
4564                 /* Increase wait time with each try, up to a point. */
4565                 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4566                         waittime = waittime * 2;
4567
4568                 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4569                 (void) fill_cmd(c, TEST_UNIT_READY, h,
4570                                 NULL, 0, 0, lunaddr, TYPE_CMD);
4571                 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
4572                                                 NO_TIMEOUT);
4573                 if (rc)
4574                         goto do_it_again;
4575                 /* no unmap needed here because no data xfer. */
4576
4577                 if (c->err_info->CommandStatus == CMD_SUCCESS)
4578                         break;
4579
4580                 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4581                         c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4582                         (c->err_info->SenseInfo[2] == NO_SENSE ||
4583                         c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4584                         break;
4585 do_it_again:
4586                 dev_warn(&h->pdev->dev, "waiting %d secs "
4587                         "for device to become ready.\n", waittime);
4588                 rc = 1; /* device not ready. */
4589         }
4590
4591         if (rc)
4592                 dev_warn(&h->pdev->dev, "giving up on device.\n");
4593         else
4594                 dev_warn(&h->pdev->dev, "device is ready.\n");
4595
4596         cmd_free(h, c);
4597         return rc;
4598 }
4599
4600 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
4601  * complaining.  Doing a host- or bus-reset can't do anything good here.
4602  */
4603 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4604 {
4605         int rc;
4606         struct ctlr_info *h;
4607         struct hpsa_scsi_dev_t *dev;
4608
4609         /* find the controller to which the reset request was sent */
4610         h = sdev_to_hba(scsicmd->device);
4611         if (h == NULL) /* paranoia */
4612                 return FAILED;
4613
4614         if (lockup_detected(h))
4615                 return FAILED;
4616
4617         dev = scsicmd->device->hostdata;
4618         if (!dev) {
4619                 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
4620                         "device lookup failed.\n");
4621                 return FAILED;
4622         }
4623
4624         /* if controller locked up, we can guarantee command won't complete */
4625         if (lockup_detected(h)) {
4626                 dev_warn(&h->pdev->dev,
4627                         "scsi %d:%d:%d:%d RESET FAILED, lockup detected\n",
4628                         h->scsi_host->host_no, dev->bus, dev->target,
4629                         dev->lun);
4630                 return FAILED;
4631         }
4632
4633         /* this reset request might be the result of a lockup; check */
4634         if (detect_controller_lockup(h)) {
4635                 dev_warn(&h->pdev->dev,
4636                          "scsi %d:%d:%d:%d RESET FAILED, new lockup detected\n",
4637                          h->scsi_host->host_no, dev->bus, dev->target,
4638                          dev->lun);
4639                 return FAILED;
4640         }
4641
4642         hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");
4643
4644         /* send a reset to the SCSI LUN which the command was sent to */
4645         rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
4646                              DEFAULT_REPLY_QUEUE);
4647         if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4648                 return SUCCESS;
4649
4650         dev_warn(&h->pdev->dev,
4651                 "scsi %d:%d:%d:%d reset failed\n",
4652                 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4653         return FAILED;
4654 }
4655
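/*
 * Reverse the byte order within each 4-byte half of the 8-byte abort tag,
 * for controllers that report needs_abort_tags_swizzled.
 */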
4656 static void swizzle_abort_tag(u8 *tag)
4657 {
4658         u8 original_tag[8];
4659
4660         memcpy(original_tag, tag, 8);
4661         tag[0] = original_tag[3];
4662         tag[1] = original_tag[2];
4663         tag[2] = original_tag[1];
4664         tag[3] = original_tag[0];
4665         tag[4] = original_tag[7];
4666         tag[5] = original_tag[6];
4667         tag[6] = original_tag[5];
4668         tag[7] = original_tag[4];
4669 }
4670
4671 static void hpsa_get_tag(struct ctlr_info *h,
4672         struct CommandList *c, __le32 *taglower, __le32 *tagupper)
4673 {
4674         u64 tag;
4675         if (c->cmd_type == CMD_IOACCEL1) {
4676                 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4677                         &h->ioaccel_cmd_pool[c->cmdindex];
4678                 tag = le64_to_cpu(cm1->tag);
4679                 *tagupper = cpu_to_le32(tag >> 32);
4680                 *taglower = cpu_to_le32(tag);
4681                 return;
4682         }
4683         if (c->cmd_type == CMD_IOACCEL2) {
4684                 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4685                         &h->ioaccel2_cmd_pool[c->cmdindex];
4686                 /* upper tag not used in ioaccel2 mode */
4687                 memset(tagupper, 0, sizeof(*tagupper));
4688                 *taglower = cm2->Tag;
4689                 return;
4690         }
4691         tag = le64_to_cpu(c->Header.tag);
4692         *tagupper = cpu_to_le32(tag >> 32);
4693         *taglower = cpu_to_le32(tag);
4694 }
4695
4696 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4697         struct CommandList *abort, int reply_queue)
4698 {
4699         int rc = IO_OK;
4700         struct CommandList *c;
4701         struct ErrorInfo *ei;
4702         __le32 tagupper, taglower;
4703
4704         c = cmd_alloc(h);
4705         if (c == NULL) {        /* trouble... */
4706                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4707                 return -ENOMEM;
4708         }
4709
4710         /* fill_cmd can't fail here, no buffer to map */
4711         (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
4712                 0, 0, scsi3addr, TYPE_MSG);
4713         if (h->needs_abort_tags_swizzled)
4714                 swizzle_abort_tag(&c->Request.CDB[4]);
4715         (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
4716         hpsa_get_tag(h, abort, &taglower, &tagupper);
4717         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
4718                 __func__, tagupper, taglower);
4719         /* no unmap needed here because no data xfer. */
4720
4721         ei = c->err_info;
4722         switch (ei->CommandStatus) {
4723         case CMD_SUCCESS:
4724                 break;
4725         case CMD_TMF_STATUS:
4726                 rc = hpsa_evaluate_tmf_status(h, c);
4727                 break;
4728         case CMD_UNABORTABLE: /* Very common, don't make noise. */
4729                 rc = -1;
4730                 break;
4731         default:
4732                 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
4733                         __func__, tagupper, taglower);
4734                 hpsa_scsi_interpret_error(h, c);
4735                 rc = -1;
4736                 break;
4737         }
4738         cmd_free(h, c);
4739         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4740                 __func__, tagupper, taglower);
4741         return rc;
4742 }
4743
4744 /* ioaccel2 path firmware cannot handle abort task requests.
4745  * Change abort requests to physical target reset, and send to the
4746  * address of the physical disk used for the ioaccel 2 command.
4747  * Return 0 on success (IO_OK)
4748  *       -1 on failure
4749  */
4750
4751 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4752         unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
4753 {
4754         int rc = IO_OK;
4755         struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4756         struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4757         unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4758         unsigned char *psa = &phys_scsi3addr[0];
4759
4760         /* Get a pointer to the hpsa logical device. */
4761         scmd = abort->scsi_cmd;
4762         dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4763         if (dev == NULL) {
4764                 dev_warn(&h->pdev->dev,
4765                         "Cannot abort: no device pointer for command.\n");
4766                 return -1; /* not abortable */
4767         }
4768
4769         if (h->raid_offload_debug > 0)
4770                 dev_info(&h->pdev->dev,
4771                         "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4772                         h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4773                         "Reset as abort",
4774                         scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4775                         scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4776
4777         if (!dev->offload_enabled) {
4778                 dev_warn(&h->pdev->dev,
4779                         "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4780                 return -1; /* not abortable */
4781         }
4782
4783         /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4784         if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4785                 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4786                 return -1; /* not abortable */
4787         }
4788
4789         /* send the reset */
4790         if (h->raid_offload_debug > 0)
4791                 dev_info(&h->pdev->dev,
4792                         "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4793                         psa[0], psa[1], psa[2], psa[3],
4794                         psa[4], psa[5], psa[6], psa[7]);
4795         rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
4796         if (rc != 0) {
4797                 dev_warn(&h->pdev->dev,
4798                         "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4799                         psa[0], psa[1], psa[2], psa[3],
4800                         psa[4], psa[5], psa[6], psa[7]);
4801                 return rc; /* failed to reset */
4802         }
4803
4804         /* wait for device to recover */
4805         if (wait_for_device_to_become_ready(h, psa) != 0) {
4806                 dev_warn(&h->pdev->dev,
4807                         "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4808                         psa[0], psa[1], psa[2], psa[3],
4809                         psa[4], psa[5], psa[6], psa[7]);
4810                 return -1;  /* failed to recover */
4811         }
4812
4813         /* device recovered */
4814         dev_info(&h->pdev->dev,
4815                 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4816                 psa[0], psa[1], psa[2], psa[3],
4817                 psa[4], psa[5], psa[6], psa[7]);
4818
4819         return rc; /* success */
4820 }
4821
4822 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4823         unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
4824 {
4825         /* ioaccelerator mode 2 commands should be aborted via the
4826          * accelerated path, since RAID path is unaware of these commands,
4827          * but underlying firmware can't handle abort TMF.
4828          * Change abort to physical device reset.
4829          */
4830         if (abort->cmd_type == CMD_IOACCEL2)
4831                 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
4832                                                         abort, reply_queue);
4833         return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
4834 }
4835
4836 /* Find out which reply queue a command was meant to return on */
4837 static int hpsa_extract_reply_queue(struct ctlr_info *h,
4838                                         struct CommandList *c)
4839 {
4840         if (c->cmd_type == CMD_IOACCEL2)
4841                 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
4842         return c->Header.ReplyQueue;
4843 }
4844
4845 /*
4846  * Limit concurrency of abort commands to prevent
4847  * over-subscription of commands
4848  */
4849 static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
4850 {
4851 #define ABORT_CMD_WAIT_MSECS 5000
4852         return !wait_event_timeout(h->abort_cmd_wait_queue,
4853                         atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
4854                         msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
4855 }
4856
4857 /* Send an abort for the specified command.
4858  *      If the device and controller support it,
4859  *              send a task abort request.
4860  */
4861 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4862 {
4863
4864         int i, rc;
4865         struct ctlr_info *h;
4866         struct hpsa_scsi_dev_t *dev;
4867         struct CommandList *abort; /* pointer to command to be aborted */
4868         struct scsi_cmnd *as;   /* ptr to scsi cmd inside aborted command. */
4869         char msg[256];          /* For debug messaging. */
4870         int ml = 0;
4871         __le32 tagupper, taglower;
4872         int refcount, reply_queue;
4873
4874         if (sc == NULL)
4875                 return FAILED;
4876
4877         if (sc->device == NULL)
4878                 return FAILED;
4879
4880         /* Find the controller of the command to be aborted */
4881         h = sdev_to_hba(sc->device);
4882         if (h == NULL)
4883                 return FAILED;
4884
4885         /* Find the device of the command to be aborted */
4886         dev = sc->device->hostdata;
4887         if (!dev) {
4888                 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4889                                 __func__);
4890                 return FAILED;
4891         }
4892
4893         /* If controller locked up, we can guarantee command won't complete */
4894         if (lockup_detected(h)) {
4895                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
4896                                         "ABORT FAILED, lockup detected");
4897                 return FAILED;
4898         }
4899
4900         /* This is a good time to check if controller lockup has occurred */
4901         if (detect_controller_lockup(h)) {
4902                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
4903                                         "ABORT FAILED, new lockup detected");
4904                 return FAILED;
4905         }
4906
4907         /* Check that controller supports some kind of task abort */
4908         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4909                 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4910                 return FAILED;
4911
4912         memset(msg, 0, sizeof(msg));
4913         ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s",
4914                 h->scsi_host->host_no, sc->device->channel,
4915                 sc->device->id, sc->device->lun,
4916                 "Aborting command");
4917
4918         /* Get SCSI command to be aborted */
4919         abort = (struct CommandList *) sc->host_scribble;
4920         if (abort == NULL) {
4921                 /* This can happen if the command already completed. */
4922                 return SUCCESS;
4923         }
4924         refcount = atomic_inc_return(&abort->refcount);
4925         if (refcount == 1) { /* Command is done already. */
4926                 cmd_free(h, abort);
4927                 return SUCCESS;
4928         }
4929
4930         /* Don't bother trying the abort if we know it won't work. */
4931         if (abort->cmd_type != CMD_IOACCEL2 &&
4932                 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
4933                 cmd_free(h, abort);
4934                 return FAILED;
4935         }
4936
4937         hpsa_get_tag(h, abort, &taglower, &tagupper);
4938         reply_queue = hpsa_extract_reply_queue(h, abort);
4939         ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
4940         as  = abort->scsi_cmd;
4941         if (as != NULL)
4942                 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4943                         as->cmnd[0], as->serial_number);
4944         dev_dbg(&h->pdev->dev, "%s\n", msg);
4945         hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
4946         /*
4947          * Command is in flight, or possibly already completed
4948          * by the firmware (but not to the scsi mid layer) but we can't
4949          * distinguish which.  Send the abort down.
4950          */
4951         if (wait_for_available_abort_cmd(h)) {
4952                 dev_warn(&h->pdev->dev,
4953                         "Timed out waiting for an abort command to become available.\n");
4954                 cmd_free(h, abort);
4955                 return FAILED;
4956         }
4957         rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
4958         atomic_inc(&h->abort_cmds_available);
4959         wake_up_all(&h->abort_cmd_wait_queue);
4960         if (rc != 0) {
4961                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
4962                                         "FAILED to abort command");
4963                 cmd_free(h, abort);
4964                 return FAILED;
4965         }
4966         dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4967
4968         /* If the abort(s) above completed and actually aborted the
4969          * command, then the command to be aborted should already be
4970          * completed.  If not, wait around a bit more to see if they
4971          * manage to complete normally.
4972          */
4973 #define ABORT_COMPLETE_WAIT_SECS 30
4974         for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4975                 refcount = atomic_read(&abort->refcount);
4976                 if (refcount < 2) {
4977                         cmd_free(h, abort);
4978                         return SUCCESS;
4979                 } else {
4980                         msleep(100);
4981                 }
4982         }
4983         dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4984                 msg, ABORT_COMPLETE_WAIT_SECS);
4985         cmd_free(h, abort);
4986         return FAILED;
4987 }
4988
4989 /*
4990  * For operations that cannot sleep, a command block is allocated at init,
4991  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4992  * which ones are free or in use.  Lock must be held when calling this.
4993  * cmd_free() is the complement.
4994  */
4995
4996 static struct CommandList *cmd_alloc(struct ctlr_info *h)
4997 {
4998         struct CommandList *c;
4999         int i;
5000         union u64bit temp64;
5001         dma_addr_t cmd_dma_handle, err_dma_handle;
5002         int refcount;
5003         unsigned long offset;
5004
5005         /*
5006          * There is some *extremely* small but non-zero chance that
5007          * multiple threads could get in here, and one thread could
5008          * be scanning through the list of bits looking for a free
5009          * one, but the free ones are always behind him, and other
5010          * threads sneak in behind him and eat them before he can
5011          * get to them, so that while there is always a free one, a
5012          * very unlucky thread might be starved anyway, never able to
5013          * beat the other threads.  In reality, this happens so
5014          * infrequently as to be indistinguishable from never.
5015          */
5016
5017         offset = h->last_allocation; /* benignly racy */
5018         for (;;) {
5019                 i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
5020                 if (unlikely(i == h->nr_cmds)) {
5021                         offset = 0;
5022                         continue;
5023                 }
5024                 c = h->cmd_pool + i;
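                /*
                 * Take a reference to claim the slot; if the count was
                 * already non-zero someone else owns this command, so
                 * drop our reference and keep scanning.
                 */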
5025                 refcount = atomic_inc_return(&c->refcount);
5026                 if (unlikely(refcount > 1)) {
5027                         cmd_free(h, c); /* already in use */
5028                         offset = (i + 1) % h->nr_cmds;
5029                         continue;
5030                 }
5031                 set_bit(i & (BITS_PER_LONG - 1),
5032                         h->cmd_pool_bits + (i / BITS_PER_LONG));
5033                 break; /* it's ours now. */
5034         }
5035         h->last_allocation = i; /* benignly racy */
5036
5037         /* Zero out all of commandlist except the last field, refcount */
5038         memset(c, 0, offsetof(struct CommandList, refcount));
5039         c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
5040         cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
5041         c->err_info = h->errinfo_pool + i;
5042         memset(c->err_info, 0, sizeof(*c->err_info));
5043         err_dma_handle = h->errinfo_pool_dhandle
5044             + i * sizeof(*c->err_info);
5045
5046         c->cmdindex = i;
5047
5048         c->busaddr = (u32) cmd_dma_handle;
5049         temp64.val = (u64) err_dma_handle;
5050         c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5051         c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5052
5053         c->h = h;
5054         return c;
5055 }
5056
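/*
 * Drop a reference to a command; the slot goes back to the pool only when
 * the last reference is released.
 */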
5057 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
5058 {
5059         if (atomic_dec_and_test(&c->refcount)) {
5060                 int i;
5061
5062                 i = c - h->cmd_pool;
5063                 clear_bit(i & (BITS_PER_LONG - 1),
5064                           h->cmd_pool_bits + (i / BITS_PER_LONG));
5065         }
5066 }
5067
5068 #ifdef CONFIG_COMPAT
5069
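/*
 * 32-bit compat entry points: copy the 32-bit ioctl structure field by
 * field into a native 64-bit version staged in compat user space, hand it
 * to the regular CCISS_PASSTHRU / CCISS_BIG_PASSTHRU handlers, then copy
 * the error information back to the 32-bit caller.
 */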
5070 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
5071         void __user *arg)
5072 {
5073         IOCTL32_Command_struct __user *arg32 =
5074             (IOCTL32_Command_struct __user *) arg;
5075         IOCTL_Command_struct arg64;
5076         IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
5077         int err;
5078         u32 cp;
5079
5080         memset(&arg64, 0, sizeof(arg64));
5081         err = 0;
5082         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5083                            sizeof(arg64.LUN_info));
5084         err |= copy_from_user(&arg64.Request, &arg32->Request,
5085                            sizeof(arg64.Request));
5086         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5087                            sizeof(arg64.error_info));
5088         err |= get_user(arg64.buf_size, &arg32->buf_size);
5089         err |= get_user(cp, &arg32->buf);
5090         arg64.buf = compat_ptr(cp);
5091         err |= copy_to_user(p, &arg64, sizeof(arg64));
5092
5093         if (err)
5094                 return -EFAULT;
5095
5096         err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
5097         if (err)
5098                 return err;
5099         err |= copy_in_user(&arg32->error_info, &p->error_info,
5100                          sizeof(arg32->error_info));
5101         if (err)
5102                 return -EFAULT;
5103         return err;
5104 }
5105
5106 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
5107         int cmd, void __user *arg)
5108 {
5109         BIG_IOCTL32_Command_struct __user *arg32 =
5110             (BIG_IOCTL32_Command_struct __user *) arg;
5111         BIG_IOCTL_Command_struct arg64;
5112         BIG_IOCTL_Command_struct __user *p =
5113             compat_alloc_user_space(sizeof(arg64));
5114         int err;
5115         u32 cp;
5116
5117         memset(&arg64, 0, sizeof(arg64));
5118         err = 0;
5119         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5120                            sizeof(arg64.LUN_info));
5121         err |= copy_from_user(&arg64.Request, &arg32->Request,
5122                            sizeof(arg64.Request));
5123         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5124                            sizeof(arg64.error_info));
5125         err |= get_user(arg64.buf_size, &arg32->buf_size);
5126         err |= get_user(arg64.malloc_size, &arg32->malloc_size);
5127         err |= get_user(cp, &arg32->buf);
5128         arg64.buf = compat_ptr(cp);
5129         err |= copy_to_user(p, &arg64, sizeof(arg64));
5130
5131         if (err)
5132                 return -EFAULT;
5133
5134         err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
5135         if (err)
5136                 return err;
5137         err |= copy_in_user(&arg32->error_info, &p->error_info,
5138                          sizeof(arg32->error_info));
5139         if (err)
5140                 return -EFAULT;
5141         return err;
5142 }
5143
5144 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5145 {
5146         switch (cmd) {
5147         case CCISS_GETPCIINFO:
5148         case CCISS_GETINTINFO:
5149         case CCISS_SETINTINFO:
5150         case CCISS_GETNODENAME:
5151         case CCISS_SETNODENAME:
5152         case CCISS_GETHEARTBEAT:
5153         case CCISS_GETBUSTYPES:
5154         case CCISS_GETFIRMVER:
5155         case CCISS_GETDRIVVER:
5156         case CCISS_REVALIDVOLS:
5157         case CCISS_DEREGDISK:
5158         case CCISS_REGNEWDISK:
5159         case CCISS_REGNEWD:
5160         case CCISS_RESCANDISK:
5161         case CCISS_GETLUNINFO:
5162                 return hpsa_ioctl(dev, cmd, arg);
5163
5164         case CCISS_PASSTHRU32:
5165                 return hpsa_ioctl32_passthru(dev, cmd, arg);
5166         case CCISS_BIG_PASSTHRU32:
5167                 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
5168
5169         default:
5170                 return -ENOIOCTLCMD;
5171         }
5172 }
5173 #endif
5174
5175 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
5176 {
5177         struct hpsa_pci_info pciinfo;
5178
5179         if (!argp)
5180                 return -EINVAL;
5181         pciinfo.domain = pci_domain_nr(h->pdev->bus);
5182         pciinfo.bus = h->pdev->bus->number;
5183         pciinfo.dev_fn = h->pdev->devfn;
5184         pciinfo.board_id = h->board_id;
5185         if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
5186                 return -EFAULT;
5187         return 0;
5188 }
5189
5190 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
5191 {
5192         DriverVer_type DriverVer;
5193         unsigned char vmaj, vmin, vsubmin;
5194         int rc;
5195
5196         rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
5197                 &vmaj, &vmin, &vsubmin);
5198         if (rc != 3) {
5199                 dev_info(&h->pdev->dev, "driver version string '%s' "
5200                         "unrecognized.", HPSA_DRIVER_VERSION);
5201                 vmaj = 0;
5202                 vmin = 0;
5203                 vsubmin = 0;
5204         }
5205         DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
5206         if (!argp)
5207                 return -EINVAL;
5208         if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
5209                 return -EFAULT;
5210         return 0;
5211 }
5212
5213 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5214 {
5215         IOCTL_Command_struct iocommand;
5216         struct CommandList *c;
5217         char *buff = NULL;
5218         u64 temp64;
5219         int rc = 0;
5220
5221         if (!argp)
5222                 return -EINVAL;
5223         if (!capable(CAP_SYS_RAWIO))
5224                 return -EPERM;
5225         if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
5226                 return -EFAULT;
5227         if ((iocommand.buf_size < 1) &&
5228             (iocommand.Request.Type.Direction != XFER_NONE)) {
5229                 return -EINVAL;
5230         }
5231         if (iocommand.buf_size > 0) {
5232                 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
5233                 if (buff == NULL)
5234                         return -ENOMEM;
5235                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5236                         /* Copy the data into the buffer we created */
5237                         if (copy_from_user(buff, iocommand.buf,
5238                                 iocommand.buf_size)) {
5239                                 rc = -EFAULT;
5240                                 goto out_kfree;
5241                         }
5242                 } else {
5243                         memset(buff, 0, iocommand.buf_size);
5244                 }
5245         }
5246         c = cmd_alloc(h);
5247         if (c == NULL) {
5248                 rc = -ENOMEM;
5249                 goto out_kfree;
5250         }
5251         /* Fill in the command type */
5252         c->cmd_type = CMD_IOCTL_PEND;
5253         /* Fill in Command Header */
5254         c->Header.ReplyQueue = 0; /* unused in simple mode */
5255         if (iocommand.buf_size > 0) {   /* buffer to fill */
5256                 c->Header.SGList = 1;
5257                 c->Header.SGTotal = cpu_to_le16(1);
5258         } else  { /* no buffers to fill */
5259                 c->Header.SGList = 0;
5260                 c->Header.SGTotal = cpu_to_le16(0);
5261         }
5262         memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
5263
5264         /* Fill in Request block */
5265         memcpy(&c->Request, &iocommand.Request,
5266                 sizeof(c->Request));
5267
5268         /* Fill in the scatter gather information */
5269         if (iocommand.buf_size > 0) {
5270                 temp64 = pci_map_single(h->pdev, buff,
5271                         iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5272                 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
5273                         c->SG[0].Addr = cpu_to_le64(0);
5274                         c->SG[0].Len = cpu_to_le32(0);
5275                         rc = -ENOMEM;
5276                         goto out;
5277                 }
5278                 c->SG[0].Addr = cpu_to_le64(temp64);
5279                 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
5280                 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
5281         }
5282         rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
5283         if (iocommand.buf_size > 0)
5284                 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
5285         check_ioctl_unit_attention(h, c);
5286         if (rc) {
5287                 rc = -EIO;
5288                 goto out;
5289         }
5290
5291         /* Copy the error information out */
5292         memcpy(&iocommand.error_info, c->err_info,
5293                 sizeof(iocommand.error_info));
5294         if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
5295                 rc = -EFAULT;
5296                 goto out;
5297         }
5298         if ((iocommand.Request.Type.Direction & XFER_READ) &&
5299                 iocommand.buf_size > 0) {
5300                 /* Copy the data out of the buffer we created */
5301                 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
5302                         rc = -EFAULT;
5303                         goto out;
5304                 }
5305         }
5306 out:
5307         cmd_free(h, c);
5308 out_kfree:
5309         kfree(buff);
5310         return rc;
5311 }
5312
5313 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5314 {
5315         BIG_IOCTL_Command_struct *ioc;
5316         struct CommandList *c;
5317         unsigned char **buff = NULL;
5318         int *buff_size = NULL;
5319         u64 temp64;
5320         BYTE sg_used = 0;
5321         int status = 0;
5322         u32 left;
5323         u32 sz;
5324         BYTE __user *data_ptr;
5325
5326         if (!argp)
5327                 return -EINVAL;
5328         if (!capable(CAP_SYS_RAWIO))
5329                 return -EPERM;
5330         ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
5332         if (!ioc) {
5333                 status = -ENOMEM;
5334                 goto cleanup1;
5335         }
5336         if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5337                 status = -EFAULT;
5338                 goto cleanup1;
5339         }
5340         if ((ioc->buf_size < 1) &&
5341             (ioc->Request.Type.Direction != XFER_NONE)) {
5342                 status = -EINVAL;
5343                 goto cleanup1;
5344         }
5345         /* Check kmalloc limits using all SGs */
5346         if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5347                 status = -EINVAL;
5348                 goto cleanup1;
5349         }
5350         if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
5351                 status = -EINVAL;
5352                 goto cleanup1;
5353         }
5354         buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
5355         if (!buff) {
5356                 status = -ENOMEM;
5357                 goto cleanup1;
5358         }
5359         buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
5360         if (!buff_size) {
5361                 status = -ENOMEM;
5362                 goto cleanup1;
5363         }
5364         left = ioc->buf_size;
5365         data_ptr = ioc->buf;
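        /*
         * Split the user buffer into kernel chunks of at most malloc_size
         * bytes, one SG entry per chunk; e.g. a 70 KiB buffer with a
         * 32 KiB malloc_size becomes chunks of 32, 32 and 6 KiB
         * (sg_used == 3).
         */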
5366         while (left) {
5367                 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5368                 buff_size[sg_used] = sz;
5369                 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5370                 if (buff[sg_used] == NULL) {
5371                         status = -ENOMEM;
5372                         goto cleanup1;
5373                 }
5374                 if (ioc->Request.Type.Direction & XFER_WRITE) {
5375                         if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5376                                 status = -EFAULT;
5377                                 goto cleanup1;
5378                         }
5379                 } else
5380                         memset(buff[sg_used], 0, sz);
5381                 left -= sz;
5382                 data_ptr += sz;
5383                 sg_used++;
5384         }
5385         c = cmd_alloc(h);
5386         if (c == NULL) {
5387                 status = -ENOMEM;
5388                 goto cleanup1;
5389         }
5390         c->cmd_type = CMD_IOCTL_PEND;
5391         c->Header.ReplyQueue = 0;
5392         c->Header.SGList = (u8) sg_used;
5393         c->Header.SGTotal = cpu_to_le16(sg_used);
5394         memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5395         memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5396         if (ioc->buf_size > 0) {
5397                 int i;
5398                 for (i = 0; i < sg_used; i++) {
5399                         temp64 = pci_map_single(h->pdev, buff[i],
5400                                     buff_size[i], PCI_DMA_BIDIRECTIONAL);
5401                         if (dma_mapping_error(&h->pdev->dev,
5402                                                         (dma_addr_t) temp64)) {
5403                                 c->SG[i].Addr = cpu_to_le64(0);
5404                                 c->SG[i].Len = cpu_to_le32(0);
5405                                 hpsa_pci_unmap(h->pdev, c, i,
5406                                         PCI_DMA_BIDIRECTIONAL);
5407                                 status = -ENOMEM;
5408                                 goto cleanup0;
5409                         }
5410                         c->SG[i].Addr = cpu_to_le64(temp64);
5411                         c->SG[i].Len = cpu_to_le32(buff_size[i]);
5412                         c->SG[i].Ext = cpu_to_le32(0);
5413                 }
5414                 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
5415         }
5416         status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
5417         if (sg_used)
5418                 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
5419         check_ioctl_unit_attention(h, c);
5420         if (status) {
5421                 status = -EIO;
5422                 goto cleanup0;
5423         }
5424
5425         /* Copy the error information out */
5426         memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5427         if (copy_to_user(argp, ioc, sizeof(*ioc))) {
5428                 status = -EFAULT;
5429                 goto cleanup0;
5430         }
5431         if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
5432                 int i;
5433
5434                 /* Copy the data out of the buffer we created */
5435                 BYTE __user *ptr = ioc->buf;
5436                 for (i = 0; i < sg_used; i++) {
5437                         if (copy_to_user(ptr, buff[i], buff_size[i])) {
5438                                 status = -EFAULT;
5439                                 goto cleanup0;
5440                         }
5441                         ptr += buff_size[i];
5442                 }
5443         }
5444         status = 0;
5445 cleanup0:
5446         cmd_free(h, c);
5447 cleanup1:
5448         if (buff) {
5449                 int i;
5450
5451                 for (i = 0; i < sg_used; i++)
5452                         kfree(buff[i]);
5453                 kfree(buff);
5454         }
5455         kfree(buff_size);
5456         kfree(ioc);
5457         return status;
5458 }
5459
5460 static void check_ioctl_unit_attention(struct ctlr_info *h,
5461         struct CommandList *c)
5462 {
5463         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5464                         c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5465                 (void) check_for_unit_attention(h, c);
5466 }
5467
5468 /*
5469  * ioctl
5470  */
5471 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5472 {
5473         struct ctlr_info *h;
5474         void __user *argp = (void __user *)arg;
5475         int rc;
5476
5477         h = sdev_to_hba(dev);
5478
5479         switch (cmd) {
5480         case CCISS_DEREGDISK:
5481         case CCISS_REGNEWDISK:
5482         case CCISS_REGNEWD:
5483                 hpsa_scan_start(h->scsi_host);
5484                 return 0;
5485         case CCISS_GETPCIINFO:
5486                 return hpsa_getpciinfo_ioctl(h, argp);
5487         case CCISS_GETDRIVVER:
5488                 return hpsa_getdrivver_ioctl(h, argp);
5489         case CCISS_PASSTHRU:
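                /*
                 * h->passthru_cmds_avail throttles concurrent passthru
                 * requests: atomic_dec_if_positive() refuses to take the
                 * counter below zero and returns a negative value when no
                 * slot is free, in which case we ask the caller to retry.
                 */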
5490                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5491                         return -EAGAIN;
5492                 rc = hpsa_passthru_ioctl(h, argp);
5493                 atomic_inc(&h->passthru_cmds_avail);
5494                 return rc;
5495         case CCISS_BIG_PASSTHRU:
5496                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5497                         return -EAGAIN;
5498                 rc = hpsa_big_passthru_ioctl(h, argp);
5499                 atomic_inc(&h->passthru_cmds_avail);
5500                 return rc;
5501         default:
5502                 return -ENOTTY;
5503         }
5504 }
5505
5506 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5507                                 u8 reset_type)
5508 {
5509         struct CommandList *c;
5510
5511         c = cmd_alloc(h);
5512         if (!c)
5513                 return -ENOMEM;
5514         /* fill_cmd can't fail here, no data buffer to map */
5515         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
5516                 RAID_CTLR_LUNID, TYPE_MSG);
5517         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
5518         c->waiting = NULL;
5519         enqueue_cmd_and_start_io(h, c);
5520         /* Don't wait for completion, the reset won't complete.  Don't free
5521          * the command either.  This is the last command we will send before
5522          * re-initializing everything, so it doesn't matter and won't leak.
5523          */
5524         return 0;
5525 }
5526
5527 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5528         void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
5529         int cmd_type)
5530 {
5531         int pci_dir = XFER_NONE;
5532         u64 tag; /* for commands to be aborted */
5533
5534         c->cmd_type = CMD_IOCTL_PEND;
5535         c->Header.ReplyQueue = 0;
5536         if (buff != NULL && size > 0) {
5537                 c->Header.SGList = 1;
5538                 c->Header.SGTotal = cpu_to_le16(1);
5539         } else {
5540                 c->Header.SGList = 0;
5541                 c->Header.SGTotal = cpu_to_le16(0);
5542         }
5543         memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5544
5545         if (cmd_type == TYPE_CMD) {
5546                 switch (cmd) {
5547                 case HPSA_INQUIRY:
5548                         /* are we trying to read a vital product page */
5549                         if (page_code & VPD_PAGE) {
5550                                 c->Request.CDB[1] = 0x01;
5551                                 c->Request.CDB[2] = (page_code & 0xff);
5552                         }
5553                         c->Request.CDBLen = 6;
5554                         c->Request.type_attr_dir =
5555                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5556                         c->Request.Timeout = 0;
5557                         c->Request.CDB[0] = HPSA_INQUIRY;
5558                         c->Request.CDB[4] = size & 0xFF;
5559                         break;
5560                 case HPSA_REPORT_LOG:
5561                 case HPSA_REPORT_PHYS:
5562                         /* Talking to the controller, so it's a physical
5563                            command; mode = 00, target = 0.  Nothing to write.
5564                          */
5565                         c->Request.CDBLen = 12;
5566                         c->Request.type_attr_dir =
5567                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5568                         c->Request.Timeout = 0;
5569                         c->Request.CDB[0] = cmd;
5570                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5571                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5572                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5573                         c->Request.CDB[9] = size & 0xFF;
5574                         break;
5575                 case HPSA_CACHE_FLUSH:
5576                         c->Request.CDBLen = 12;
5577                         c->Request.type_attr_dir =
5578                                         TYPE_ATTR_DIR(cmd_type,
5579                                                 ATTR_SIMPLE, XFER_WRITE);
5580                         c->Request.Timeout = 0;
5581                         c->Request.CDB[0] = BMIC_WRITE;
5582                         c->Request.CDB[6] = BMIC_CACHE_FLUSH;
5583                         c->Request.CDB[7] = (size >> 8) & 0xFF;
5584                         c->Request.CDB[8] = size & 0xFF;
5585                         break;
5586                 case TEST_UNIT_READY:
5587                         c->Request.CDBLen = 6;
5588                         c->Request.type_attr_dir =
5589                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5590                         c->Request.Timeout = 0;
5591                         break;
5592                 case HPSA_GET_RAID_MAP:
5593                         c->Request.CDBLen = 12;
5594                         c->Request.type_attr_dir =
5595                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5596                         c->Request.Timeout = 0;
5597                         c->Request.CDB[0] = HPSA_CISS_READ;
5598                         c->Request.CDB[1] = cmd;
5599                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5600                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5601                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5602                         c->Request.CDB[9] = size & 0xFF;
5603                         break;
5604                 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5605                         c->Request.CDBLen = 10;
5606                         c->Request.type_attr_dir =
5607                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5608                         c->Request.Timeout = 0;
5609                         c->Request.CDB[0] = BMIC_READ;
5610                         c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5611                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5612                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5613                         break;
5614                 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
5615                         c->Request.CDBLen = 10;
5616                         c->Request.type_attr_dir =
5617                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5618                         c->Request.Timeout = 0;
5619                         c->Request.CDB[0] = BMIC_READ;
5620                         c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
5621                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5622                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5623                         break;
5624                 default:
5625                         dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
5626                         BUG();
5627                         return -1;
5628                 }
5629         } else if (cmd_type == TYPE_MSG) {
5630                 switch (cmd) {
5631
5632                 case  HPSA_DEVICE_RESET_MSG:
5633                         c->Request.CDBLen = 16;
5634                         c->Request.type_attr_dir =
5635                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5636                         c->Request.Timeout = 0; /* Don't time out */
5637                         memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5638                         c->Request.CDB[0] =  cmd;
5639                         c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
5640                         /* If bytes 4-7 are zero, it means reset the LunID device */
5642                         c->Request.CDB[4] = 0x00;
5643                         c->Request.CDB[5] = 0x00;
5644                         c->Request.CDB[6] = 0x00;
5645                         c->Request.CDB[7] = 0x00;
5646                         break;
5647                 case  HPSA_ABORT_MSG:
5648                         memcpy(&tag, buff, sizeof(tag));
5649                         dev_dbg(&h->pdev->dev,
5650                                 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
5651                                 tag, c->Header.tag);
5652                         c->Request.CDBLen = 16;
5653                         c->Request.type_attr_dir =
5654                                         TYPE_ATTR_DIR(cmd_type,
5655                                                 ATTR_SIMPLE, XFER_WRITE);
5656                         c->Request.Timeout = 0; /* Don't time out */
5657                         c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5658                         c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5659                         c->Request.CDB[2] = 0x00; /* reserved */
5660                         c->Request.CDB[3] = 0x00; /* reserved */
5661                         /* Tag to abort goes in CDB[4]-CDB[11] */
5662                         memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
5663                         c->Request.CDB[12] = 0x00; /* reserved */
5664                         c->Request.CDB[13] = 0x00; /* reserved */
5665                         c->Request.CDB[14] = 0x00; /* reserved */
5666                         c->Request.CDB[15] = 0x00; /* reserved */
5667                         break;
5668                 default:
5669                         dev_warn(&h->pdev->dev, "unknown message type %d\n",
5670                                 cmd);
5671                         BUG();
5672                 }
5673         } else {
5674                 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5675                 BUG();
5676         }
5677
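        /*
         * Translate the CISS transfer direction encoded in type_attr_dir
         * above into the PCI DMA direction used to map the data buffer.
         */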
5678         switch (GET_DIR(c->Request.type_attr_dir)) {
5679         case XFER_READ:
5680                 pci_dir = PCI_DMA_FROMDEVICE;
5681                 break;
5682         case XFER_WRITE:
5683                 pci_dir = PCI_DMA_TODEVICE;
5684                 break;
5685         case XFER_NONE:
5686                 pci_dir = PCI_DMA_NONE;
5687                 break;
5688         default:
5689                 pci_dir = PCI_DMA_BIDIRECTIONAL;
5690         }
5691         if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5692                 return -1;
5693         return 0;
5694 }
5695
5696 /*
5697  * Map (physical) PCI mem into (virtual) kernel space
5698  */
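/*
 * e.g. base 0xfe001250 with 4 KiB pages: page_base is 0xfe001000 and
 * page_offs is 0x250; we map page_offs + size bytes and hand the caller
 * back the pointer at that offset.
 */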
5699 static void __iomem *remap_pci_mem(ulong base, ulong size)
5700 {
5701         ulong page_base = ((ulong) base) & PAGE_MASK;
5702         ulong page_offs = ((ulong) base) - page_base;
5703         void __iomem *page_remapped = ioremap_nocache(page_base,
5704                 page_offs + size);
5705
5706         return page_remapped ? (page_remapped + page_offs) : NULL;
5707 }
5708
5709 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
5710 {
5711         return h->access.command_completed(h, q);
5712 }
5713
5714 static inline bool interrupt_pending(struct ctlr_info *h)
5715 {
5716         return h->access.intr_pending(h);
5717 }
5718
5719 static inline long interrupt_not_for_us(struct ctlr_info *h)
5720 {
5721         return (h->access.intr_pending(h) == 0) ||
5722                 (h->interrupts_enabled == 0);
5723 }
5724
5725 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5726         u32 raw_tag)
5727 {
5728         if (unlikely(tag_index >= h->nr_cmds)) {
5729                 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5730                 return 1;
5731         }
5732         return 0;
5733 }
5734
5735 static inline void finish_cmd(struct CommandList *c)
5736 {
5737         dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
5738         if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5739                         || c->cmd_type == CMD_IOACCEL2))
5740                 complete_scsi_command(c);
5741         else if (c->cmd_type == CMD_IOCTL_PEND)
5742                 complete(c->waiting);
5743 }
5744
5745
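/*
 * Completed tags come back with status flags in their low-order bits:
 * two bits in simple mode, DIRECT_LOOKUP_SHIFT bits in performant mode.
 * Masking them off recovers the tag value the driver assigned.
 */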
5746 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
5747 {
5748 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5749 #define HPSA_SIMPLE_ERROR_BITS 0x03
5750         if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
5751                 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5752         return tag & ~HPSA_PERF_ERROR_BITS;
5753 }
5754
5755 /* process completion of an indexed ("direct lookup") command */
5756 static inline void process_indexed_cmd(struct ctlr_info *h,
5757         u32 raw_tag)
5758 {
5759         u32 tag_index;
5760         struct CommandList *c;
5761
5762         tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
5763         if (!bad_tag(h, tag_index, raw_tag)) {
5764                 c = h->cmd_pool + tag_index;
5765                 finish_cmd(c);
5766         }
5767 }
5768
5769 /* Some controllers, like the P400, will give us one interrupt
5770  * after a soft reset, even if we turned interrupts off.
5771  * Only need to check for this in the hpsa_xxx_discard_completions
5772  * functions.
5773  */
5774 static int ignore_bogus_interrupt(struct ctlr_info *h)
5775 {
5776         if (likely(!reset_devices))
5777                 return 0;
5778
5779         if (likely(h->interrupts_enabled))
5780                 return 0;
5781
5782         dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5783                 "(known firmware bug.)  Ignoring.\n");
5784
5785         return 1;
5786 }
5787
5788 /*
5789  * Convert &h->q[x] (passed to interrupt handlers) back to h.
5790  * Relies on (h->q[x] == x) being true for x such that
5791  * 0 <= x < MAX_REPLY_QUEUES.
5792  */
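/*
 * For example, if queue == &h->q[2] then *queue == 2, so
 * (queue - *queue) == &h->q[0] and container_of() recovers h.
 */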
5793 static struct ctlr_info *queue_to_hba(u8 *queue)
5794 {
5795         return container_of((queue - *queue), struct ctlr_info, q[0]);
5796 }
5797
5798 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5799 {
5800         struct ctlr_info *h = queue_to_hba(queue);
5801         u8 q = *(u8 *) queue;
5802         u32 raw_tag;
5803
5804         if (ignore_bogus_interrupt(h))
5805                 return IRQ_NONE;
5806
5807         if (interrupt_not_for_us(h))
5808                 return IRQ_NONE;
5809         h->last_intr_timestamp = get_jiffies_64();
5810         while (interrupt_pending(h)) {
5811                 raw_tag = get_next_completion(h, q);
5812                 while (raw_tag != FIFO_EMPTY)
5813                         raw_tag = next_command(h, q);
5814         }
5815         return IRQ_HANDLED;
5816 }
5817
5818 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
5819 {
5820         struct ctlr_info *h = queue_to_hba(queue);
5821         u32 raw_tag;
5822         u8 q = *(u8 *) queue;
5823
5824         if (ignore_bogus_interrupt(h))
5825                 return IRQ_NONE;
5826
5827         h->last_intr_timestamp = get_jiffies_64();
5828         raw_tag = get_next_completion(h, q);
5829         while (raw_tag != FIFO_EMPTY)
5830                 raw_tag = next_command(h, q);
5831         return IRQ_HANDLED;
5832 }
5833
5834 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
5835 {
5836         struct ctlr_info *h = queue_to_hba((u8 *) queue);
5837         u32 raw_tag;
5838         u8 q = *(u8 *) queue;
5839
5840         if (interrupt_not_for_us(h))
5841                 return IRQ_NONE;
5842         h->last_intr_timestamp = get_jiffies_64();
5843         while (interrupt_pending(h)) {
5844                 raw_tag = get_next_completion(h, q);
5845                 while (raw_tag != FIFO_EMPTY) {
5846                         process_indexed_cmd(h, raw_tag);
5847                         raw_tag = next_command(h, q);
5848                 }
5849         }
5850         return IRQ_HANDLED;
5851 }
5852
5853 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
5854 {
5855         struct ctlr_info *h = queue_to_hba(queue);
5856         u32 raw_tag;
5857         u8 q = *(u8 *) queue;
5858
5859         h->last_intr_timestamp = get_jiffies_64();
5860         raw_tag = get_next_completion(h, q);
5861         while (raw_tag != FIFO_EMPTY) {
5862                 process_indexed_cmd(h, raw_tag);
5863                 raw_tag = next_command(h, q);
5864         }
5865         return IRQ_HANDLED;
5866 }
5867
5868 /* Send a message CDB to the firmware. Careful, this only works
5869  * in simple mode, not performant mode due to the tag lookup.
5870  * We only ever use this immediately after a controller reset.
5871  */
5872 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5873                         unsigned char type)
5874 {
5875         struct Command {
5876                 struct CommandListHeader CommandHeader;
5877                 struct RequestBlock Request;
5878                 struct ErrDescriptor ErrorDescriptor;
5879         };
5880         struct Command *cmd;
5881         static const size_t cmd_sz = sizeof(*cmd) +
5882                                         sizeof(cmd->ErrorDescriptor);
5883         dma_addr_t paddr64;
5884         __le32 paddr32;
5885         u32 tag;
5886         void __iomem *vaddr;
5887         int i, err;
5888
5889         vaddr = pci_ioremap_bar(pdev, 0);
5890         if (vaddr == NULL)
5891                 return -ENOMEM;
5892
5893         /* The Inbound Post Queue only accepts 32-bit physical addresses for the
5894          * CCISS commands, so they must be allocated from the lower 4GiB of
5895          * memory.
5896          */
5897         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5898         if (err) {
5899                 iounmap(vaddr);
5900                 return err;
5901         }
5902
5903         cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5904         if (cmd == NULL) {
5905                 iounmap(vaddr);
5906                 return -ENOMEM;
5907         }
5908
5909         /* This must fit, because of the 32-bit consistent DMA mask.  Also,
5910          * although there's no guarantee, we assume that the address is at
5911          * least 4-byte aligned (most likely, it's page-aligned).
5912          */
5913         paddr32 = cpu_to_le32(paddr64);
5914
5915         cmd->CommandHeader.ReplyQueue = 0;
5916         cmd->CommandHeader.SGList = 0;
5917         cmd->CommandHeader.SGTotal = cpu_to_le16(0);
5918         cmd->CommandHeader.tag = cpu_to_le64(paddr64);
5919         memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5920
5921         cmd->Request.CDBLen = 16;
5922         cmd->Request.type_attr_dir =
5923                         TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
5924         cmd->Request.Timeout = 0; /* Don't time out */
5925         cmd->Request.CDB[0] = opcode;
5926         cmd->Request.CDB[1] = type;
5927         memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5928         cmd->ErrorDescriptor.Addr =
5929                         cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
5930         cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
5931
5932         writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
5933
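        /*
         * In simple mode the controller echoes the tag we stamped into
         * CommandHeader.tag (the command's bus address) through the
         * outbound reply register, possibly with the low error bits set,
         * so poll until our address comes back or we give up.
         */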
5934         for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5935                 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
5936                 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
5937                         break;
5938                 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5939         }
5940
5941         iounmap(vaddr);
5942
5943         /* we leak the DMA buffer here ... no choice since the controller could
5944          *  still complete the command.
5945          */
5946         if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5947                 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5948                         opcode, type);
5949                 return -ETIMEDOUT;
5950         }
5951
5952         pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5953
5954         if (tag & HPSA_ERROR_BIT) {
5955                 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5956                         opcode, type);
5957                 return -EIO;
5958         }
5959
5960         dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5961                 opcode, type);
5962         return 0;
5963 }
5964
5965 #define hpsa_noop(p) hpsa_message(p, 3, 0)
5966
5967 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5968         void __iomem *vaddr, u32 use_doorbell)
5969 {
5970
5971         if (use_doorbell) {
5972                 /* For everything after the P600, the PCI power state method
5973                  * of resetting the controller doesn't work, so we have this
5974                  * other way using the doorbell register.
5975                  */
5976                 dev_info(&pdev->dev, "using doorbell to reset controller\n");
5977                 writel(use_doorbell, vaddr + SA5_DOORBELL);
5978
5979                 /* PMC hardware guys tell us we need a 10 second delay after
5980                  * doorbell reset and before any attempt to talk to the board
5981                  * at all to ensure that this actually works and doesn't fall
5982                  * over in some weird corner cases.
5983                  */
5984                 msleep(10000);
5985         } else { /* Try to do it the PCI power state way */
5986
5987                 /* Quoting from the Open CISS Specification: "The Power
5988                  * Management Control/Status Register (CSR) controls the power
5989                  * state of the device.  The normal operating state is D0,
5990                  * CSR=00h.  The software off state is D3, CSR=03h.  To reset
5991                  * the controller, place the interface device in D3 then to D0,
5992                  * this causes a secondary PCI reset which will reset the
5993                  * controller." */
5994
5995                 int rc = 0;
5996
5997                 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5998
5999                 /* enter the D3hot power management state */
6000                 rc = pci_set_power_state(pdev, PCI_D3hot);
6001                 if (rc)
6002                         return rc;
6003
6004                 msleep(500);
6005
6006                 /* enter the D0 power management state */
6007                 rc = pci_set_power_state(pdev, PCI_D0);
6008                 if (rc)
6009                         return rc;
6010
6011                 /*
6012                  * The P600 requires a small delay when changing states.
6013                  * Otherwise we may think the board did not reset and we bail.
6014                  * This is for kdump only and is particular to the P600.
6015                  */
6016                 msleep(500);
6017         }
6018         return 0;
6019 }
6020
6021 static void init_driver_version(char *driver_version, int len)
6022 {
6023         memset(driver_version, 0, len);
6024         strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
6025 }
6026
6027 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
6028 {
6029         char *driver_version;
6030         int i, size = sizeof(cfgtable->driver_version);
6031
6032         driver_version = kmalloc(size, GFP_KERNEL);
6033         if (!driver_version)
6034                 return -ENOMEM;
6035
6036         init_driver_version(driver_version, size);
6037         for (i = 0; i < size; i++)
6038                 writeb(driver_version[i], &cfgtable->driver_version[i]);
6039         kfree(driver_version);
6040         return 0;
6041 }
6042
6043 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6044                                           unsigned char *driver_ver)
6045 {
6046         int i;
6047
6048         for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6049                 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6050 }
6051
6052 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
6053 {
6054
6055         char *driver_ver, *old_driver_ver;
6056         int rc, size = sizeof(cfgtable->driver_version);
6057
6058         old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6059         if (!old_driver_ver)
6060                 return -ENOMEM;
6061         driver_ver = old_driver_ver + size;
6062
6063         /* After a reset, the 32 bytes of "driver version" in the cfgtable
6064          * should have been changed, otherwise we know the reset failed.
6065          */
6066         init_driver_version(old_driver_ver, size);
6067         read_driver_ver_from_cfgtable(cfgtable, driver_ver);
6068         rc = !memcmp(driver_ver, old_driver_ver, size);
6069         kfree(old_driver_ver);
6070         return rc;
6071 }
6072 /* This does a hard reset of the controller using PCI power management
6073  * states or the doorbell register.
6074  */
6075 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
6076 {
6077         u64 cfg_offset;
6078         u32 cfg_base_addr;
6079         u64 cfg_base_addr_index;
6080         void __iomem *vaddr;
6081         unsigned long paddr;
6082         u32 misc_fw_support;
6083         int rc;
6084         struct CfgTable __iomem *cfgtable;
6085         u32 use_doorbell;
6086         u16 command_register;
6087
6088         /* For controllers as old as the P600, this is very nearly
6089          * the same thing as
6090          *
6091          * pci_save_state(pci_dev);
6092          * pci_set_power_state(pci_dev, PCI_D3hot);
6093          * pci_set_power_state(pci_dev, PCI_D0);
6094          * pci_restore_state(pci_dev);
6095          *
6096          * For controllers newer than the P600, the pci power state
6097          * method of resetting doesn't work so we have another way
6098          * using the doorbell register.
6099          */
6100
6101         if (!ctlr_is_resettable(board_id)) {
6102                 dev_warn(&pdev->dev, "Controller not resettable\n");
6103                 return -ENODEV;
6104         }
6105
6106         /* if controller is soft- but not hard resettable... */
6107         if (!ctlr_is_hard_resettable(board_id))
6108                 return -ENOTSUPP; /* try soft reset later. */
6109
6110         /* Save the PCI command register */
6111         pci_read_config_word(pdev, 4, &command_register);
6112         pci_save_state(pdev);
6113
6114         /* find the first memory BAR, so we can find the cfg table */
6115         rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
6116         if (rc)
6117                 return rc;
6118         vaddr = remap_pci_mem(paddr, 0x250);
6119         if (!vaddr)
6120                 return -ENOMEM;
6121
6122         /* find cfgtable in order to check if reset via doorbell is supported */
6123         rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
6124                                         &cfg_base_addr_index, &cfg_offset);
6125         if (rc)
6126                 goto unmap_vaddr;
6127         cfgtable = remap_pci_mem(pci_resource_start(pdev,
6128                        cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6129         if (!cfgtable) {
6130                 rc = -ENOMEM;
6131                 goto unmap_vaddr;
6132         }
6133         rc = write_driver_ver_to_cfgtable(cfgtable);
6134         if (rc)
6135                 goto unmap_cfgtable;
6136
6137         /* If reset via doorbell register is supported, use that.
6138          * There are two such methods.  Favor the newest method.
6139          */
6140         misc_fw_support = readl(&cfgtable->misc_fw_support);
6141         use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6142         if (use_doorbell) {
6143                 use_doorbell = DOORBELL_CTLR_RESET2;
6144         } else {
6145                 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6146                 if (use_doorbell) {
6147                         dev_warn(&pdev->dev,
6148                                 "Soft reset not supported. Firmware update is required.\n");
6149                         rc = -ENOTSUPP; /* try soft reset */
6150                         goto unmap_cfgtable;
6151                 }
6152         }
6153
6154         rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6155         if (rc)
6156                 goto unmap_cfgtable;
6157
6158         pci_restore_state(pdev);
6159         pci_write_config_word(pdev, 4, command_register);
6160
6161         /* Some devices (notably the HP Smart Array 5i Controller)
6162            need a little pause here */
6163         msleep(HPSA_POST_RESET_PAUSE_MSECS);
6164
6165         rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6166         if (rc) {
6167                 dev_warn(&pdev->dev,
6168                         "Failed waiting for board to become ready after hard reset\n");
6169                 goto unmap_cfgtable;
6170         }
6171
6172         rc = controller_reset_failed(cfgtable);
6173         if (rc < 0)
6174                 goto unmap_cfgtable;
6175         if (rc) {
6176                 dev_warn(&pdev->dev, "Unable to successfully reset "
6177                         "controller. Will try soft reset.\n");
6178                 rc = -ENOTSUPP;
6179         } else {
6180                 dev_info(&pdev->dev, "board ready after hard reset.\n");
6181         }
6182
6183 unmap_cfgtable:
6184         iounmap(cfgtable);
6185
6186 unmap_vaddr:
6187         iounmap(vaddr);
6188         return rc;
6189 }
6190
6191 /*
6192  * We cannot read the structure directly; for portability we must use
6193  * the I/O functions.  This is for debug only.
6195  */
6196 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
6197 {
6198 #ifdef HPSA_DEBUG
6199         int i;
6200         char temp_name[17];
6201
6202         dev_info(dev, "Controller Configuration information\n");
6203         dev_info(dev, "------------------------------------\n");
6204         for (i = 0; i < 4; i++)
6205                 temp_name[i] = readb(&(tb->Signature[i]));
6206         temp_name[4] = '\0';
6207         dev_info(dev, "   Signature = %s\n", temp_name);
6208         dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
6209         dev_info(dev, "   Transport methods supported = 0x%x\n",
6210                readl(&(tb->TransportSupport)));
6211         dev_info(dev, "   Transport methods active = 0x%x\n",
6212                readl(&(tb->TransportActive)));
6213         dev_info(dev, "   Requested transport Method = 0x%x\n",
6214                readl(&(tb->HostWrite.TransportRequest)));
6215         dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
6216                readl(&(tb->HostWrite.CoalIntDelay)));
6217         dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
6218                readl(&(tb->HostWrite.CoalIntCount)));
6219         dev_info(dev, "   Max outstanding commands = %d\n",
6220                readl(&(tb->CmdsOutMax)));
6221         dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6222         for (i = 0; i < 16; i++)
6223                 temp_name[i] = readb(&(tb->ServerName[i]));
6224         temp_name[16] = '\0';
6225         dev_info(dev, "   Server Name = %s\n", temp_name);
6226         dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
6227                 readl(&(tb->HeartBeat)));
6228 #endif                          /* HPSA_DEBUG */
6229 }
6230
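/*
 * Walk the BARs, accumulating 4 bytes of config space for each 32-bit
 * BAR and 8 for each 64-bit BAR, until the accumulated offset matches
 * the config-space address we were asked about; that identifies the
 * matching resource index.
 */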
6231 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6232 {
6233         int i, offset, mem_type, bar_type;
6234
6235         if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6236                 return 0;
6237         offset = 0;
6238         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6239                 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6240                 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6241                         offset += 4;
6242                 else {
6243                         mem_type = pci_resource_flags(pdev, i) &
6244                             PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6245                         switch (mem_type) {
6246                         case PCI_BASE_ADDRESS_MEM_TYPE_32:
6247                         case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6248                                 offset += 4;    /* 32 bit */
6249                                 break;
6250                         case PCI_BASE_ADDRESS_MEM_TYPE_64:
6251                                 offset += 8;
6252                                 break;
6253                         default:        /* reserved in PCI 2.2 */
6254                                 dev_warn(&pdev->dev,
6255                                        "base address is invalid\n");
6256                                 return -1;
6258                         }
6259                 }
6260                 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6261                         return i + 1;
6262         }
6263         return -1;
6264 }
6265
6266 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
6267  * controllers that are capable. If not, we use legacy INTx mode.
6268  */
6269
6270 static void hpsa_interrupt_mode(struct ctlr_info *h)
6271 {
6272 #ifdef CONFIG_PCI_MSI
6273         int err, i;
6274         struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6275
6276         for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6277                 hpsa_msix_entries[i].vector = 0;
6278                 hpsa_msix_entries[i].entry = i;
6279         }
6280
6281         /* Some boards advertise MSI but don't really support it */
6282         if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6283             (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
6284                 goto default_int_mode;
6285         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6286                 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
6287                 h->msix_vector = MAX_REPLY_QUEUES;
6288                 if (h->msix_vector > num_online_cpus())
6289                         h->msix_vector = num_online_cpus();
6290                 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6291                                             1, h->msix_vector);
6292                 if (err < 0) {
6293                         dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6294                         h->msix_vector = 0;
6295                         goto single_msi_mode;
6296                 } else if (err < h->msix_vector) {
6297                         dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
6298                                "available\n", err);
6299                 }
6300                 h->msix_vector = err;
6301                 for (i = 0; i < h->msix_vector; i++)
6302                         h->intr[i] = hpsa_msix_entries[i].vector;
6303                 return;
6304         }
6305 single_msi_mode:
6306         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6307                 dev_info(&h->pdev->dev, "MSI capable controller\n");
6308                 if (!pci_enable_msi(h->pdev))
6309                         h->msi_vector = 1;
6310                 else
6311                         dev_warn(&h->pdev->dev, "MSI init failed\n");
6312         }
6313 default_int_mode:
6314 #endif                          /* CONFIG_PCI_MSI */
6315         /* if we get here we're going to use the default interrupt mode */
6316         h->intr[h->intr_mode] = h->pdev->irq;
6317 }
6318
6319 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
6320 {
6321         int i;
6322         u32 subsystem_vendor_id, subsystem_device_id;
6323
6324         subsystem_vendor_id = pdev->subsystem_vendor;
6325         subsystem_device_id = pdev->subsystem_device;
6326         *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6327                     subsystem_vendor_id;
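        /*
         * e.g. subsystem device 0x3225 with subsystem vendor 0x103C (HP)
         * gives board_id 0x3225103C, the P600 tested for in
         * hpsa_p600_dma_prefetch_quirk().
         */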
6328
6329         for (i = 0; i < ARRAY_SIZE(products); i++)
6330                 if (*board_id == products[i].board_id)
6331                         return i;
6332
6333         if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6334                 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6335                 !hpsa_allow_any) {
6336                 dev_warn(&pdev->dev, "unrecognized board ID: "
6337                         "0x%08x, ignoring.\n", *board_id);
6338                 return -ENODEV;
6339         }
6340         return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6341 }
6342
6343 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6344                                     unsigned long *memory_bar)
6345 {
6346         int i;
6347
6348         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
6349                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
6350                         /* addressing mode bits already removed */
6351                         *memory_bar = pci_resource_start(pdev, i);
6352                         dev_dbg(&pdev->dev, "memory BAR = %lx\n",
6353                                 *memory_bar);
6354                         return 0;
6355                 }
6356         dev_warn(&pdev->dev, "no memory BAR found\n");
6357         return -ENODEV;
6358 }
6359
6360 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6361                                      int wait_for_ready)
6362 {
6363         int i, iterations;
6364         u32 scratchpad;
6365         if (wait_for_ready)
6366                 iterations = HPSA_BOARD_READY_ITERATIONS;
6367         else
6368                 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
6369
6370         for (i = 0; i < iterations; i++) {
6371                 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6372                 if (wait_for_ready) {
6373                         if (scratchpad == HPSA_FIRMWARE_READY)
6374                                 return 0;
6375                 } else {
6376                         if (scratchpad != HPSA_FIRMWARE_READY)
6377                                 return 0;
6378                 }
6379                 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6380         }
6381         dev_warn(&pdev->dev, "board not ready, timed out.\n");
6382         return -ENODEV;
6383 }
6384
6385 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6386                                u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6387                                u64 *cfg_offset)
6388 {
6389         *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6390         *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6391         *cfg_base_addr &= (u32) 0x0000ffff;
6392         *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6393         if (*cfg_base_addr_index == -1) {
6394                 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6395                 return -ENODEV;
6396         }
6397         return 0;
6398 }
6399
6400 static int hpsa_find_cfgtables(struct ctlr_info *h)
6401 {
6402         u64 cfg_offset;
6403         u32 cfg_base_addr;
6404         u64 cfg_base_addr_index;
6405         u32 trans_offset;
6406         int rc;
6407
6408         rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6409                 &cfg_base_addr_index, &cfg_offset);
6410         if (rc)
6411                 return rc;
6412         h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
6413                        cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
6414         if (!h->cfgtable) {
6415                 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
6416                 return -ENOMEM;
6417         }
6418         rc = write_driver_ver_to_cfgtable(h->cfgtable);
6419         if (rc)
6420                 return rc;
6421         /* Find performant mode table. */
6422         trans_offset = readl(&h->cfgtable->TransMethodOffset);
6423         h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6424                                 cfg_base_addr_index)+cfg_offset+trans_offset,
6425                                 sizeof(*h->transtable));
6426         if (!h->transtable)
6427                 return -ENOMEM;
6428         return 0;
6429 }
6430
6431 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
6432 {
6433 #define MIN_MAX_COMMANDS 16
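        /* The 16-command floor must still leave room for the driver's reserved commands. */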
6434         BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
6435
6436         h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
6437
6438         /* Limit commands in memory-limited kdump scenario. */
6439         if (reset_devices && h->max_commands > 32)
6440                 h->max_commands = 32;
6441
6442         if (h->max_commands < MIN_MAX_COMMANDS) {
6443                 dev_warn(&h->pdev->dev,
6444                         "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
6445                         h->max_commands,
6446                         MIN_MAX_COMMANDS);
6447                 h->max_commands = MIN_MAX_COMMANDS;
6448         }
6449 }
6450
6451 /* If the controller reports that the total max sg entries is greater than 512,
6452  * then we know that chained SG blocks work.  (Original smart arrays did not
6453  * support chained SG blocks and would return zero for max sg entries.)
6454  */
6455 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
6456 {
6457         return h->maxsgentries > 512;
6458 }
6459
6460 /* Interrogate the hardware for some limits:
6461  * max commands, max SG elements without chaining, and with chaining,
6462  * SG chain block size, etc.
6463  */
6464 static void hpsa_find_board_params(struct ctlr_info *h)
6465 {
6466         hpsa_get_max_perf_mode_cmds(h);
6467         h->nr_cmds = h->max_commands;
6468         h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
6469         h->fw_support = readl(&(h->cfgtable->misc_fw_support));
6470         if (hpsa_supports_chained_sg_blocks(h)) {
6471                 /* Limit in-command s/g elements to 32 to save DMA'able memory. */
6472                 h->max_cmd_sg_entries = 32;
6473                 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
6474                 h->maxsgentries--; /* save one for chain pointer */
6475         } else {
6476                 /*
6477                  * Original smart arrays supported at most 31 s/g entries
6478                  * embedded inline in the command (trying to use more
6479                  * would lock up the controller)
6480                  */
6481                 h->max_cmd_sg_entries = 31;
6482                 h->maxsgentries = 31; /* default to traditional values */
6483                 h->chainsize = 0;
6484         }
6485
6486         /* Find out what task management functions are supported and cache them */
6487         h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
6488         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6489                 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6490         if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6491                 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
6492 }
6493
6494 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6495 {
6496         if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
6497                 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
6498                 return false;
6499         }
6500         return true;
6501 }
6502
6503 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
6504 {
6505         u32 driver_support;
6506
6507         driver_support = readl(&(h->cfgtable->driver_support));
6508         /* Need to enable prefetch in the SCSI core for 6400 in x86 */
6509 #ifdef CONFIG_X86
6510         driver_support |= ENABLE_SCSI_PREFETCH;
6511 #endif
6512         driver_support |= ENABLE_UNIT_ATTN;
6513         writel(driver_support, &(h->cfgtable->driver_support));
6514 }
6515
6516 /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
6517  * in a prefetch beyond physical memory.
6518  */
6519 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
6520 {
6521         u32 dma_prefetch;
6522
6523         if (h->board_id != 0x3225103C)
6524                 return;
6525         dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
6526         dma_prefetch |= 0x8000;
6527         writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
6528 }
6529
6530 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
6531 {
6532         int i;
6533         u32 doorbell_value;
6534         unsigned long flags;
6535         /* wait until the clear_event_notify bit 6 is cleared by controller. */
6536         for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
6537                 spin_lock_irqsave(&h->lock, flags);
6538                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6539                 spin_unlock_irqrestore(&h->lock, flags);
6540                 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
6541                         goto done;
6542                 /* delay and try again */
6543                 msleep(CLEAR_EVENT_WAIT_INTERVAL);
6544         }
6545         return -ENODEV;
6546 done:
6547         return 0;
6548 }
6549
6550 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
6551 {
6552         int i;
6553         u32 doorbell_value;
6554         unsigned long flags;
6555
6556         /* under certain very rare conditions, this can take a while.
6557          * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
6558          * as we enter this code.)
6559          */
6560         for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
6561                 if (h->remove_in_progress)
6562                         goto done;
6563                 spin_lock_irqsave(&h->lock, flags);
6564                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6565                 spin_unlock_irqrestore(&h->lock, flags);
6566                 if (!(doorbell_value & CFGTBL_ChangeReq))
6567                         goto done;
6568                 /* delay and try again */
6569                 msleep(MODE_CHANGE_WAIT_INTERVAL);
6570         }
6571         return -ENODEV;
6572 done:
6573         return 0;
6574 }
6575
6576 /* return -ENODEV or other reason on error, 0 on success */
6577 static int hpsa_enter_simple_mode(struct ctlr_info *h)
6578 {
6579         u32 trans_support;
6580
6581         trans_support = readl(&(h->cfgtable->TransportSupport));
6582         if (!(trans_support & SIMPLE_MODE))
6583                 return -ENOTSUPP;
6584
6585         h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
6586
6587         /* Update the field, and then ring the doorbell */
6588         writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
6589         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
6590         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6591         if (hpsa_wait_for_mode_change_ack(h))
6592                 goto error;
6593         print_cfg_table(&h->pdev->dev, h->cfgtable);
6594         if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6595                 goto error;
6596         h->transMethod = CFGTBL_Trans_Simple;
6597         return 0;
6598 error:
6599         dev_err(&h->pdev->dev, "failed to enter simple mode\n");
6600         return -ENODEV;
6601 }
6602
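/*
 * Bring up the PCI side of the controller: identify the board, enable
 * the device, map its registers and config table, wait for the board
 * to become ready, and drop it into simple transport mode.
 * Returns 0 on success or a negative errno on failure.
 */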
6603 static int hpsa_pci_init(struct ctlr_info *h)
6604 {
6605         int prod_index, err;
6606
6607         prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6608         if (prod_index < 0)
6609                 return prod_index;
6610         h->product_name = products[prod_index].product_name;
6611         h->access = *(products[prod_index].access);
6612
6613         h->needs_abort_tags_swizzled =
6614                 ctlr_needs_abort_tags_swizzled(h->board_id);
6615
6616         pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6617                                PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6618
6619         err = pci_enable_device(h->pdev);
6620         if (err) {
6621                 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
6622                 return err;
6623         }
6624
6625         err = pci_request_regions(h->pdev, HPSA);
6626         if (err) {
6627                 dev_err(&h->pdev->dev,
6628                         "cannot obtain PCI resources, aborting\n");
6629                 return err;
6630         }
6631
6632         pci_set_master(h->pdev);
6633
6634         hpsa_interrupt_mode(h);
6635         err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
6636         if (err)
6637                 goto err_out_free_res;
6638         h->vaddr = remap_pci_mem(h->paddr, 0x250);
6639         if (!h->vaddr) {
6640                 err = -ENOMEM;
6641                 goto err_out_free_res;
6642         }
6643         err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
6644         if (err)
6645                 goto err_out_free_res;
6646         err = hpsa_find_cfgtables(h);
6647         if (err)
6648                 goto err_out_free_res;
6649         hpsa_find_board_params(h);
6650
6651         if (!hpsa_CISS_signature_present(h)) {
6652                 err = -ENODEV;
6653                 goto err_out_free_res;
6654         }
6655         hpsa_set_driver_support_bits(h);
6656         hpsa_p600_dma_prefetch_quirk(h);
6657         err = hpsa_enter_simple_mode(h);
6658         if (err)
6659                 goto err_out_free_res;
6660         return 0;
6661
6662 err_out_free_res:
6663         if (h->transtable)
6664                 iounmap(h->transtable);
6665         if (h->cfgtable)
6666                 iounmap(h->cfgtable);
6667         if (h->vaddr)
6668                 iounmap(h->vaddr);
6669         pci_disable_device(h->pdev);
6670         pci_release_regions(h->pdev);
6671         return err;
6672 }
6673
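/*
 * Fetch and cache a standard INQUIRY from the controller LUN; the
 * result is kept in h->hba_inquiry_data and left NULL if the inquiry
 * (or the allocation) fails.
 */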
6674 static void hpsa_hba_inquiry(struct ctlr_info *h)
6675 {
6676         int rc;
6677
6678 #define HBA_INQUIRY_BYTE_COUNT 64
6679         h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6680         if (!h->hba_inquiry_data)
6681                 return;
6682         rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6683                 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6684         if (rc != 0) {
6685                 kfree(h->hba_inquiry_data);
6686                 h->hba_inquiry_data = NULL;
6687         }
6688 }
6689
6690 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
6691 {
6692         int rc, i;
6693         void __iomem *vaddr;
6694
6695         if (!reset_devices)
6696                 return 0;
6697
6698         /* The kdump kernel is loading and we don't know what state the
6699          * PCI interface is in.  dev->enable_cnt is zero, so we call
6700          * enable+disable, wait a while, and then switch it back on.
6701          */
6702         rc = pci_enable_device(pdev);
6703         if (rc) {
6704                 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
6705                 return -ENODEV;
6706         }
6707         pci_disable_device(pdev);
6708         msleep(260);                    /* a randomly chosen number */
6709         rc = pci_enable_device(pdev);
6710         if (rc) {
6711                 dev_warn(&pdev->dev, "failed to enable device.\n");
6712                 return -ENODEV;
6713         }
6714
6715         pci_set_master(pdev);
6716
6717         vaddr = pci_ioremap_bar(pdev, 0);
6718         if (vaddr == NULL) {
6719                 rc = -ENOMEM;
6720                 goto out_disable;
6721         }
6722         writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
6723         iounmap(vaddr);
6724
6725         /* Reset the controller with a PCI power-cycle or via doorbell */
6726         rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
6727
6728         /* -ENOTSUPP here means we cannot reset the controller
6729          * but it's already (and still) up and running in
6730          * "performant mode".  Or, it might be a 640x, which can't reset
6731          * due to concerns about shared bbwc between the 6402/6404 pair.
6732          */
6733         if (rc)
6734                 goto out_disable;
6735
6736         /* Now try to get the controller to respond to a no-op */
6737         dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
6738         for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6739                 if (hpsa_noop(pdev) == 0)
6740                         break;
6741                 else
6742                         dev_warn(&pdev->dev, "no-op failed%s\n",
6743                                         (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
6744         }
6745
6746 out_disable:
6747
6748         pci_disable_device(pdev);
6749         return rc;
6750 }
6751
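/*
 * Allocate the command-slot bitmap plus DMA-coherent pools for command
 * blocks and error-info blocks.  Returns 0 on success, or -ENOMEM after
 * freeing whatever was allocated.
 */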
6752 static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
6753 {
6754         h->cmd_pool_bits = kzalloc(
6755                 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6756                 sizeof(unsigned long), GFP_KERNEL);
6757         h->cmd_pool = pci_alloc_consistent(h->pdev,
6758                     h->nr_cmds * sizeof(*h->cmd_pool),
6759                     &(h->cmd_pool_dhandle));
6760         h->errinfo_pool = pci_alloc_consistent(h->pdev,
6761                     h->nr_cmds * sizeof(*h->errinfo_pool),
6762                     &(h->errinfo_pool_dhandle));
6763         if ((h->cmd_pool_bits == NULL)
6764             || (h->cmd_pool == NULL)
6765             || (h->errinfo_pool == NULL)) {
6766                 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
6767                 goto clean_up;
6768         }
6769         return 0;
6770 clean_up:
6771         hpsa_free_cmd_pool(h);
6772         return -ENOMEM;
6773 }
6774
6775 static void hpsa_free_cmd_pool(struct ctlr_info *h)
6776 {
6777         kfree(h->cmd_pool_bits);
6778         if (h->cmd_pool)
6779                 pci_free_consistent(h->pdev,
6780                             h->nr_cmds * sizeof(struct CommandList),
6781                             h->cmd_pool, h->cmd_pool_dhandle);
6782         if (h->ioaccel2_cmd_pool)
6783                 pci_free_consistent(h->pdev,
6784                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6785                         h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
6786         if (h->errinfo_pool)
6787                 pci_free_consistent(h->pdev,
6788                             h->nr_cmds * sizeof(struct ErrorInfo),
6789                             h->errinfo_pool,
6790                             h->errinfo_pool_dhandle);
6791         if (h->ioaccel_cmd_pool)
6792                 pci_free_consistent(h->pdev,
6793                         h->nr_cmds * sizeof(struct io_accel1_cmd),
6794                         h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
6795 }
6796
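/* Spread the MSI-X vectors across the online CPUs via irq affinity hints. */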
6797 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6798 {
6799         int i, cpu;
6800
6801         cpu = cpumask_first(cpu_online_mask);
6802         for (i = 0; i < h->msix_vector; i++) {
6803                 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
6804                 cpu = cpumask_next(cpu, cpu_online_mask);
6805         }
6806 }
6807
6808 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
6809 static void hpsa_free_irqs(struct ctlr_info *h)
6810 {
6811         int i;
6812
6813         if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6814                 /* Single reply queue, only one irq to free */
6815                 i = h->intr_mode;
6816                 irq_set_affinity_hint(h->intr[i], NULL);
6817                 free_irq(h->intr[i], &h->q[i]);
6818                 return;
6819         }
6820
6821         for (i = 0; i < h->msix_vector; i++) {
6822                 irq_set_affinity_hint(h->intr[i], NULL);
6823                 free_irq(h->intr[i], &h->q[i]);
6824         }
6825         for (; i < MAX_REPLY_QUEUES; i++)
6826                 h->q[i] = 0;
6827 }
6828
6829 /* returns 0 on success; cleans up and returns -Enn on error */
6830 static int hpsa_request_irqs(struct ctlr_info *h,
6831         irqreturn_t (*msixhandler)(int, void *),
6832         irqreturn_t (*intxhandler)(int, void *))
6833 {
6834         int rc, i;
6835
6836         /*
6837          * initialize h->q[x] = x so that interrupt handlers know which
6838          * queue to process.
6839          */
6840         for (i = 0; i < MAX_REPLY_QUEUES; i++)
6841                 h->q[i] = (u8) i;
6842
6843         if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
6844                 /* If performant mode and MSI-X, use multiple reply queues */
6845                 for (i = 0; i < h->msix_vector; i++) {
6846                         rc = request_irq(h->intr[i], msixhandler,
6847                                         0, h->devname,
6848                                         &h->q[i]);
6849                         if (rc) {
6850                                 int j;
6851
6852                                 dev_err(&h->pdev->dev,
6853                                         "failed to get irq %d for %s\n",
6854                                        h->intr[i], h->devname);
6855                                 for (j = 0; j < i; j++) {
6856                                         free_irq(h->intr[j], &h->q[j]);
6857                                         h->q[j] = 0;
6858                                 }
6859                                 for (; j < MAX_REPLY_QUEUES; j++)
6860                                         h->q[j] = 0;
6861                                 return rc;
6862                         }
6863                 }
6864                 hpsa_irq_affinity_hints(h);
6865         } else {
6866                 /* Use single reply pool */
6867                 if (h->msix_vector > 0 || h->msi_vector) {
6868                         rc = request_irq(h->intr[h->intr_mode],
6869                                 msixhandler, 0, h->devname,
6870                                 &h->q[h->intr_mode]);
6871                 } else {
6872                         rc = request_irq(h->intr[h->intr_mode],
6873                                 intxhandler, IRQF_SHARED, h->devname,
6874                                 &h->q[h->intr_mode]);
6875                 }
6876         }
6877         if (rc) {
6878                 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
6879                        h->intr[h->intr_mode], h->devname);
6880                 return -ENODEV;
6881         }
6882         return 0;
6883 }
6884
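/*
 * Ask the controller to reset itself with a host command, then wait for
 * the board to drop out of, and return to, the ready state.
 */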
6885 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
6886 {
6887         if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
6888                 HPSA_RESET_TYPE_CONTROLLER)) {
6889                 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6890                 return -EIO;
6891         }
6892
6893         dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6894         if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
6895                 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6896                 return -1;
6897         }
6898
6899         dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6900         if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
6901                 dev_warn(&h->pdev->dev, "Board failed to become ready "
6902                         "after soft reset.\n");
6903                 return -1;
6904         }
6905
6906         return 0;
6907 }
6908
6909 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
6910 {
6911         hpsa_free_irqs(h);
6912 #ifdef CONFIG_PCI_MSI
6913         if (h->msix_vector) {
6914                 if (h->pdev->msix_enabled)
6915                         pci_disable_msix(h->pdev);
6916         } else if (h->msi_vector) {
6917                 if (h->pdev->msi_enabled)
6918                         pci_disable_msi(h->pdev);
6919         }
6920 #endif /* CONFIG_PCI_MSI */
6921 }
6922
6923 static void hpsa_free_reply_queues(struct ctlr_info *h)
6924 {
6925         int i;
6926
6927         for (i = 0; i < h->nreply_queues; i++) {
6928                 if (!h->reply_queue[i].head)
6929                         continue;
6930                 pci_free_consistent(h->pdev, h->reply_queue_size,
6931                         h->reply_queue[i].head, h->reply_queue[i].busaddr);
6932                 h->reply_queue[i].head = NULL;
6933                 h->reply_queue[i].busaddr = 0;
6934         }
6935 }
6936
6937 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6938 {
6939         hpsa_free_irqs_and_disable_msix(h);
6940         hpsa_free_sg_chain_blocks(h);
6941         hpsa_free_cmd_pool(h);
6942         kfree(h->ioaccel1_blockFetchTable);
6943         kfree(h->blockFetchTable);
6944         hpsa_free_reply_queues(h);
6945         if (h->vaddr)
6946                 iounmap(h->vaddr);
6947         if (h->transtable)
6948                 iounmap(h->transtable);
6949         if (h->cfgtable)
6950                 iounmap(h->cfgtable);
6951         pci_disable_device(h->pdev);
6952         pci_release_regions(h->pdev);
6953         kfree(h);
6954 }
6955
6956 /* Called when controller lockup detected. */
6957 static void fail_all_outstanding_cmds(struct ctlr_info *h)
6958 {
6959         int i, refcount;
6960         struct CommandList *c;
6961         int failcount = 0;
6962
6963         flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
6964         for (i = 0; i < h->nr_cmds; i++) {
6965                 c = h->cmd_pool + i;
6966                 refcount = atomic_inc_return(&c->refcount);
6967                 if (refcount > 1) {
6968                         c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
6969                         finish_cmd(c);
6970                         atomic_dec(&h->commands_outstanding);
6971                         failcount++;
6972                 }
6973                 cmd_free(h, c);
6974         }
6975         dev_warn(&h->pdev->dev,
6976                 "failed %d commands in fail_all\n", failcount);
6977 }
6978
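/* Publish the given lockup value to the per-cpu flag on every online CPU. */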
6979 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6980 {
6981         int cpu;
6982
6983         for_each_online_cpu(cpu) {
6984                 u32 *lockup_detected;
6985                 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
6986                 *lockup_detected = value;
6987         }
6988         wmb(); /* be sure the per-cpu variables are out to memory */
6989 }
6990
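/*
 * Handle a detected firmware lockup: mask controller interrupts, record
 * the scratchpad value as the lockup reason on every CPU, disable the
 * PCI device and fail all outstanding commands.
 */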
6991 static void controller_lockup_detected(struct ctlr_info *h)
6992 {
6993         unsigned long flags;
6994         u32 lockup_detected;
6995
6996         h->access.set_intr_mask(h, HPSA_INTR_OFF);
6997         spin_lock_irqsave(&h->lock, flags);
6998         lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6999         if (!lockup_detected) {
7000                 /* no heartbeat, but controller gave us a zero. */
7001                 dev_warn(&h->pdev->dev,
7002                         "lockup detected after %d but scratchpad register is zero\n",
7003                         h->heartbeat_sample_interval / HZ);
7004                 lockup_detected = 0xffffffff;
7005         }
7006         set_lockup_detected_for_all_cpus(h, lockup_detected);
7007         spin_unlock_irqrestore(&h->lock, flags);
7008         dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
7009                         lockup_detected, h->heartbeat_sample_interval / HZ);
7010         pci_disable_device(h->pdev);
7011         fail_all_outstanding_cmds(h);
7012 }
7013
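/*
 * Heartbeat-based lockup detection: if we have seen neither an interrupt
 * nor a change in the firmware heartbeat counter within the sampling
 * interval, declare the controller locked up.  Returns true on lockup.
 */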
7014 static int detect_controller_lockup(struct ctlr_info *h)
7015 {
7016         u64 now;
7017         u32 heartbeat;
7018         unsigned long flags;
7019
7020         now = get_jiffies_64();
7021         /* If we've received an interrupt recently, we're ok. */
7022         if (time_after64(h->last_intr_timestamp +
7023                                 (h->heartbeat_sample_interval), now))
7024                 return false;
7025
7026         /*
7027          * If we've already checked the heartbeat recently, we're ok.
7028          * This could happen if someone sends us a signal. We
7029          * otherwise don't care about signals in this thread.
7030          */
7031         if (time_after64(h->last_heartbeat_timestamp +
7032                                 (h->heartbeat_sample_interval), now))
7033                 return false;
7034
7035         /* If heartbeat has not changed since we last looked, we're not ok. */
7036         spin_lock_irqsave(&h->lock, flags);
7037         heartbeat = readl(&h->cfgtable->HeartBeat);
7038         spin_unlock_irqrestore(&h->lock, flags);
7039         if (h->last_heartbeat == heartbeat) {
7040                 controller_lockup_detected(h);
7041                 return true;
7042         }
7043
7044         /* We're ok. */
7045         h->last_heartbeat = heartbeat;
7046         h->last_heartbeat_timestamp = now;
7047         return false;
7048 }
7049
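/*
 * Acknowledge the controller events recorded in h->events.  For ioaccel
 * path state/config changes, accelerated I/O is quiesced first: offload
 * is turned off for every device and outstanding ioaccel commands are
 * drained before the events are cleared.
 */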
7050 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
7051 {
7052         int i;
7053         char *event_type;
7054
7055         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7056                 return;
7057
7058         /* Ask the controller to clear the events we're handling. */
7059         if ((h->transMethod & (CFGTBL_Trans_io_accel1
7060                         | CFGTBL_Trans_io_accel2)) &&
7061                 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
7062                  h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
7063
7064                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
7065                         event_type = "state change";
7066                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
7067                         event_type = "configuration change";
7068                 /* Stop sending new RAID offload reqs via the IO accelerator */
7069                 scsi_block_requests(h->scsi_host);
7070                 for (i = 0; i < h->ndevices; i++)
7071                         h->dev[i]->offload_enabled = 0;
7072                 hpsa_drain_accel_commands(h);
7073                 /* Set 'accelerator path config change' bit */
7074                 dev_warn(&h->pdev->dev,
7075                         "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7076                         h->events, event_type);
7077                 writel(h->events, &(h->cfgtable->clear_event_notify));
7078                 /* Set the "clear event notify field update" bit 6 */
7079                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7080                 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7081                 hpsa_wait_for_clear_event_notify_ack(h);
7082                 scsi_unblock_requests(h->scsi_host);
7083         } else {
7084                 /* Acknowledge controller notification events. */
7085                 writel(h->events, &(h->cfgtable->clear_event_notify));
7086                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7087                 hpsa_wait_for_clear_event_notify_ack(h);
7088 #if 0
7089                 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7090                 hpsa_wait_for_mode_change_ack(h);
7091 #endif
7092         }
7093         return;
7094 }
7095
7096 /* Check a register on the controller to see if there are configuration
7097  * changes (added/changed/removed logical drives, etc.) which mean that
7098  * we should rescan the controller for devices.
7099  * Also check flag for driver-initiated rescan.
7100  */
7101 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
7102 {
7103         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7104                 return 0;
7105
7106         h->events = readl(&(h->cfgtable->event_notify));
7107         return h->events & RESCAN_REQUIRED_EVENT_BITS;
7108 }
7109
7110 /*
7111  * Check if any of the offline devices have become ready
7112  */
7113 static int hpsa_offline_devices_ready(struct ctlr_info *h)
7114 {
7115         unsigned long flags;
7116         struct offline_device_entry *d;
7117         struct list_head *this, *tmp;
7118
7119         spin_lock_irqsave(&h->offline_device_lock, flags);
7120         list_for_each_safe(this, tmp, &h->offline_device_list) {
7121                 d = list_entry(this, struct offline_device_entry,
7122                                 offline_list);
7123                 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7124                 if (!hpsa_volume_offline(h, d->scsi3addr)) {
7125                         spin_lock_irqsave(&h->offline_device_lock, flags);
7126                         list_del(&d->offline_list);
7127                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
7128                         return 1;
7129                 }
7130                 spin_lock_irqsave(&h->offline_device_lock, flags);
7131         }
7132         spin_unlock_irqrestore(&h->offline_device_lock, flags);
7133         return 0;
7134 }
7135
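/*
 * Periodic rescan worker: if the controller reports configuration
 * changes or a previously offline volume has become ready, acknowledge
 * the events and start a scan, then re-arm itself unless the controller
 * is being removed.
 */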
7136 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
7137 {
7138         unsigned long flags;
7139         struct ctlr_info *h = container_of(to_delayed_work(work),
7140                                         struct ctlr_info, rescan_ctlr_work);
7141
7142
7143         if (h->remove_in_progress)
7144                 return;
7145
7146         if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
7147                 scsi_host_get(h->scsi_host);
7148                 hpsa_ack_ctlr_events(h);
7149                 hpsa_scan_start(h->scsi_host);
7150                 scsi_host_put(h->scsi_host);
7151         }
7152         spin_lock_irqsave(&h->lock, flags);
7153         if (!h->remove_in_progress)
7154                 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7155                                 h->heartbeat_sample_interval);
7156         spin_unlock_irqrestore(&h->lock, flags);
7157 }
7158
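/*
 * Periodic monitor worker: check for a firmware lockup and re-arm
 * itself unless a lockup was found or the controller is being removed.
 */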
7159 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
7160 {
7161         unsigned long flags;
7162         struct ctlr_info *h = container_of(to_delayed_work(work),
7163                                         struct ctlr_info, monitor_ctlr_work);
7164
7165         detect_controller_lockup(h);
7166         if (lockup_detected(h))
7167                 return;
7168
7169         spin_lock_irqsave(&h->lock, flags);
7170         if (!h->remove_in_progress)
7171                 schedule_delayed_work(&h->monitor_ctlr_work,
7172                                 h->heartbeat_sample_interval);
7173         spin_unlock_irqrestore(&h->lock, flags);
7174 }
7175
7176 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
7177                                                 char *name)
7178 {
7179         struct workqueue_struct *wq = NULL;
7180
7181         wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
7182         if (!wq)
7183                 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
7184
7185         return wq;
7186 }
7187
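/*
 * PCI probe entry point: optionally reset the controller for kdump,
 * allocate per-controller state, set up DMA, interrupts and command
 * pools, fall back to a soft reset if the hard reset was not possible,
 * and finally register with the SCSI midlayer and start the monitor
 * and rescan workers.
 */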
7188 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7189 {
7190         int dac, rc;
7191         struct ctlr_info *h;
7192         int try_soft_reset = 0;
7193         unsigned long flags;
7194         u32 board_id;
7195
7196         if (number_of_controllers == 0)
7197                 printk(KERN_INFO DRIVER_NAME "\n");
7198
7199         rc = hpsa_lookup_board_id(pdev, &board_id);
7200         if (rc < 0) {
7201                 dev_warn(&pdev->dev, "Board ID not found\n");
7202                 return rc;
7203         }
7204
7205         rc = hpsa_init_reset_devices(pdev, board_id);
7206         if (rc) {
7207                 if (rc != -ENOTSUPP)
7208                         return rc;
7209                 /* If the reset fails in a particular way (it has no way to do
7210                  * a proper hard reset, so returns -ENOTSUPP) we can try to do
7211                  * a soft reset once we get the controller configured up to the
7212                  * point that it can accept a command.
7213                  */
7214                 try_soft_reset = 1;
7215                 rc = 0;
7216         }
7217
7218 reinit_after_soft_reset:
7219
7220         /* Command structures must be aligned on a 32-byte boundary because
7221          * the 5 lower bits of the address are used by the hardware and by
7222          * the driver.  See comments in hpsa.h for more info.
7223          */
7224         BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
7225         h = kzalloc(sizeof(*h), GFP_KERNEL);
7226         if (!h)
7227                 return -ENOMEM;
7228
7229         h->pdev = pdev;
7230         h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
7231         INIT_LIST_HEAD(&h->offline_device_list);
7232         spin_lock_init(&h->lock);
7233         spin_lock_init(&h->offline_device_lock);
7234         spin_lock_init(&h->scan_lock);
7235         atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
7236         atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
7237
7238         h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
7239         if (!h->rescan_ctlr_wq) {
7240                 rc = -ENOMEM;
7241                 goto clean1;
7242         }
7243
7244         h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
7245         if (!h->resubmit_wq) {
7246                 rc = -ENOMEM;
7247                 goto clean1;
7248         }
7249
7250         /* Allocate and clear per-cpu variable lockup_detected */
7251         h->lockup_detected = alloc_percpu(u32);
7252         if (!h->lockup_detected) {
7253                 rc = -ENOMEM;
7254                 goto clean1;
7255         }
7256         set_lockup_detected_for_all_cpus(h, 0);
7257
7258         rc = hpsa_pci_init(h);
7259         if (rc != 0)
7260                 goto clean1;
7261
7262         sprintf(h->devname, HPSA "%d", number_of_controllers);
7263         h->ctlr = number_of_controllers;
7264         number_of_controllers++;
7265
7266         /* configure PCI DMA stuff */
7267         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7268         if (rc == 0) {
7269                 dac = 1;
7270         } else {
7271                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7272                 if (rc == 0) {
7273                         dac = 0;
7274                 } else {
7275                         dev_err(&pdev->dev, "no suitable DMA available\n");
7276                         goto clean1;
7277                 }
7278         }
7279
7280         /* make sure the board interrupts are off */
7281         h->access.set_intr_mask(h, HPSA_INTR_OFF);
7282
7283         if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
7284                 goto clean2;
7285         dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
7286                h->devname, pdev->device,
7287                h->intr[h->intr_mode], dac ? "" : " not");
7288         rc = hpsa_allocate_cmd_pool(h);
7289         if (rc)
7290                 goto clean2_and_free_irqs;
7291         if (hpsa_allocate_sg_chain_blocks(h))
7292                 goto clean4;
7293         init_waitqueue_head(&h->scan_wait_queue);
7294         init_waitqueue_head(&h->abort_cmd_wait_queue);
7295         h->scan_finished = 1; /* no scan currently in progress */
7296
7297         pci_set_drvdata(pdev, h);
7298         h->ndevices = 0;
7299         h->hba_mode_enabled = 0;
7300         h->scsi_host = NULL;
7301         spin_lock_init(&h->devlock);
7302         hpsa_put_ctlr_into_performant_mode(h);
7303
7304         /* At this point, the controller is ready to take commands.
7305          * Now, if reset_devices and the hard reset didn't work, try
7306          * the soft reset and see if that works.
7307          */
7308         if (try_soft_reset) {
7309
7310                 /* This is kind of gross.  We may or may not get a completion
7311                  * from the soft reset command, and if we do, then the value
7312                  * from the fifo may or may not be valid.  So, we wait 10 secs
7313          * after the reset, throwing away any completions we get during
7314                  * that time.  Unregister the interrupt handler and register
7315                  * fake ones to scoop up any residual completions.
7316                  */
7317                 spin_lock_irqsave(&h->lock, flags);
7318                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7319                 spin_unlock_irqrestore(&h->lock, flags);
7320                 hpsa_free_irqs(h);
7321                 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
7322                                         hpsa_intx_discard_completions);
7323                 if (rc) {
7324                         dev_warn(&h->pdev->dev,
7325                                 "Failed to request_irq after soft reset.\n");
7326                         goto clean4;
7327                 }
7328
7329                 rc = hpsa_kdump_soft_reset(h);
7330                 if (rc)
7331                         /* Neither hard nor soft reset worked, we're hosed. */
7332                         goto clean4;
7333
7334                 dev_info(&h->pdev->dev, "Board READY.\n");
7335                 dev_info(&h->pdev->dev,
7336                         "Waiting for stale completions to drain.\n");
7337                 h->access.set_intr_mask(h, HPSA_INTR_ON);
7338                 msleep(10000);
7339                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7340
7341                 rc = controller_reset_failed(h->cfgtable);
7342                 if (rc)
7343                         dev_info(&h->pdev->dev,
7344                                 "Soft reset appears to have failed.\n");
7345
7346                 /* since the controller's reset, we have to go back and re-init
7347                  * everything.  Easiest to just forget what we've done and do it
7348                  * all over again.
7349                  */
7350                 hpsa_undo_allocations_after_kdump_soft_reset(h);
7351                 try_soft_reset = 0;
7352                 if (rc)
7353                         /* don't go to clean4, we already unallocated */
7354                         return -ENODEV;
7355
7356                 goto reinit_after_soft_reset;
7357         }
7358
7359         /* Enable Accelerated IO path at driver layer */
7360         h->acciopath_status = 1;
7361
7362
7363         /* Turn the interrupts on so we can service requests */
7364         h->access.set_intr_mask(h, HPSA_INTR_ON);
7365
7366         hpsa_hba_inquiry(h);
7367         hpsa_register_scsi(h);  /* hook ourselves into SCSI subsystem */
7368
7369         /* Monitor the controller for firmware lockups */
7370         h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
7371         INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
7372         schedule_delayed_work(&h->monitor_ctlr_work,
7373                                 h->heartbeat_sample_interval);
7374         INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
7375         queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7376                                 h->heartbeat_sample_interval);
7377         return 0;
7378
7379 clean4:
7380         hpsa_free_sg_chain_blocks(h);
7381         hpsa_free_cmd_pool(h);
7382 clean2_and_free_irqs:
7383         hpsa_free_irqs(h);
7384 clean2:
7385 clean1:
7386         if (h->resubmit_wq)
7387                 destroy_workqueue(h->resubmit_wq);
7388         if (h->rescan_ctlr_wq)
7389                 destroy_workqueue(h->rescan_ctlr_wq);
7390         if (h->lockup_detected)
7391                 free_percpu(h->lockup_detected);
7392         kfree(h);
7393         return rc;
7394 }
7395
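/*
 * Send the controller a cache-flush command so that data held in its
 * battery-backed write cache is committed to disk (used at shutdown).
 */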
7396 static void hpsa_flush_cache(struct ctlr_info *h)
7397 {
7398         char *flush_buf;
7399         struct CommandList *c;
7400         int rc;
7401
7402         /* Don't bother trying to flush the cache if locked up */
7403         /* FIXME not necessary if do_simple_cmd does the check */
7404         if (unlikely(lockup_detected(h)))
7405                 return;
7406         flush_buf = kzalloc(4, GFP_KERNEL);
7407         if (!flush_buf)
7408                 return;
7409
7410         c = cmd_alloc(h);
7411         if (!c) {
7412                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
7413                 goto out_of_memory;
7414         }
7415         if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
7416                 RAID_CTLR_LUNID, TYPE_CMD)) {
7417                 goto out;
7418         }
7419         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
7420                                         PCI_DMA_TODEVICE, NO_TIMEOUT);
7421         if (rc)
7422                 goto out;
7423         if (c->err_info->CommandStatus != 0)
7424 out:
7425                 dev_warn(&h->pdev->dev,
7426                         "error flushing cache on controller\n");
7427         cmd_free(h, c);
7428 out_of_memory:
7429         kfree(flush_buf);
7430 }
7431
7432 static void hpsa_shutdown(struct pci_dev *pdev)
7433 {
7434         struct ctlr_info *h;
7435
7436         h = pci_get_drvdata(pdev);
7437         /* Turn board interrupts off and send the flush cache command;
7438          * sendcmd will turn off interrupts and send the flush
7439          * to write all data in the battery-backed cache to the disks.
7440          */
7441         hpsa_flush_cache(h);
7442         h->access.set_intr_mask(h, HPSA_INTR_OFF);
7443         hpsa_free_irqs_and_disable_msix(h);
7444 }
7445
7446 static void hpsa_free_device_info(struct ctlr_info *h)
7447 {
7448         int i;
7449
7450         for (i = 0; i < h->ndevices; i++)
7451                 kfree(h->dev[i]);
7452 }
7453
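/*
 * PCI remove entry point: stop the monitor and rescan workers, detach
 * from the SCSI midlayer, flush the cache and quiesce the board, then
 * release the controller's remaining resources.
 */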
7454 static void hpsa_remove_one(struct pci_dev *pdev)
7455 {
7456         struct ctlr_info *h;
7457         unsigned long flags;
7458
7459         if (pci_get_drvdata(pdev) == NULL) {
7460                 dev_err(&pdev->dev, "unable to remove device\n");
7461                 return;
7462         }
7463         h = pci_get_drvdata(pdev);
7464
7465         /* Get rid of any controller monitoring work items */
7466         spin_lock_irqsave(&h->lock, flags);
7467         h->remove_in_progress = 1;
7468         spin_unlock_irqrestore(&h->lock, flags);
7469         cancel_delayed_work_sync(&h->monitor_ctlr_work);
7470         cancel_delayed_work_sync(&h->rescan_ctlr_work);
7471         destroy_workqueue(h->rescan_ctlr_wq);
7472         destroy_workqueue(h->resubmit_wq);
7473         hpsa_unregister_scsi(h);        /* unhook from SCSI subsystem */
7474         hpsa_shutdown(pdev);
7475         iounmap(h->vaddr);
7476         iounmap(h->transtable);
7477         iounmap(h->cfgtable);
7478         hpsa_free_device_info(h);
7479         hpsa_free_sg_chain_blocks(h);
7480         pci_free_consistent(h->pdev,
7481                 h->nr_cmds * sizeof(struct CommandList),
7482                 h->cmd_pool, h->cmd_pool_dhandle);
7483         pci_free_consistent(h->pdev,
7484                 h->nr_cmds * sizeof(struct ErrorInfo),
7485                 h->errinfo_pool, h->errinfo_pool_dhandle);
7486         hpsa_free_reply_queues(h);
7487         kfree(h->cmd_pool_bits);
7488         kfree(h->blockFetchTable);
7489         kfree(h->ioaccel1_blockFetchTable);
7490         kfree(h->ioaccel2_blockFetchTable);
7491         kfree(h->hba_inquiry_data);
7492         pci_disable_device(pdev);
7493         pci_release_regions(pdev);
7494         free_percpu(h->lockup_detected);
7495         kfree(h);
7496 }
7497
7498 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
7499         __attribute__((unused)) pm_message_t state)
7500 {
7501         return -ENOSYS;
7502 }
7503
7504 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
7505 {
7506         return -ENOSYS;
7507 }
7508
7509 static struct pci_driver hpsa_pci_driver = {
7510         .name = HPSA,
7511         .probe = hpsa_init_one,
7512         .remove = hpsa_remove_one,
7513         .id_table = hpsa_pci_device_id, /* id_table */
7514         .shutdown = hpsa_shutdown,
7515         .suspend = hpsa_suspend,
7516         .resume = hpsa_resume,
7517 };
7518
7519 /* Fill in bucket_map[], given nsgs (the max number of
7520  * scatter gather elements supported) and bucket[],
7521  * which is an array of 8 integers.  The bucket[] array
7522  * contains 8 different DMA transfer sizes (in 16
7523  * byte increments) which the controller uses to fetch
7524  * commands.  This function fills in bucket_map[], which
7525  * maps a given number of scatter gather elements to one of
7526  * the 8 DMA transfer sizes.  The point of it is to allow the
7527  * controller to only do as much DMA as needed to fetch the
7528  * command, with the DMA transfer size encoded in the lower
7529  * bits of the command address.
7530  */
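/* A worked example with hypothetical values (not the driver's actual
 * tables): given bucket[] = {5, 6, 8, 10, 12, 20, 28, 36} and
 * min_blocks = 4, a command with i = 3 SG entries needs 3 + 4 = 7
 * blocks; the smallest bucket that fits is bucket[2] = 8, so
 * bucket_map[3] = 2.
 */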
7531 static void  calc_bucket_map(int bucket[], int num_buckets,
7532         int nsgs, int min_blocks, u32 *bucket_map)
7533 {
7534         int i, j, b, size;
7535
7536         /* Note, bucket_map must have nsgs+1 entries. */
7537         for (i = 0; i <= nsgs; i++) {
7538                 /* Compute size of a command with i SG entries */
7539                 size = i + min_blocks;
7540                 b = num_buckets; /* Assume the biggest bucket */
7541                 /* Find the bucket that is just big enough */
7542                 for (j = 0; j < num_buckets; j++) {
7543                         if (bucket[j] >= size) {
7544                                 b = j;
7545                                 break;
7546                         }
7547                 }
7548                 /* for a command with i SG entries, use bucket b. */
7549                 bucket_map[i] = b;
7550         }
7551 }
7552
7553 /* return -ENODEV or other reason on error, 0 on success */
7554 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7555 {
7556         int i;
7557         unsigned long register_value;
7558         unsigned long transMethod = CFGTBL_Trans_Performant |
7559                         (trans_support & CFGTBL_Trans_use_short_tags) |
7560                                 CFGTBL_Trans_enable_directed_msix |
7561                         (trans_support & (CFGTBL_Trans_io_accel1 |
7562                                 CFGTBL_Trans_io_accel2));
7563         struct access_method access = SA5_performant_access;
7564
7565         /* This is a bit complicated.  There are 8 registers on
7566          * the controller which we write to in order to tell it the 8
7567          * different sizes of commands it may see.  It's a way of
7568          * reducing the DMA done to fetch each command.  Encoded into
7569          * each command's tag are 3 bits which communicate to the controller
7570          * which of the eight sizes that command fits within.  The size of
7571          * each command depends on how many scatter gather entries there are.
7572          * Each SG entry requires 16 bytes.  The eight registers are programmed
7573          * with the number of 16-byte blocks a command of that size requires.
7574          * The smallest command possible requires 5 such 16-byte blocks;
7575          * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
7576          * blocks.  Note, this only extends to the SG entries contained
7577          * within the command block, and does not extend to chained blocks
7578          * of SG elements.   bft[] contains the eight values we write to
7579          * the registers.  They are not evenly distributed, but have more
7580          * sizes for small commands, and fewer sizes for larger commands.
7581          */
7582         int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
7583 #define MIN_IOACCEL2_BFT_ENTRY 5
7584 #define HPSA_IOACCEL2_HEADER_SZ 4
7585         int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
7586                         13, 14, 15, 16, 17, 18, 19,
7587                         HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
7588         BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
7589         BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
7590         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
7591                                  16 * MIN_IOACCEL2_BFT_ENTRY);
7592         BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
7593         BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
7594         /*  5 = 1 s/g entry or 4k
7595          *  6 = 2 s/g entry or 8k
7596          *  8 = 4 s/g entry or 16k
7597          * 10 = 6 s/g entry or 24k
7598          */
7599
7600         /* If the controller supports either ioaccel method then
7601          * we can also use the RAID stack submit path that does not
7602          * perform the superfluous readl() after each command submission.
7603          */
7604         if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
7605                 access = SA5_performant_access_no_read;
7606
7607         /* Controller spec: zero out this buffer. */
7608         for (i = 0; i < h->nreply_queues; i++)
7609                 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
7610
7611         bft[7] = SG_ENTRIES_IN_CMD + 4;
7612         calc_bucket_map(bft, ARRAY_SIZE(bft),
7613                                 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
7614         for (i = 0; i < 8; i++)
7615                 writel(bft[i], &h->transtable->BlockFetch[i]);
7616
7617         /* size of controller ring buffer */
7618         writel(h->max_commands, &h->transtable->RepQSize);
7619         writel(h->nreply_queues, &h->transtable->RepQCount);
7620         writel(0, &h->transtable->RepQCtrAddrLow32);
7621         writel(0, &h->transtable->RepQCtrAddrHigh32);
7622
7623         for (i = 0; i < h->nreply_queues; i++) {
7624                 writel(0, &h->transtable->RepQAddr[i].upper);
7625                 writel(h->reply_queue[i].busaddr,
7626                         &h->transtable->RepQAddr[i].lower);
7627         }
7628
7629         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7630         writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
7631         /*
7632          * enable outbound interrupt coalescing in accelerator mode;
7633          */
7634         if (trans_support & CFGTBL_Trans_io_accel1) {
7635                 access = SA5_ioaccel_mode1_access;
7636                 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7637                 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7638         } else {
7639                 if (trans_support & CFGTBL_Trans_io_accel2) {
7640                         access = SA5_ioaccel_mode2_access;
7641                         writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7642                         writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7643                 }
7644         }
7645         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7646         if (hpsa_wait_for_mode_change_ack(h)) {
7647                 dev_err(&h->pdev->dev,
7648                         "performant mode problem - doorbell timeout\n");
7649                 return -ENODEV;
7650         }
7651         register_value = readl(&(h->cfgtable->TransportActive));
7652         if (!(register_value & CFGTBL_Trans_Performant)) {
7653                 dev_err(&h->pdev->dev,
7654                         "performant mode problem - transport not active\n");
7655                 return -ENODEV;
7656         }
7657         /* Change the access methods to the performant access methods */
7658         h->access = access;
7659         h->transMethod = transMethod;
7660
7661         if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7662                 (trans_support & CFGTBL_Trans_io_accel2)))
7663                 return 0;
7664
7665         if (trans_support & CFGTBL_Trans_io_accel1) {
7666                 /* Set up I/O accelerator mode */
7667                 for (i = 0; i < h->nreply_queues; i++) {
7668                         writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7669                         h->reply_queue[i].current_entry =
7670                                 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7671                 }
7672                 bft[7] = h->ioaccel_maxsg + 8;
7673                 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7674                                 h->ioaccel1_blockFetchTable);
7675
7676                 /* initialize all reply queue entries to unused */
7677                 for (i = 0; i < h->nreply_queues; i++)
7678                         memset(h->reply_queue[i].head,
7679                                 (u8) IOACCEL_MODE1_REPLY_UNUSED,
7680                                 h->reply_queue_size);
7681
7682                 /* set all the constant fields in the accelerator command
7683                  * frames once at init time to save CPU cycles later.
7684                  */
7685                 for (i = 0; i < h->nr_cmds; i++) {
7686                         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7687
7688                         cp->function = IOACCEL1_FUNCTION_SCSIIO;
7689                         cp->err_info = (u32) (h->errinfo_pool_dhandle +
7690                                         (i * sizeof(struct ErrorInfo)));
7691                         cp->err_info_len = sizeof(struct ErrorInfo);
7692                         cp->sgl_offset = IOACCEL1_SGLOFFSET;
7693                         cp->host_context_flags =
7694                                 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
7695                         cp->timeout_sec = 0;
7696                         cp->ReplyQueue = 0;
7697                         cp->tag =
7698                                 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
7699                         cp->host_addr =
7700                                 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
7701                                         (i * sizeof(struct io_accel1_cmd)));
7702                 }
7703         } else if (trans_support & CFGTBL_Trans_io_accel2) {
7704                 u64 cfg_offset, cfg_base_addr_index;
7705                 u32 bft2_offset, cfg_base_addr;
7706                 int rc;
7707
7708                 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7709                         &cfg_base_addr_index, &cfg_offset);
7710                 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7711                 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7712                 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7713                                 4, h->ioaccel2_blockFetchTable);
7714                 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7715                 BUILD_BUG_ON(offsetof(struct CfgTable,
7716                                 io_accel_request_size_offset) != 0xb8);
7717                 h->ioaccel2_bft2_regs =
7718                         remap_pci_mem(pci_resource_start(h->pdev,
7719                                         cfg_base_addr_index) +
7720                                         cfg_offset + bft2_offset,
7721                                         ARRAY_SIZE(bft2) *
7722                                         sizeof(*h->ioaccel2_bft2_regs));
7723                 for (i = 0; i < ARRAY_SIZE(bft2); i++)
7724                         writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
7725         }
7726         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7727         if (hpsa_wait_for_mode_change_ack(h)) {
7728                 dev_err(&h->pdev->dev,
7729                         "performant mode problem - enabling ioaccel mode\n");
7730                 return -ENODEV;
7731         }
7732         return 0;
7733 }
7734
7735 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7736 {
7737         h->ioaccel_maxsg =
7738                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7739         if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7740                 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7741
7742         /* Command structures must be aligned on a 128-byte boundary
7743          * because the 7 lower bits of the address are used by the
7744          * hardware.
7745          */
7746         BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7747                         IOACCEL1_COMMANDLIST_ALIGNMENT);
7748         h->ioaccel_cmd_pool =
7749                 pci_alloc_consistent(h->pdev,
7750                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7751                         &(h->ioaccel_cmd_pool_dhandle));
7752
7753         h->ioaccel1_blockFetchTable =
7754                 kmalloc(((h->ioaccel_maxsg + 1) *
7755                                 sizeof(u32)), GFP_KERNEL);
7756
7757         if ((h->ioaccel_cmd_pool == NULL) ||
7758                 (h->ioaccel1_blockFetchTable == NULL))
7759                 goto clean_up;
7760
7761         memset(h->ioaccel_cmd_pool, 0,
7762                 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7763         return 0;
7764
7765 clean_up:
7766         if (h->ioaccel_cmd_pool)
7767                 pci_free_consistent(h->pdev,
7768                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7769                         h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
7770         kfree(h->ioaccel1_blockFetchTable);
7771         return 1;
7772 }
7773
7774 static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7775 {
7776         /* Allocate ioaccel2 mode command blocks and block fetch table */
7777
7778         h->ioaccel_maxsg =
7779                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7780         if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7781                 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7782
7783         BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7784                         IOACCEL2_COMMANDLIST_ALIGNMENT);
7785         h->ioaccel2_cmd_pool =
7786                 pci_alloc_consistent(h->pdev,
7787                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7788                         &(h->ioaccel2_cmd_pool_dhandle));
7789
7790         h->ioaccel2_blockFetchTable =
7791                 kmalloc(((h->ioaccel_maxsg + 1) *
7792                                 sizeof(u32)), GFP_KERNEL);
7793
7794         if ((h->ioaccel2_cmd_pool == NULL) ||
7795                 (h->ioaccel2_blockFetchTable == NULL))
7796                 goto clean_up;
7797
7798         memset(h->ioaccel2_cmd_pool, 0,
7799                 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
7800         return 0;
7801
7802 clean_up:
7803         if (h->ioaccel2_cmd_pool)
7804                 pci_free_consistent(h->pdev,
7805                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7806                         h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7807         kfree(h->ioaccel2_blockFetchTable);
7808         return 1;
7809 }
7810
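/*
 * Top-level transport setup: if performant mode is supported (and not
 * disabled via hpsa_simple_mode), allocate any ioaccel command pools,
 * the reply queues and the block fetch table, then switch the
 * controller into performant mode.
 */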
7811 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7812 {
7813         u32 trans_support;
7814         unsigned long transMethod = CFGTBL_Trans_Performant |
7815                                         CFGTBL_Trans_use_short_tags;
7816         int i;
7817
7818         if (hpsa_simple_mode)
7819                 return;
7820
7821         trans_support = readl(&(h->cfgtable->TransportSupport));
7822         if (!(trans_support & PERFORMANT_MODE))
7823                 return;
7824
7825         /* Check for I/O accelerator mode support */
7826         if (trans_support & CFGTBL_Trans_io_accel1) {
7827                 transMethod |= CFGTBL_Trans_io_accel1 |
7828                                 CFGTBL_Trans_enable_directed_msix;
7829                 if (hpsa_alloc_ioaccel_cmd_and_bft(h))
7830                         goto clean_up;
7831         } else {
7832                 if (trans_support & CFGTBL_Trans_io_accel2) {
7833                         transMethod |= CFGTBL_Trans_io_accel2 |
7834                                         CFGTBL_Trans_enable_directed_msix;
7835                         if (ioaccel2_alloc_cmds_and_bft(h))
7836                                 goto clean_up;
7837                 }
7838         }
7839
7840         h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
7841         hpsa_get_max_perf_mode_cmds(h);
7842         /* Performant mode ring buffer and supporting data structures */
7843         h->reply_queue_size = h->max_commands * sizeof(u64);
7844
7845         for (i = 0; i < h->nreply_queues; i++) {
7846                 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7847                                                 h->reply_queue_size,
7848                                                 &(h->reply_queue[i].busaddr));
7849                 if (!h->reply_queue[i].head)
7850                         goto clean_up;
7851                 h->reply_queue[i].size = h->max_commands;
7852                 h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
7853                 h->reply_queue[i].current_entry = 0;
7854         }
7855
7856         /* Need a block fetch table for performant mode */
7857         h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
7858                                 sizeof(u32)), GFP_KERNEL);
7859         if (!h->blockFetchTable)
7860                 goto clean_up;
7861
7862         hpsa_enter_performant_mode(h, trans_support);
7863         return;
7864
7865 clean_up:
7866         hpsa_free_reply_queues(h);
7867         kfree(h->blockFetchTable);
7868 }
7869
7870 static int is_accelerated_cmd(struct CommandList *c)
7871 {
7872         return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
7873 }
7874
7875 static void hpsa_drain_accel_commands(struct ctlr_info *h)
7876 {
7877         struct CommandList *c = NULL;
7878         int i, accel_cmds_out;
7879         int refcount;
7880
7881         do { /* wait for all outstanding ioaccel commands to drain out */
7882                 accel_cmds_out = 0;
7883                 for (i = 0; i < h->nr_cmds; i++) {
7884                         c = h->cmd_pool + i;
7885                         refcount = atomic_inc_return(&c->refcount);
7886                         if (refcount > 1) /* Command is allocated */
7887                                 accel_cmds_out += is_accelerated_cmd(c);
7888                         cmd_free(h, c);
7889                 }
7890                 if (accel_cmds_out <= 0)
7891                         break;
7892                 msleep(100);
7893         } while (1);
7894 }
7895
7896 /*
7897  *  This is it.  Register the PCI driver information for the cards we control;
7898  *  the OS will call our registered routines when it finds one of our cards.
7899  */
7900 static int __init hpsa_init(void)
7901 {
7902         return pci_register_driver(&hpsa_pci_driver);
7903 }
7904
7905 static void __exit hpsa_cleanup(void)
7906 {
7907         pci_unregister_driver(&hpsa_pci_driver);
7908 }
7909
7910 static void __attribute__((unused)) verify_offsets(void)
7911 {
7912 #define VERIFY_OFFSET(member, offset) \
7913         BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
7914
7915         VERIFY_OFFSET(structure_size, 0);
7916         VERIFY_OFFSET(volume_blk_size, 4);
7917         VERIFY_OFFSET(volume_blk_cnt, 8);
7918         VERIFY_OFFSET(phys_blk_shift, 16);
7919         VERIFY_OFFSET(parity_rotation_shift, 17);
7920         VERIFY_OFFSET(strip_size, 18);
7921         VERIFY_OFFSET(disk_starting_blk, 20);
7922         VERIFY_OFFSET(disk_blk_cnt, 28);
7923         VERIFY_OFFSET(data_disks_per_row, 36);
7924         VERIFY_OFFSET(metadata_disks_per_row, 38);
7925         VERIFY_OFFSET(row_cnt, 40);
7926         VERIFY_OFFSET(layout_map_count, 42);
7927         VERIFY_OFFSET(flags, 44);
7928         VERIFY_OFFSET(dekindex, 46);
7929         /* VERIFY_OFFSET(reserved, 48 */
7930         VERIFY_OFFSET(data, 64);
7931
7932 #undef VERIFY_OFFSET
7933
7934 #define VERIFY_OFFSET(member, offset) \
7935         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
7936
7937         VERIFY_OFFSET(IU_type, 0);
7938         VERIFY_OFFSET(direction, 1);
7939         VERIFY_OFFSET(reply_queue, 2);
7940         /* VERIFY_OFFSET(reserved1, 3);  */
7941         VERIFY_OFFSET(scsi_nexus, 4);
7942         VERIFY_OFFSET(Tag, 8);
7943         VERIFY_OFFSET(cdb, 16);
7944         VERIFY_OFFSET(cciss_lun, 32);
7945         VERIFY_OFFSET(data_len, 40);
7946         VERIFY_OFFSET(cmd_priority_task_attr, 44);
7947         VERIFY_OFFSET(sg_count, 45);
7948         /* VERIFY_OFFSET(reserved3 */
7949         VERIFY_OFFSET(err_ptr, 48);
7950         VERIFY_OFFSET(err_len, 56);
7951         /* VERIFY_OFFSET(reserved4  */
7952         VERIFY_OFFSET(sg, 64);
7953
7954 #undef VERIFY_OFFSET
7955
7956 #define VERIFY_OFFSET(member, offset) \
7957         BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
7958
7959         VERIFY_OFFSET(dev_handle, 0x00);
7960         VERIFY_OFFSET(reserved1, 0x02);
7961         VERIFY_OFFSET(function, 0x03);
7962         VERIFY_OFFSET(reserved2, 0x04);
7963         VERIFY_OFFSET(err_info, 0x0C);
7964         VERIFY_OFFSET(reserved3, 0x10);
7965         VERIFY_OFFSET(err_info_len, 0x12);
7966         VERIFY_OFFSET(reserved4, 0x13);
7967         VERIFY_OFFSET(sgl_offset, 0x14);
7968         VERIFY_OFFSET(reserved5, 0x15);
7969         VERIFY_OFFSET(transfer_len, 0x1C);
7970         VERIFY_OFFSET(reserved6, 0x20);
7971         VERIFY_OFFSET(io_flags, 0x24);
7972         VERIFY_OFFSET(reserved7, 0x26);
7973         VERIFY_OFFSET(LUN, 0x34);
7974         VERIFY_OFFSET(control, 0x3C);
7975         VERIFY_OFFSET(CDB, 0x40);
7976         VERIFY_OFFSET(reserved8, 0x50);
7977         VERIFY_OFFSET(host_context_flags, 0x60);
7978         VERIFY_OFFSET(timeout_sec, 0x62);
7979         VERIFY_OFFSET(ReplyQueue, 0x64);
7980         VERIFY_OFFSET(reserved9, 0x65);
7981         VERIFY_OFFSET(tag, 0x68);
7982         VERIFY_OFFSET(host_addr, 0x70);
7983         VERIFY_OFFSET(CISS_LUN, 0x78);
7984         VERIFY_OFFSET(SG, 0x78 + 8);
7985 #undef VERIFY_OFFSET
7986 }
7987
7988 module_init(hpsa_init);
7989 module_exit(hpsa_cleanup);